From e67f80fd20df72b1b4f58e48539848a8928ec049 Mon Sep 17 00:00:00 2001 From: Sam Leffler Date: Sun, 19 Nov 2006 23:55:23 +0000 Subject: [PATCH] Gateworks Avila board support: o ixp425 support o NPE network driver (requires Intel microcode) o h/w qmgr support o True IDE compact flash over expansion bus o pci (ath and hifn795x parts tested) o xscale watchdog timer o ds1672 RTC on i2c bus o ad7418 voltage + temp monitoring on i2c bus o uart Work done together with cognet, kevlo, and jmg. Parts of the ixp425 support obtaine/derived from netbsd. Reviewed by: cognet, imp MFC after: 1 month --- sys/arm/arm/cpufunc.c | 1 - sys/arm/xscale/ixp425/avila_ata.c | 556 +++++++ sys/arm/xscale/ixp425/avila_machdep.c | 545 +++++++ sys/arm/xscale/ixp425/files.avila | 4 + sys/arm/xscale/ixp425/files.ixp425 | 41 + sys/arm/xscale/ixp425/if_npe.c | 1673 ++++++++++++++++++++++ sys/arm/xscale/ixp425/if_npereg.h | 288 ++++ sys/arm/xscale/ixp425/ixdp425_pci.c | 169 +++ sys/arm/xscale/ixp425/ixdp425reg.h | 54 + sys/arm/xscale/ixp425/ixp425.c | 369 +++++ sys/arm/xscale/ixp425/ixp425_a4x_io.S | 142 ++ sys/arm/xscale/ixp425/ixp425_a4x_space.c | 116 ++ sys/arm/xscale/ixp425/ixp425_iic.c | 193 +++ sys/arm/xscale/ixp425/ixp425_intr.h | 149 ++ sys/arm/xscale/ixp425/ixp425_mem.c | 85 ++ sys/arm/xscale/ixp425/ixp425_npe.c | 1396 ++++++++++++++++++ sys/arm/xscale/ixp425/ixp425_npereg.h | 434 ++++++ sys/arm/xscale/ixp425/ixp425_npevar.h | 96 ++ sys/arm/xscale/ixp425/ixp425_pci.c | 455 ++++++ sys/arm/xscale/ixp425/ixp425_pci_asm.S | 102 ++ sys/arm/xscale/ixp425/ixp425_pci_space.c | 496 +++++++ sys/arm/xscale/ixp425/ixp425_qmgr.c | 1077 ++++++++++++++ sys/arm/xscale/ixp425/ixp425_qmgr.h | 243 ++++ sys/arm/xscale/ixp425/ixp425_space.c | 215 +++ sys/arm/xscale/ixp425/ixp425_timer.c | 267 ++++ sys/arm/xscale/ixp425/ixp425_wdog.c | 118 ++ sys/arm/xscale/ixp425/ixp425reg.h | 582 ++++++++ sys/arm/xscale/ixp425/ixp425var.h | 99 ++ sys/arm/xscale/ixp425/std.avila | 6 + sys/arm/xscale/ixp425/std.ixp425 | 6 + sys/arm/xscale/ixp425/uart_bus_ixp425.c | 91 ++ sys/arm/xscale/ixp425/uart_cpu_ixp425.c | 67 + 32 files changed, 10134 insertions(+), 1 deletion(-) create mode 100644 sys/arm/xscale/ixp425/avila_ata.c create mode 100644 sys/arm/xscale/ixp425/avila_machdep.c create mode 100644 sys/arm/xscale/ixp425/files.avila create mode 100644 sys/arm/xscale/ixp425/files.ixp425 create mode 100644 sys/arm/xscale/ixp425/if_npe.c create mode 100644 sys/arm/xscale/ixp425/if_npereg.h create mode 100644 sys/arm/xscale/ixp425/ixdp425_pci.c create mode 100644 sys/arm/xscale/ixp425/ixdp425reg.h create mode 100644 sys/arm/xscale/ixp425/ixp425.c create mode 100644 sys/arm/xscale/ixp425/ixp425_a4x_io.S create mode 100644 sys/arm/xscale/ixp425/ixp425_a4x_space.c create mode 100644 sys/arm/xscale/ixp425/ixp425_iic.c create mode 100644 sys/arm/xscale/ixp425/ixp425_intr.h create mode 100644 sys/arm/xscale/ixp425/ixp425_mem.c create mode 100644 sys/arm/xscale/ixp425/ixp425_npe.c create mode 100644 sys/arm/xscale/ixp425/ixp425_npereg.h create mode 100644 sys/arm/xscale/ixp425/ixp425_npevar.h create mode 100644 sys/arm/xscale/ixp425/ixp425_pci.c create mode 100644 sys/arm/xscale/ixp425/ixp425_pci_asm.S create mode 100644 sys/arm/xscale/ixp425/ixp425_pci_space.c create mode 100644 sys/arm/xscale/ixp425/ixp425_qmgr.c create mode 100644 sys/arm/xscale/ixp425/ixp425_qmgr.h create mode 100644 sys/arm/xscale/ixp425/ixp425_space.c create mode 100644 sys/arm/xscale/ixp425/ixp425_timer.c create mode 100644 sys/arm/xscale/ixp425/ixp425_wdog.c create mode 100644 
sys/arm/xscale/ixp425/ixp425reg.h create mode 100644 sys/arm/xscale/ixp425/ixp425var.h create mode 100644 sys/arm/xscale/ixp425/std.avila create mode 100644 sys/arm/xscale/ixp425/std.ixp425 create mode 100644 sys/arm/xscale/ixp425/uart_bus_ixp425.c create mode 100644 sys/arm/xscale/ixp425/uart_cpu_ixp425.c diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c index 52f1abf12157..81922e62670b 100644 --- a/sys/arm/arm/cpufunc.c +++ b/sys/arm/arm/cpufunc.c @@ -1015,7 +1015,6 @@ set_cpufuncs() #ifdef CPU_XSCALE_IXP425 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || cputype == CPU_ID_IXP425_266) { - ixp425_icu_init(); cpufuncs = xscale_cpufuncs; #if defined(PERFCTRS) diff --git a/sys/arm/xscale/ixp425/avila_ata.c b/sys/arm/xscale/ixp425/avila_ata.c new file mode 100644 index 000000000000..9c5f2c2f2e2e --- /dev/null +++ b/sys/arm/xscale/ixp425/avila_ata.c @@ -0,0 +1,556 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Compact Flash Support for the Avila Gateworks XScale boards. + * There are 1 or 2 optional CF slots operated in "True IDE" mode. + * Registers are on the Expansion Bus connected to CS1. Interrupts + * are tied to GPIO pin 12. No DMA, just PIO. + * + * See also http://www.intel.com/design/network/applnots/302456.htm. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define AVILA_IDE_GPIN 12 /* GPIO pin # */ +#define AVILA_IDE_IRQ IXP425_INT_GPIO_12 +#define AVILA_IDE_CTRL 0x1e /* control register */ + +struct ata_avila_softc { + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_exp_ioh; /* Exp Bus config registers */ + bus_space_handle_t sc_ioh; /* CS1 data registers */ + struct bus_space sc_expbus_tag; + struct resource sc_ata; /* hand-crafted for ATA */ + int sc_rid; /* rid for IRQ */ + struct resource *sc_irq; /* IRQ resource */ + void *sc_ih; /* interrupt handler */ + struct { + void (*cb)(void *); + void *arg; + } sc_intr[1]; /* NB: 1/channel */ +}; + +static void ata_avila_intr(void *); +bs_protos(ata); +static void ata_bs_rm_2_s(void *, bus_space_handle_t, bus_size_t, + u_int16_t *, bus_size_t); +static void ata_bs_wm_2_s(void *, bus_space_handle_t, bus_size_t, + const u_int16_t *, bus_size_t); + +static int +ata_avila_probe(device_t dev) +{ + /* XXX any way to check? */ + device_set_desc_copy(dev, "Gateworks Avila IDE/CF Controller"); + return 0; +} + +static int +ata_avila_attach(device_t dev) +{ + struct ata_avila_softc *sc = device_get_softc(dev); + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + + sc->sc_dev = dev; + /* NB: borrow from parent */ + sc->sc_iot = sa->sc_iot; + sc->sc_exp_ioh = sa->sc_exp_ioh; + if (bus_space_map(sc->sc_iot, + IXP425_EXP_BUS_CS1_HWBASE, IXP425_EXP_BUS_CS1_SIZE, 0, &sc->sc_ioh)) + panic("%s: unable to map Expansion Bus CS1 window", __func__); + + /* + * Craft special resource for ATA bus space ops + * that go through the expansion bus and require + * special hackery to ena/dis 16-bit operations. + * + * XXX probably should just make this generic for + * accessing the expansion bus. + */ + sc->sc_expbus_tag.bs_cookie = sc; /* NB: backpointer */ + /* read single */ + sc->sc_expbus_tag.bs_r_1 = ata_bs_r_1, + sc->sc_expbus_tag.bs_r_2 = ata_bs_r_2, + /* read multiple */ + sc->sc_expbus_tag.bs_rm_2 = ata_bs_rm_2, + sc->sc_expbus_tag.bs_rm_2_s = ata_bs_rm_2_s, + /* write (single) */ + sc->sc_expbus_tag.bs_w_1 = ata_bs_w_1, + sc->sc_expbus_tag.bs_w_2 = ata_bs_w_2, + /* write multiple */ + sc->sc_expbus_tag.bs_wm_2 = ata_bs_wm_2, + sc->sc_expbus_tag.bs_wm_2_s = ata_bs_wm_2_s, + + rman_set_bustag(&sc->sc_ata, &sc->sc_expbus_tag); + rman_set_bushandle(&sc->sc_ata, sc->sc_ioh); + + GPIO_CONF_WRITE_4(sa, IXP425_GPIO_GPOER, + GPIO_CONF_READ_4(sa, IXP425_GPIO_GPOER) | (1<sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid, + AVILA_IDE_IRQ, AVILA_IDE_IRQ, 1, RF_ACTIVE); + if (!sc->sc_irq) + panic("Unable to allocate irq %u.\n", AVILA_IDE_IRQ); + bus_setup_intr(dev, sc->sc_irq, + INTR_TYPE_BIO | INTR_MPSAFE | INTR_ENTROPY, + ata_avila_intr, sc, &sc->sc_ih); + + /* attach channel on this controller */ + device_add_child(dev, "ata", devclass_find_free_unit(ata_devclass, 0)); + bus_generic_attach(dev); + + return 0; +} + +static int +ata_avila_detach(device_t dev) +{ + struct ata_avila_softc *sc = device_get_softc(dev); + device_t *children; + int nc; + + /* XXX quiesce gpio? 
*/ + + /* detach & delete all children */ + if (device_get_children(dev, &children, &nc) == 0) { + if (nc > 0) + device_delete_child(dev, children[0]); + free(children, M_TEMP); + } + + bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); + bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq); + + return 0; +} + +static void +ata_avila_intr(void *xsc) +{ + struct ata_avila_softc *sc = xsc; + + if (sc->sc_intr[0].cb != NULL) + sc->sc_intr[0].cb(sc->sc_intr[0].arg); +} + +static struct resource * +ata_avila_alloc_resource(device_t dev, device_t child, int type, int *rid, + u_long start, u_long end, u_long count, u_int flags) +{ + struct ata_avila_softc *sc = device_get_softc(dev); + + KASSERT(type == SYS_RES_IRQ && *rid == ATA_IRQ_RID, + ("type %u rid %u start %lu end %lu count %lu flags %u", + type, *rid, start, end, count, flags)); + + /* doesn't matter what we return so reuse the real thing */ + return sc->sc_irq; +} + +static int +ata_avila_release_resource(device_t dev, device_t child, int type, int rid, + struct resource *r) +{ + KASSERT(type == SYS_RES_IRQ && rid == ATA_IRQ_RID, + ("type %u rid %u", type, rid)); + return 0; +} + +static int +ata_avila_setup_intr(device_t dev, device_t child, struct resource *irq, + int flags, driver_intr_t *function, void *argument, + void **cookiep) +{ + struct ata_avila_softc *sc = device_get_softc(dev); + int unit = ((struct ata_channel *)device_get_softc(child))->unit; + + KASSERT(unit == 0, ("unit %d", unit)); + sc->sc_intr[unit].cb = function; + sc->sc_intr[unit].arg = argument; + *cookiep = sc; + return 0; +} + +static int +ata_avila_teardown_intr(device_t dev, device_t child, struct resource *irq, + void *cookie) +{ + struct ata_avila_softc *sc = device_get_softc(dev); + int unit = ((struct ata_channel *)device_get_softc(child))->unit; + + KASSERT(unit == 0, ("unit %d", unit)); + sc->sc_intr[unit].cb = NULL; + sc->sc_intr[unit].arg = NULL; + return 0; +} + +/* + * Bus space accessors for CF-IDE PIO operations. + */ + +/* + * Enable/disable 16-bit ops on the expansion bus. + */ +static void __inline +enable_16(struct ata_avila_softc *sc) +{ + EXP_BUS_WRITE_4(sc, EXP_TIMING_CS1_OFFSET, + EXP_BUS_READ_4(sc, EXP_TIMING_CS1_OFFSET) &~ EXP_BYTE_EN); + DELAY(100); /* XXX? */ +} + +static void __inline +disable_16(struct ata_avila_softc *sc) +{ + DELAY(100); /* XXX? 
*/ + EXP_BUS_WRITE_4(sc, EXP_TIMING_CS1_OFFSET, + EXP_BUS_READ_4(sc, EXP_TIMING_CS1_OFFSET) | EXP_BYTE_EN); +} + +uint8_t +ata_bs_r_1(void *t, bus_space_handle_t h, bus_size_t o) +{ + struct ata_avila_softc *sc = t; + + return bus_space_read_1(sc->sc_iot, h, o); +} + +void +ata_bs_w_1(void *t, bus_space_handle_t h, bus_size_t o, u_int8_t v) +{ + struct ata_avila_softc *sc = t; + + bus_space_write_1(sc->sc_iot, h, o, v); +} + +uint16_t +ata_bs_r_2(void *t, bus_space_handle_t h, bus_size_t o) +{ + struct ata_avila_softc *sc = t; + uint16_t v; + + enable_16(sc); + v = bus_space_read_2(sc->sc_iot, h, o); + disable_16(sc); + return v; +} + +void +ata_bs_w_2(void *t, bus_space_handle_t h, bus_size_t o, uint16_t v) +{ + struct ata_avila_softc *sc = t; + + enable_16(sc); + bus_space_write_2(sc->sc_iot, h, o, v); + disable_16(sc); +} + +void +ata_bs_rm_2(void *t, bus_space_handle_t h, bus_size_t o, + u_int16_t *d, bus_size_t c) +{ + struct ata_avila_softc *sc = t; + + enable_16(sc); + bus_space_read_multi_2(sc->sc_iot, h, o, d, c); + disable_16(sc); +} + +void +ata_bs_wm_2(void *t, bus_space_handle_t h, bus_size_t o, + const u_int16_t *d, bus_size_t c) +{ + struct ata_avila_softc *sc = t; + + enable_16(sc); + bus_space_write_multi_2(sc->sc_iot, h, o, d, c); + disable_16(sc); +} + +/* XXX workaround ata driver by (incorrectly) byte swapping stream cases */ + +void +ata_bs_rm_2_s(void *t, bus_space_handle_t h, bus_size_t o, + u_int16_t *d, bus_size_t c) +{ + struct ata_avila_softc *sc = t; + uint16_t v; + bus_size_t i; + + enable_16(sc); +#if 1 + for (i = 0; i < c; i++) { + v = bus_space_read_2(sc->sc_iot, h, o); + d[i] = bswap16(v); + } +#else + bus_space_read_multi_stream_2(sc->sc_iot, h, o, d, c); +#endif + disable_16(sc); +} + +void +ata_bs_wm_2_s(void *t, bus_space_handle_t h, bus_size_t o, + const u_int16_t *d, bus_size_t c) +{ + struct ata_avila_softc *sc = t; + bus_size_t i; + + enable_16(sc); +#if 1 + for (i = 0; i < c; i++) + bus_space_write_2(sc->sc_iot, h, o, bswap16(d[i])); +#else + bus_space_write_multi_stream_2(sc->sc_iot, h, o, d, c); +#endif + disable_16(sc); +} + +static device_method_t ata_avila_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, ata_avila_probe), + DEVMETHOD(device_attach, ata_avila_attach), + DEVMETHOD(device_detach, ata_avila_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + + /* bus methods */ + DEVMETHOD(bus_alloc_resource, ata_avila_alloc_resource), + DEVMETHOD(bus_release_resource, ata_avila_release_resource), + DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), + DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), + DEVMETHOD(bus_setup_intr, ata_avila_setup_intr), + DEVMETHOD(bus_teardown_intr, ata_avila_teardown_intr), + + { 0, 0 } +}; + +devclass_t ata_avila_devclass; + +static driver_t ata_avila_driver = { + "ata_avila", + ata_avila_methods, + sizeof(struct ata_avila_softc), +}; + +DRIVER_MODULE(ata_avila, ixp, ata_avila_driver, ata_avila_devclass, 0, 0); +MODULE_VERSION(ata_avila, 1); +MODULE_DEPEND(ata_avila, ata, 1, 1, 1); + +static int +avila_channel_probe(device_t dev) +{ + struct ata_channel *ch = device_get_softc(dev); + + ch->unit = 0; + ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE; + device_set_desc_copy(dev, "ATA channel 0"); + + return ata_probe(dev); +} + +static int +avila_channel_attach(device_t dev) +{ + struct ata_avila_softc *sc = device_get_softc(device_get_parent(dev)); + struct 
ata_channel *ch = device_get_softc(dev); + int i; + + for (i = 0; i < ATA_MAX_RES; i++) + ch->r_io[i].res = &sc->sc_ata; + + ch->r_io[ATA_DATA].offset = ATA_DATA; + ch->r_io[ATA_FEATURE].offset = ATA_FEATURE; + ch->r_io[ATA_COUNT].offset = ATA_COUNT; + ch->r_io[ATA_SECTOR].offset = ATA_SECTOR; + ch->r_io[ATA_CYL_LSB].offset = ATA_CYL_LSB; + ch->r_io[ATA_CYL_MSB].offset = ATA_CYL_MSB; + ch->r_io[ATA_DRIVE].offset = ATA_DRIVE; + ch->r_io[ATA_COMMAND].offset = ATA_COMMAND; + ch->r_io[ATA_ERROR].offset = ATA_FEATURE; + /* NB: should be used only for ATAPI devices */ + ch->r_io[ATA_IREASON].offset = ATA_COUNT; + ch->r_io[ATA_STATUS].offset = ATA_COMMAND; + /* alias this; required by ata_generic_status */ + ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_STATUS].offset; + + /* NB: the control register is special */ + ch->r_io[ATA_CONTROL].offset = AVILA_IDE_CTRL; + + /* NB: by convention this points at the base of registers */ + ch->r_io[ATA_IDX_ADDR].offset = 0; + + ata_generic_hw(dev); + return ata_attach(dev); +} + +/* XXX override ata_generic_reset to handle non-standard status */ +static void +avila_channel_reset(device_t dev) +{ + struct ata_channel *ch = device_get_softc(dev); + u_int8_t ostat0 = 0, stat0 = 0; + u_int8_t err = 0, lsb = 0, msb = 0; + int mask = 0, timeout; + + /* do we have any signs of ATA/ATAPI HW being present ? */ + ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_MASTER); + DELAY(10); + ostat0 = ATA_IDX_INB(ch, ATA_STATUS); + if ((ostat0 & 0xf8) != 0xf8 && ostat0 != 0xa5) { + stat0 = ATA_S_BUSY; + mask |= 0x01; + } + + if (bootverbose) + device_printf(dev, "%s: reset tp1 mask=%02x ostat0=%02x\n", + __func__, mask, ostat0); + + /* if nothing showed up there is no need to get any further */ + /* XXX SOS is that too strong?, we just might loose devices here */ + ch->devices = 0; + if (!mask) + return; + + /* reset (both) devices on this channel */ + ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_MASTER); + DELAY(10); + ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS | ATA_A_RESET); + ata_udelay(10000); + ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS); + ata_udelay(100000); + ATA_IDX_INB(ch, ATA_ERROR); + + /* wait for BUSY to go inactive */ + for (timeout = 0; timeout < 310; timeout++) { + if ((mask & 0x01) && (stat0 & ATA_S_BUSY)) { + ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_MASTER); + DELAY(10); + err = ATA_IDX_INB(ch, ATA_ERROR); + lsb = ATA_IDX_INB(ch, ATA_CYL_LSB); + msb = ATA_IDX_INB(ch, ATA_CYL_MSB); + stat0 = ATA_IDX_INB(ch, ATA_STATUS); + if (bootverbose) + device_printf(dev, + "%s: stat0=0x%02x err=0x%02x lsb=0x%02x " + "msb=0x%02x\n", __func__, + stat0, err, lsb, msb); + if (stat0 == err && lsb == err && msb == err && + timeout > (stat0 & ATA_S_BUSY ? 
100 : 10)) + mask &= ~0x01; + if (!(stat0 & ATA_S_BUSY)) { + if ((err & 0x7f) == ATA_E_ILI || err == 0) { + if (lsb == ATAPI_MAGIC_LSB && + msb == ATAPI_MAGIC_MSB) { + ch->devices |= ATA_ATAPI_MASTER; + } else if (stat0 & ATA_S_READY) { + ch->devices |= ATA_ATA_MASTER; + } + } else if ((stat0 & 0x0f) && + err == lsb && err == msb) { + stat0 |= ATA_S_BUSY; + } + } + } + if (mask == 0x00) /* nothing to wait for */ + break; + /* wait for master */ + if (!(stat0 & ATA_S_BUSY) || (stat0 == 0xff && timeout > 10)) + break; + ata_udelay(100000); + } + + if (bootverbose) + device_printf(dev, "%s: reset tp2 stat0=%02x devices=0x%b\n", + __func__, stat0, ch->devices, + "\20\4ATAPI_SLAVE\3ATAPI_MASTER\2ATA_SLAVE\1ATA_MASTER"); +} + +static device_method_t avila_channel_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, avila_channel_probe), + DEVMETHOD(device_attach, avila_channel_attach), + DEVMETHOD(device_detach, ata_detach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, ata_suspend), + DEVMETHOD(device_resume, ata_resume), + + DEVMETHOD(ata_reset, avila_channel_reset), + + { 0, 0 } +}; + +driver_t avila_channel_driver = { + "ata", + avila_channel_methods, + sizeof(struct ata_channel), +}; +DRIVER_MODULE(ata, ata_avila, avila_channel_driver, ata_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/avila_machdep.c b/sys/arm/xscale/ixp425/avila_machdep.c new file mode 100644 index 000000000000..b6efecf51576 --- /dev/null +++ b/sys/arm/xscale/ixp425/avila_machdep.c @@ -0,0 +1,545 @@ +/* $NetBSD: hpc_machdep.c,v 1.70 2003/09/16 08:18:22 agc Exp $ */ + +/*- + * Copyright (c) 1994-1998 Mark Brinicombe. + * Copyright (c) 1994 Brini. + * All rights reserved. + * + * This code is derived from software written for Brini by Mark Brinicombe + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Brini. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RiscBSD kernel project + * + * machdep.c + * + * Machine dependant functions for kernel setup + * + * This file needs a lot of work. + * + * Created : 17/09/94 + */ + +#include "opt_msgbuf.h" +#include "opt_ddb.h" + +#include +__FBSDID("$FreeBSD$"); + +#define _ARM32_BUS_DMA_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */ +#define KERNEL_PT_IO 1 +#define KERNEL_PT_IO_NUM 3 +#define KERNEL_PT_BEFOREKERN KERNEL_PT_IO + KERNEL_PT_IO_NUM +#define KERNEL_PT_AFKERNEL KERNEL_PT_BEFOREKERN + 1 /* L2 table for mapping after kernel */ +#define KERNEL_PT_AFKERNEL_NUM 9 + +/* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */ +#define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM) + +/* Define various stack sizes in pages */ +#define IRQ_STACK_SIZE 1 +#define ABT_STACK_SIZE 1 +#ifdef IPKDB +#define UND_STACK_SIZE 2 +#else +#define UND_STACK_SIZE 1 +#endif + +extern u_int data_abort_handler_address; +extern u_int prefetch_abort_handler_address; +extern u_int undefined_handler_address; + +struct pv_addr kernel_pt_table[NUM_KERNEL_PTS]; + +extern void *_end; + +extern vm_offset_t sa1_cache_clean_addr; + +extern int *end; + +struct pcpu __pcpu; +struct pcpu *pcpup = &__pcpu; + +/* Physical and virtual addresses for some global pages */ + +vm_paddr_t phys_avail[10]; +vm_paddr_t dump_avail[4]; +vm_offset_t physical_pages; +vm_offset_t clean_sva, clean_eva; + +struct pv_addr systempage; +struct pv_addr msgbufpv; +struct pv_addr irqstack; +struct pv_addr undstack; +struct pv_addr abtstack; +struct pv_addr kernelstack; +struct pv_addr minidataclean; + +static struct trapframe proc0_tf; + +/* Static device mappings. 
*/ +static const struct pmap_devmap ixp425_devmap[] = { + /* Physical/Virtual address for I/O space */ + { + IXP425_IO_VBASE, + IXP425_IO_HWBASE, + IXP425_IO_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + + /* Expansion Bus */ + { + IXP425_EXP_VBASE, + IXP425_EXP_HWBASE, + IXP425_EXP_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + + /* IXP425 PCI Configuration */ + { + IXP425_PCI_VBASE, + IXP425_PCI_HWBASE, + IXP425_PCI_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + + /* SDRAM Controller */ + { + IXP425_MCU_VBASE, + IXP425_MCU_HWBASE, + IXP425_MCU_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + + /* PCI Memory Space */ + { + IXP425_PCI_MEM_VBASE, + IXP425_PCI_MEM_HWBASE, + IXP425_PCI_MEM_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* NPE-A Memory Space */ + { + IXP425_NPE_A_VBASE, + IXP425_NPE_A_HWBASE, + IXP425_NPE_A_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* NPE-B Memory Space */ + { + IXP425_NPE_B_VBASE, + IXP425_NPE_B_HWBASE, + IXP425_NPE_B_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* NPE-C Memory Space */ + { + IXP425_NPE_C_VBASE, + IXP425_NPE_C_HWBASE, + IXP425_NPE_C_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* MAC-A Memory Space */ + { + IXP425_MAC_A_VBASE, + IXP425_MAC_A_HWBASE, + IXP425_MAC_A_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* MAC-B Memory Space */ + { + IXP425_MAC_B_VBASE, + IXP425_MAC_B_HWBASE, + IXP425_MAC_B_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + /* Q-Mgr Memory Space */ + { + IXP425_QMGR_VBASE, + IXP425_QMGR_HWBASE, + IXP425_QMGR_SIZE, + VM_PROT_READ|VM_PROT_WRITE, + PTE_NOCACHE, + }, + + { + 0, + 0, + 0, + 0, + 0, + } +}; + +#define SDRAM_START 0x10000000 + +#ifdef DDB +extern vm_offset_t ksym_start, ksym_end; +#endif + +extern vm_offset_t xscale_cache_clean_addr; + +void * +initarm(void *arg, void *arg2) +{ + struct pv_addr kernel_l1pt; + int loop; + u_int l1pagetable; + vm_offset_t freemempos; + vm_offset_t freemem_pt; + vm_offset_t afterkern; + vm_offset_t freemem_after; + vm_offset_t lastaddr; +#ifdef DDB + vm_offset_t zstart = 0, zend = 0; +#endif + int i = 0; + uint32_t fake_preload[35]; + uint32_t memsize; + + i = 0; + + set_cpufuncs(); + fake_preload[i++] = MODINFO_NAME; + fake_preload[i++] = strlen("elf kernel") + 1; + strcpy((char*)&fake_preload[i++], "elf kernel"); + i += 2; + fake_preload[i++] = MODINFO_TYPE; + fake_preload[i++] = strlen("elf kernel") + 1; + strcpy((char*)&fake_preload[i++], "elf kernel"); + i += 2; + fake_preload[i++] = MODINFO_ADDR; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = KERNBASE + 0x00200000; + fake_preload[i++] = MODINFO_SIZE; + fake_preload[i++] = sizeof(uint32_t); + fake_preload[i++] = (uint32_t)&end - KERNBASE - 0x00200000; +#ifdef DDB + if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { + fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); + fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); + lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); + zend = lastaddr; + zstart = *(uint32_t *)(KERNVIRTADDR + 4); + ksym_start = zstart; + ksym_end = zend; + } else +#endif + lastaddr = (vm_offset_t)&end; + + fake_preload[i++] = 0; + fake_preload[i] = 0; + preload_metadata = (void *)fake_preload; + + + pcpu_init(pcpup, 0, sizeof(struct pcpu)); + PCPU_SET(curthread, &thread0); + 
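For reference, the fake_preload records built above follow the module-metadata layout normally supplied by the boot loader: a 32-bit tag, a 32-bit byte count, then the payload padded to a whole number of 32-bit words, with a zero tag terminating the list. A minimal sketch of that layout as a helper (hypothetical; initarm() simply open-codes each record):

#include <sys/param.h>		/* howmany() */
#include <sys/systm.h>		/* memcpy() */

/*
 * Hypothetical helper showing the record layout used by fake_preload:
 * tag word, length word, payload rounded up to 32-bit words.
 */
static int
preload_append(uint32_t *md, int i, uint32_t tag, const void *p, size_t len)
{
	md[i++] = tag;
	md[i++] = len;
	memcpy(&md[i], p, len);
	return (i + howmany(len, sizeof(uint32_t)));
}

With this layout, the MODINFO_NAME record for "elf kernel" consumes 2 + 3 words, which matches the strcpy()/i += 2 sequence above.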
+#define KERNEL_TEXT_BASE (KERNBASE + 0x00200000) + freemempos = 0x10200000; + /* Define a macro to simplify memory allocation */ +#define valloc_pages(var, np) \ + alloc_pages((var).pv_pa, (np)); \ + (var).pv_va = (var).pv_pa + 0xb0000000; + +#define alloc_pages(var, np) \ + freemempos -= (np * PAGE_SIZE); \ + (var) = freemempos; \ + memset((char *)(var), 0, ((np) * PAGE_SIZE)); + + while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) + freemempos -= PAGE_SIZE; + valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); + for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) { + if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { + valloc_pages(kernel_pt_table[loop], + L2_TABLE_SIZE / PAGE_SIZE); + } else { + kernel_pt_table[loop].pv_pa = freemempos + + (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) * + L2_TABLE_SIZE_REAL; + kernel_pt_table[loop].pv_va = + kernel_pt_table[loop].pv_pa + 0xb0000000; + } + } + freemem_pt = freemempos; + freemempos = 0x10100000; + /* + * Allocate a page for the system page mapped to V0x00000000 + * This page will just contain the system vectors and can be + * shared by all processes. + */ + valloc_pages(systempage, 1); + + /* Allocate stacks for all modes */ + valloc_pages(irqstack, IRQ_STACK_SIZE); + valloc_pages(abtstack, ABT_STACK_SIZE); + valloc_pages(undstack, UND_STACK_SIZE); + valloc_pages(kernelstack, KSTACK_PAGES); + alloc_pages(minidataclean.pv_pa, 1); + valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE); +#ifdef ARM_USE_SMALL_ALLOC + freemempos -= PAGE_SIZE; + freemem_pt = trunc_page(freemem_pt); + freemem_after = freemempos - ((freemem_pt - 0x10100000) / + PAGE_SIZE) * sizeof(struct arm_small_page); + arm_add_smallalloc_pages((void *)(freemem_after + 0xb0000000) + , (void *)0xc0100000, freemem_pt - 0x10100000, 1); + freemem_after -= ((freemem_after - 0x10001000) / PAGE_SIZE) * + sizeof(struct arm_small_page); + arm_add_smallalloc_pages((void *)(freemem_after + 0xb0000000) + , (void *)0xc0001000, trunc_page(freemem_after) - 0x10001000, 0); + freemempos = trunc_page(freemem_after); + freemempos -= PAGE_SIZE; +#endif + /* + * Allocate memory for the l1 and l2 page tables. The scheme to avoid + * wasting memory by allocating the l1pt on the first 16k memory was + * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for + * this to work (which is supposed to be the case). + */ + + /* + * Now we start construction of the L1 page table + * We start by mapping the L2 page tables into the L1. 
+ * This means that we can replace L1 mappings later on if necessary + */ + l1pagetable = kernel_l1pt.pv_va; + + /* Map the L2 pages tables in the L1 page table */ + pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1), + &kernel_pt_table[KERNEL_PT_SYS]); + pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE, + &kernel_pt_table[KERNEL_PT_IO]); + pmap_link_l2pt(l1pagetable, IXP425_MCU_VBASE, + &kernel_pt_table[KERNEL_PT_IO + 1]); + pmap_link_l2pt(l1pagetable, IXP425_PCI_MEM_VBASE, + &kernel_pt_table[KERNEL_PT_IO + 2]); + pmap_link_l2pt(l1pagetable, KERNBASE, + &kernel_pt_table[KERNEL_PT_BEFOREKERN]); + pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000, + VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); + pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000, + 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); + pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000, + (((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1), + VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); + freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); + afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE + - 1)); + for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) { + pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000, + &kernel_pt_table[KERNEL_PT_AFKERNEL + i]); + } + pmap_map_entry(l1pagetable, afterkern, minidataclean.pv_pa, + VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); + + +#ifdef ARM_USE_SMALL_ALLOC + if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) { + arm_add_smallalloc_pages((void *)(freemem_after), + (void*)(freemem_after + PAGE_SIZE), + afterkern - (freemem_after + PAGE_SIZE), 0); + + } +#endif + + /* Map the Mini-Data cache clean area. */ + xscale_setup_minidata(l1pagetable, afterkern, + minidataclean.pv_pa); + + /* Map the vector page. */ + pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, + VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); + pmap_devmap_bootstrap(l1pagetable, ixp425_devmap); + /* + * Give the XScale global cache clean code an appropriately + * sized chunk of unmapped VA space starting at 0xff000000 + * (our device mappings end before this address). + */ + xscale_cache_clean_addr = 0xff000000U; + + cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); + setttb(kernel_l1pt.pv_pa); + cpu_tlb_flushID(); + cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); + /* + * Pages were allocated during the secondary bootstrap for the + * stacks for different CPU modes. + * We must now set the r13 registers in the different CPU modes to + * point to these stacks. + * Since the ARM stacks use STMFD etc. we must set r13 to the top end + * of the stack memory. + */ + + + set_stackptr(PSR_IRQ32_MODE, + irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE); + set_stackptr(PSR_ABT32_MODE, + abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE); + set_stackptr(PSR_UND32_MODE, + undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE); + + + + /* + * We must now clean the cache again.... + * Cleaning may be done by reading new data to displace any + * dirty data in the cache. This will have happened in setttb() + * but since we are boot strapping the addresses used for the read + * may have just been remapped and thus the cache could be out + * of sync. A re-clean after the switch will cure this. + * After booting there are no gross reloations of the kernel thus + * this problem will not occur after initarm(). + */ + cpu_idcache_wbinv_all(); + /* + * Fetch the SDRAM start/size from the ixp425 SDRAM configration + * registers. 
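The constant 0xb0000000 used by valloc_pages() above, and the virtual_avail arithmetic in the phys_avail setup further below, are the same quantity: KERNBASE - SDRAM_START, since the chunk mappings above place SDRAM (physical 0x10000000) at KERNBASE. Until the real pmap is running, kernel VA and PA therefore differ by a fixed offset. Expressed as illustrative macros (not part of the patch):

/* Fixed VA<->PA relationship implied by the mappings above (sketch only). */
#define AVILA_PHYS_TO_VIRT(pa)	((vm_offset_t)(pa) + (KERNBASE - SDRAM_START))
#define AVILA_VIRT_TO_PHYS(va)	((vm_paddr_t)(va) - KERNBASE + SDRAM_START)

For example, a page allocated at PA 0x10100000 is used at VA 0xc0100000, as the small-alloc setup above shows.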
+ */ + cninit(); + memsize = ixp425_sdram_size(); + physmem = memsize / PAGE_SIZE; + + /* Set stack for exception handlers */ + + data_abort_handler_address = (u_int)data_abort_handler; + prefetch_abort_handler_address = (u_int)prefetch_abort_handler; + undefined_handler_address = (u_int)undefinedinstruction_bounce; + undefined_init(); + +#ifdef KSE + proc_linkup(&proc0, &ksegrp0, &thread0); +#else + proc_linkup(&proc0, &thread0); +#endif + thread0.td_kstack = kernelstack.pv_va; + thread0.td_pcb = (struct pcb *) + (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; + thread0.td_pcb->pcb_flags = 0; + thread0.td_frame = &proc0_tf; + pcpup->pc_curpcb = thread0.td_pcb; + + /* Enable MMU, I-cache, D-cache, write buffer. */ + + arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); + + + + pmap_curmaxkvaddr = afterkern + PAGE_SIZE; + dump_avail[0] = 0x10000000; + dump_avail[1] = 0x10000000 + memsize; + dump_avail[2] = 0; + dump_avail[3] = 0; + + pmap_bootstrap(pmap_curmaxkvaddr, + 0xd0000000, &kernel_l1pt); + msgbufp = (void*)msgbufpv.pv_va; + msgbufinit(msgbufp, MSGBUF_SIZE); + mutex_init(); + + i = 0; +#ifdef ARM_USE_SMALL_ALLOC + phys_avail[i++] = 0x10000000; + phys_avail[i++] = 0x10001000; /* + *XXX: Gross hack to get our + * pages in the vm_page_array + . */ +#endif + phys_avail[i++] = round_page(virtual_avail - KERNBASE + SDRAM_START); + phys_avail[i++] = trunc_page(0x10000000 + memsize - 1); + phys_avail[i++] = 0; + phys_avail[i] = 0; + + /* Do basic tuning, hz etc */ + init_param1(); + init_param2(physmem); + kdb_init(); + return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - + sizeof(struct pcb))); +} diff --git a/sys/arm/xscale/ixp425/files.avila b/sys/arm/xscale/ixp425/files.avila new file mode 100644 index 000000000000..f5f2dbce95b4 --- /dev/null +++ b/sys/arm/xscale/ixp425/files.avila @@ -0,0 +1,4 @@ +#$FreeBSD$ +arm/xscale/ixp425/avila_machdep.c standard +arm/xscale/ixp425/avila_ata.c optional avila_ata +arm/xscale/ixp425/ixdp425_pci.c optional pci diff --git a/sys/arm/xscale/ixp425/files.ixp425 b/sys/arm/xscale/ixp425/files.ixp425 new file mode 100644 index 000000000000..9304b7c6d330 --- /dev/null +++ b/sys/arm/xscale/ixp425/files.ixp425 @@ -0,0 +1,41 @@ +#$FreeBSD$ +arm/arm/cpufunc_asm_xscale.S standard +arm/arm/irq_dispatch.S standard +arm/xscale/ixp425/ixp425.c standard +arm/xscale/ixp425/ixp425_mem.c standard +arm/xscale/ixp425/ixp425_space.c standard +arm/xscale/ixp425/ixp425_timer.c standard +arm/xscale/ixp425/ixp425_wdog.c optional ixpwdog +arm/xscale/ixp425/ixp425_iic.c optional ixpiic +arm/xscale/ixp425/ixp425_pci.c optional pci +arm/xscale/ixp425/ixp425_pci_asm.S optional pci +arm/xscale/ixp425/ixp425_pci_space.c optional pci +arm/xscale/ixp425/uart_cpu_ixp425.c optional uart +arm/xscale/ixp425/uart_bus_ixp425.c optional uart +arm/xscale/ixp425/ixp425_a4x_space.c optional uart +arm/xscale/ixp425/ixp425_a4x_io.S optional uart +dev/uart/uart_dev_ns8250.c optional uart +# +# NPE-based Ethernet support (requires qmgr also). Note the +# firmware images must be downloaded from the Intel web site. +# +arm/xscale/ixp425/if_npe.c optional npe +arm/xscale/ixp425/ixp425_npe.c optional npe +ixp425_npe_fw.c optional npe_fw \ + compile-with "${AWK} -f $S/tools/fw_stub.awk IxNpeMicrocode.dat:npe_fw -mnpe -c${.TARGET}" \ + no-implicit-rule before-depend local \ + clean "ixp425_npe_fw.c" +# +# NB: ld encodes the path in the binary symbols generated for the +# firmware image so link the file to the object directory to +# get known values for reference in the _fw.c file. 
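The fw_stub.awk invocation above generates a firmware(9) stub module that registers the linked-in IxNpeMicrocode image under the name "npe_fw"; if_npe.c passes that same name to ixpnpe_init(), which presumably performs the lookup at run time. A sketch of such a lookup under those assumptions (the helper name here is invented):

/* Sketch: fetching the NPE microcode registered by the npe_fw module. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/firmware.h>

static int
npe_fw_probe(void)
{
	const struct firmware *fw;

	fw = firmware_get("npe_fw");	/* image name registered by fw_stub.awk */
	if (fw == NULL)
		return (0);
	printf("npe_fw: %lu bytes of microcode\n", (u_long)fw->datasize);
	firmware_put(fw, FIRMWARE_UNLOAD);
	return (1);
}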
+# +IxNpeMicrocode.fwo optional npe_fw \ + dependency "$S/arm/xscale/ixp425/IxNpeMicrocode.dat" \ + compile-with "ln -sf $S/arm/xscale/ixp425/IxNpeMicrocode.dat ${.OBJDIR}; ${LD} -b binary -d -warn-common -r -d -o ${.TARGET} IxNpeMicrocode.dat" \ + no-implicit-rule \ + clean "IxNpeMicrocode.dat IxNpeMicrocode.fwo" +# +# Q-Manager support +# +arm/xscale/ixp425/ixp425_qmgr.c optional qmgr diff --git a/sys/arm/xscale/ixp425/if_npe.c b/sys/arm/xscale/ixp425/if_npe.c new file mode 100644 index 000000000000..c63fc986093b --- /dev/null +++ b/sys/arm/xscale/ixp425/if_npe.c @@ -0,0 +1,1673 @@ +/*- + * Copyright (c) 2006 Sam Leffler. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Intel XScale NPE Ethernet driver. + * + * This driver handles the two ports present on the IXP425. + * Packet processing is done by the Network Processing Engines + * (NPE's) that work together with a MAC and PHY. The MAC + * is also mapped to the XScale cpu; the PHY is accessed via + * the MAC. NPE-XScale communication happens through h/w + * queues managed by the Q Manager block. + * + * The code here replaces the ethAcc, ethMii, and ethDB classes + * in the Intel Access Library (IAL) and the OS-specific driver. 
+ * + * XXX add vlan support + * XXX NPE-C port doesn't work yet + */ +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_device_polling.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef INET +#include +#include +#include +#include +#endif + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include "miibus_if.h" + +struct npebuf { + struct npebuf *ix_next; /* chain to next buffer */ + void *ix_m; /* backpointer to mbuf */ + bus_dmamap_t ix_map; /* bus dma map for associated data */ + struct npehwbuf *ix_hw; /* associated h/w block */ + uint32_t ix_neaddr; /* phys address of ix_hw */ +}; + +struct npedma { + const char* name; + int nbuf; /* # npebuf's allocated */ + bus_dma_tag_t mtag; /* bus dma tag for mbuf data */ + struct npehwbuf *hwbuf; /* NPE h/w buffers */ + bus_dma_tag_t buf_tag; /* tag+map for NPE buffers */ + bus_dmamap_t buf_map; + bus_addr_t buf_phys; /* phys addr of buffers */ + struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */ +}; + +struct npe_softc { + /* XXX mii requires this be first; do not move! */ + struct ifnet *sc_ifp; /* ifnet pointer */ + struct mtx sc_mtx; /* basically a perimeter lock */ + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_ioh; /* MAC register window */ + device_t sc_mii; /* child miibus */ + bus_space_handle_t sc_miih; /* MII register window */ + struct ixpnpe_softc *sc_npe; /* NPE support */ + int sc_debug; /* DPRINTF* control */ + int sc_tickinterval; + struct callout tick_ch; /* Tick callout */ + struct npedma txdma; + struct npebuf *tx_free; /* list of free tx buffers */ + struct npedma rxdma; + bus_addr_t buf_phys; /* XXX for returning a value */ + int rx_qid; /* rx qid */ + int rx_freeqid; /* rx free buffers qid */ + int tx_qid; /* tx qid */ + int tx_doneqid; /* tx completed qid */ + struct ifmib_iso_8802_3 mibdata; + bus_dma_tag_t sc_stats_tag; /* bus dma tag for stats block */ + struct npestats *sc_stats; + bus_dmamap_t sc_stats_map; + bus_addr_t sc_stats_phys; /* phys addr of sc_stats */ +}; + +/* + * Per-unit static configuration for IXP425. The tx and + * rx free Q id's are fixed by the NPE microcode. The + * rx Q id's are programmed to be separate to simplify + * multi-port processing. It may be better to handle + * all traffic through one Q (as done by the Intel drivers). + * + * Note that the PHY's are accessible only from MAC A + * on the IXP425. This and other platform-specific + * assumptions probably need to be handled through hints. 
+ */ +static const struct { + const char *desc; /* device description */ + int npeid; /* NPE assignment */ + uint32_t imageid; /* NPE firmware image id */ + uint32_t regbase; + int regsize; + uint32_t miibase; + int miisize; + uint8_t rx_qid; + uint8_t rx_freeqid; + uint8_t tx_qid; + uint8_t tx_doneqid; +} npeconfig[NPE_PORTS_MAX] = { + { .desc = "IXP NPE-B", + .npeid = NPE_B, + .imageid = IXP425_NPE_B_IMAGEID, + .regbase = IXP425_MAC_A_HWBASE, + .regsize = IXP425_MAC_A_SIZE, + .miibase = IXP425_MAC_A_HWBASE, + .miisize = IXP425_MAC_A_SIZE, + .rx_qid = 4, + .rx_freeqid = 27, + .tx_qid = 24, + .tx_doneqid = 31 + }, + { .desc = "IXP NPE-C", + .npeid = NPE_C, + .imageid = IXP425_NPE_C_IMAGEID, + .regbase = IXP425_MAC_B_HWBASE, + .regsize = IXP425_MAC_B_SIZE, + .miibase = IXP425_MAC_A_HWBASE, + .miisize = IXP425_MAC_A_SIZE, + .rx_qid = 12, + .rx_freeqid = 28, + .tx_qid = 25, + .tx_doneqid = 31 + }, +}; +static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */ + +static __inline uint32_t +RD4(struct npe_softc *sc, bus_size_t off) +{ + return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); +} + +static __inline void +WR4(struct npe_softc *sc, bus_size_t off, uint32_t val) +{ + bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); +} + +#define NPE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) +#define NPE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) +#define NPE_LOCK_INIT(_sc) \ + mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \ + MTX_NETWORK_LOCK, MTX_DEF) +#define NPE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); +#define NPE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); +#define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); + +static devclass_t npe_devclass; + +static int npe_activate(device_t dev); +static void npe_deactivate(device_t dev); +static int npe_ifmedia_update(struct ifnet *ifp); +static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr); +static void npe_setmac(struct npe_softc *sc, u_char *eaddr); +static void npe_getmac(struct npe_softc *sc, u_char *eaddr); +static void npe_txdone(int qid, void *arg); +static int npe_rxbuf_init(struct npe_softc *, struct npebuf *, + struct mbuf *); +static void npe_rxdone(int qid, void *arg); +static void npeinit(void *); +static void npestart_locked(struct ifnet *); +static void npestart(struct ifnet *); +static void npestop(struct npe_softc *); +static void npewatchdog(struct ifnet *); +static int npeioctl(struct ifnet * ifp, u_long, caddr_t); + +static int npe_setrxqosentry(struct npe_softc *, int classix, + int trafclass, int qid); +static int npe_updatestats(struct npe_softc *); +#if 0 +static int npe_getstats(struct npe_softc *); +static uint32_t npe_getimageid(struct npe_softc *); +static int npe_setloopback(struct npe_softc *, int ena); +#endif + +/* NB: all tx done processing goes through one queue */ +static int tx_doneqid = -1; + +SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP425 NPE driver parameters"); + +static int npe_debug = 0; +SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug, + 0, "IXP425 NPE network interface debug msgs"); +TUNABLE_INT("hw.npe.npe", &npe_debug); +#define DPRINTF(sc, fmt, ...) do { \ + if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \ +} while (0) +#define DPRINTFn(n, sc, fmt, ...) 
do { \ + if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\ +} while (0) +static int npe_tickinterval = 3; /* npe_tick frequency (secs) */ +SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval, + 0, "periodic work interval (secs)"); +TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval); + +static int npe_rxbuf = 64; /* # rx buffers to allocate */ +SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf, + 0, "rx buffers allocated"); +TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf); +static int npe_txbuf = 128; /* # tx buffers to allocate */ +SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf, + 0, "tx buffers allocated"); +TUNABLE_INT("hw.npe.txbuf", &npe_txbuf); + +static int +npe_probe(device_t dev) +{ + int unit = device_get_unit(dev); + + if (unit >= NPE_PORTS_MAX) { + device_printf(dev, "unit %d not supported\n", unit); + return EINVAL; + } + /* XXX check feature register to see if enabled */ + device_set_desc(dev, npeconfig[unit].desc); + return 0; +} + +static int +npe_attach(device_t dev) +{ + struct npe_softc *sc = device_get_softc(dev); + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct ifnet *ifp = NULL; + int error; + u_char eaddr[6]; + + sc->sc_dev = dev; + sc->sc_iot = sa->sc_iot; + NPE_LOCK_INIT(sc); + callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); + sc->sc_debug = npe_debug; + sc->sc_tickinterval = npe_tickinterval; + + sc->sc_npe = ixpnpe_attach(dev); + if (sc->sc_npe == NULL) { + error = EIO; /* XXX */ + goto out; + } + + error = npe_activate(dev); + if (error) + goto out; + + npe_getmac(sc, eaddr); + + /* NB: must be setup prior to invoking mii code */ + sc->sc_ifp = ifp = if_alloc(IFT_ETHER); + if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) { + device_printf(dev, "Cannot find my PHY.\n"); + error = ENXIO; + goto out; + } + + ifp->if_softc = sc; + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_start = npestart; + ifp->if_ioctl = npeioctl; + ifp->if_watchdog = npewatchdog; + ifp->if_init = npeinit; + IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1); + ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; + IFQ_SET_READY(&ifp->if_snd); + ifp->if_timer = 0; + ifp->if_linkmib = &sc->mibdata; + ifp->if_linkmiblen = sizeof(sc->mibdata); + sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS; +#ifdef DEVICE_POLLING + ifp->if_capabilities |= IFCAP_POLLING; +#endif + + SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", + CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); + SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval", + CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency"); + + ether_ifattach(ifp, eaddr); + return 0; +out: + npe_deactivate(dev); + if (ifp != NULL) + if_free(ifp); + return error; +} + +static int +npe_detach(device_t dev) +{ + struct npe_softc *sc = device_get_softc(dev); + struct ifnet *ifp = sc->sc_ifp; + +#ifdef DEVICE_POLLING + if (ifp->if_capenable & IFCAP_POLLING) + ether_poll_deregister(ifp); +#endif + npestop(sc); + if (ifp != NULL) { + ether_ifdetach(ifp); + if_free(ifp); + } + NPE_LOCK_DESTROY(sc); + npe_deactivate(dev); + if (sc->sc_npe != NULL) + ixpnpe_detach(sc->sc_npe); + return 0; +} + +/* + * Compute and install the multicast filter. 
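The filter computed in npe_setmcast() below collapses all enlisted groups into a single (address, mask) pair: clr accumulates the bits that differ between addresses, set the bits they all share, and mask = set | ~clr keeps only the bits common to every address; presumably the MAC then accepts any destination that matches addr on the bits left in mask. A worked example with two made-up groups, 01:00:5e:00:00:01 and 01:00:5e:00:00:02:

/*
 * Worked example of the mask/addr computation in npe_setmcast():
 *   clr  = 01:00:5e:00:00:03	(OR of the two addresses)
 *   set  = 01:00:5e:00:00:00	(AND of the two addresses)
 *   mask = set | ~clr = ff:ff:ff:ff:ff:fc
 *   addr = set        = 01:00:5e:00:00:00
 * i.e. only the low two bits of the final byte are wildcarded, so both
 * groups (and two neighbouring addresses) pass the hardware filter.
 */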
+ */ +static void +npe_setmcast(struct npe_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN]; + int i; + + if (ifp->if_flags & IFF_PROMISC) { + memset(mask, 0, ETHER_ADDR_LEN); + memset(addr, 0, ETHER_ADDR_LEN); + } else if (ifp->if_flags & IFF_ALLMULTI) { + static const uint8_t allmulti[ETHER_ADDR_LEN] = + { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; + memcpy(mask, allmulti, ETHER_ADDR_LEN); + memcpy(addr, allmulti, ETHER_ADDR_LEN); + } else { + uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN]; + struct ifmultiaddr *ifma; + const uint8_t *mac; + + memset(clr, 0, ETHER_ADDR_LEN); + memset(set, 0xff, ETHER_ADDR_LEN); + + IF_ADDR_LOCK(ifp); + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); + for (i = 0; i < ETHER_ADDR_LEN; i++) { + clr[i] |= mac[i]; + set[i] &= mac[i]; + } + } + IF_ADDR_UNLOCK(ifp); + + for (i = 0; i < ETHER_ADDR_LEN; i++) { + mask[i] = set[i] | ~clr[i]; + addr[i] = set[i]; + } + } + + /* + * Write the mask and address registers. + */ + for (i = 0; i < ETHER_ADDR_LEN; i++) { + WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]); + WR4(sc, NPE_MAC_ADDR(i), addr[i]); + } +} + +static void +npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + struct npe_softc *sc; + + if (error != 0) + return; + sc = (struct npe_softc *)arg; + sc->buf_phys = segs[0].ds_addr; +} + +static int +npe_dma_setup(struct npe_softc *sc, struct npedma *dma, + const char *name, int nbuf, int maxseg) +{ + int error, i; + + memset(dma, 0, sizeof(dma)); + + dma->name = name; + dma->nbuf = nbuf; + + /* DMA tag for mapped mbufs */ + error = bus_dma_tag_create(NULL, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + MCLBYTES, maxseg, MCLBYTES, 0, + busdma_lock_mutex, &sc->sc_mtx, &dma->mtag); + if (error != 0) { + device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, " + "error %u\n", dma->name, error); + return error; + } + + /* DMA tag and map for the NPE buffers */ + error = bus_dma_tag_create(NULL, sizeof(uint32_t), 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + nbuf * sizeof(struct npehwbuf), 1, + nbuf * sizeof(struct npehwbuf), 0, + busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag); + if (error != 0) { + device_printf(sc->sc_dev, + "unable to create %s npebuf dma tag, error %u\n", + dma->name, error); + return error; + } + /* XXX COHERENT for now */ + if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf, + BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, + &dma->buf_map) != 0) { + device_printf(sc->sc_dev, + "unable to allocate memory for %s h/w buffers, error %u\n", + dma->name, error); + return error; + } + /* XXX M_TEMP */ + dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO); + if (dma->buf == NULL) { + device_printf(sc->sc_dev, + "unable to allocate memory for %s s/w buffers\n", + dma->name); + return error; + } + if (bus_dmamap_load(dma->buf_tag, dma->buf_map, + dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) { + device_printf(sc->sc_dev, + "unable to map memory for %s h/w buffers, error %u\n", + dma->name, error); + return error; + } + dma->buf_phys = sc->buf_phys; + for (i = 0; i < dma->nbuf; i++) { + struct npebuf *npe = &dma->buf[i]; + struct npehwbuf *hw = &dma->hwbuf[i]; + + /* calculate offset to shared area */ + npe->ix_neaddr = dma->buf_phys + + ((uintptr_t)hw - (uintptr_t)dma->hwbuf); + KASSERT((npe->ix_neaddr & 0x1f) == 0, + ("ixpbuf 
misaligned, PA 0x%x", npe->ix_neaddr)); + error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT, + &npe->ix_map); + if (error != 0) { + device_printf(sc->sc_dev, + "unable to create dmamap for %s buffer %u, " + "error %u\n", dma->name, i, error); + return error; + } + npe->ix_hw = hw; + } + bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE); + return 0; +} + +static void +npe_dma_destroy(struct npe_softc *sc, struct npedma *dma) +{ + int i; + + if (dma->hwbuf != NULL) { + for (i = 0; i < dma->nbuf; i++) { + struct npebuf *npe = &dma->buf[i]; + bus_dmamap_destroy(dma->mtag, npe->ix_map); + } + bus_dmamap_unload(dma->buf_tag, dma->buf_map); + bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map); + bus_dmamap_destroy(dma->buf_tag, dma->buf_map); + } + if (dma->buf != NULL) + free(dma->buf, M_TEMP); + if (dma->buf_tag) + bus_dma_tag_destroy(dma->buf_tag); + if (dma->mtag) + bus_dma_tag_destroy(dma->mtag); + memset(dma, 0, sizeof(*dma)); +} + +static int +npe_activate(device_t dev) +{ + struct npe_softc * sc = device_get_softc(dev); + int unit = device_get_unit(dev); + int error, i; + + /* load NPE firmware and start it running */ + error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid); + if (error != 0) + return error; + + if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase, + npeconfig[unit].regsize, 0, &sc->sc_ioh)) { + device_printf(dev, "Cannot map registers 0x%x:0x%x\n", + npeconfig[unit].regbase, npeconfig[unit].regsize); + return ENOMEM; + } + + if (npeconfig[unit].miibase != npeconfig[unit].regbase) { + /* + * The PHY's are only accessible from one MAC (it appears) + * so for other MAC's setup an additional mapping for + * frobbing the PHY registers. + */ + if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase, + npeconfig[unit].miisize, 0, &sc->sc_miih)) { + device_printf(dev, + "Cannot map MII registers 0x%x:0x%x\n", + npeconfig[unit].miibase, npeconfig[unit].miisize); + return ENOMEM; + } + } else + sc->sc_miih = sc->sc_ioh; + error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG); + if (error != 0) + return error; + error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1); + if (error != 0) + return error; + + /* setup statistics block */ + error = bus_dma_tag_create(NULL, sizeof(uint32_t), 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + sizeof(struct npestats), 1, sizeof(struct npestats), 0, + busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag); + if (error != 0) { + device_printf(sc->sc_dev, "unable to create stats tag, " + "error %u\n", error); + return error; + } + if (bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats, + BUS_DMA_NOWAIT, &sc->sc_stats_map) != 0) { + device_printf(sc->sc_dev, + "unable to allocate memory for stats block, error %u\n", + error); + return error; + } + if (bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map, + sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0) != 0) { + device_printf(sc->sc_dev, + "unable to load memory for stats block, error %u\n", + error); + return error; + } + sc->sc_stats_phys = sc->buf_phys; + + /* XXX disable half-bridge LEARNING+FILTERING feature */ + + /* + * Setup h/w rx/tx queues. There are four q's: + * rx inbound q of rx'd frames + * rx_free pool of ixpbuf's for receiving frames + * tx outbound q of frames to send + * tx_done q of tx frames that have been processed + * + * The NPE handles the actual tx/rx process and the q manager + * handles the queues. 
The driver just writes entries to the + * q manager mailbox's and gets callbacks when there are rx'd + * frames to process or tx'd frames to reap. These callbacks + * are controlled by the q configurations; e.g. we get a + * callback when tx_done has 2 or more frames to process and + * when the rx q has at least one frame. These setings can + * changed at the time the q is configured. + */ + sc->rx_qid = npeconfig[unit].rx_qid; + ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1, + IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc); + sc->rx_freeqid = npeconfig[unit].rx_freeqid; + ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc); + /* tell the NPE to direct all traffic to rx_qid */ +#if 0 + for (i = 0; i < 8; i++) +#else +device_printf(sc->sc_dev, "remember to fix rx q setup\n"); + for (i = 0; i < 4; i++) +#endif + npe_setrxqosentry(sc, i, 0, sc->rx_qid); + + sc->tx_qid = npeconfig[unit].tx_qid; + sc->tx_doneqid = npeconfig[unit].tx_doneqid; + ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc); + if (tx_doneqid == -1) { + ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2, + IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc); + tx_doneqid = sc->tx_doneqid; + } + + KASSERT(npes[npeconfig[unit].npeid] == NULL, + ("npe %u already setup", npeconfig[unit].npeid)); + npes[npeconfig[unit].npeid] = sc; + + return 0; +} + +static void +npe_deactivate(device_t dev) +{ + struct npe_softc *sc = device_get_softc(dev); + int unit = device_get_unit(dev); + + npes[npeconfig[unit].npeid] = NULL; + + /* XXX disable q's */ + if (sc->sc_npe != NULL) + ixpnpe_stop(sc->sc_npe); + if (sc->sc_stats != NULL) { + bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map); + bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats, + sc->sc_stats_map); + bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map); + } + if (sc->sc_stats_tag != NULL) + bus_dma_tag_destroy(sc->sc_stats_tag); + npe_dma_destroy(sc, &sc->txdma); + npe_dma_destroy(sc, &sc->rxdma); + bus_generic_detach(sc->sc_dev); + if (sc->sc_mii) + device_delete_child(sc->sc_dev, sc->sc_mii); +#if 0 + /* XXX sc_ioh and sc_miih */ + if (sc->mem_res) + bus_release_resource(dev, SYS_RES_IOPORT, + rman_get_rid(sc->mem_res), sc->mem_res); + sc->mem_res = 0; +#endif +} + +/* + * Change media according to request. + */ +static int +npe_ifmedia_update(struct ifnet *ifp) +{ + struct npe_softc *sc = ifp->if_softc; + struct mii_data *mii; + + mii = device_get_softc(sc->sc_mii); + NPE_LOCK(sc); + mii_mediachg(mii); + /* XXX push state ourself? */ + NPE_UNLOCK(sc); + return (0); +} + +/* + * Notify the world which media we're using. 
+ */ +static void +npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct npe_softc *sc = ifp->if_softc; + struct mii_data *mii; + + mii = device_get_softc(sc->sc_mii); + NPE_LOCK(sc); + mii_pollstat(mii); + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + NPE_UNLOCK(sc); +} + +static void +npe_addstats(struct npe_softc *sc) +{ +#define MIBADD(x) sc->mibdata.x += be32toh(ns->x) + struct ifnet *ifp = sc->sc_ifp; + struct npestats *ns = sc->sc_stats; + + MIBADD(dot3StatsAlignmentErrors); + MIBADD(dot3StatsFCSErrors); + MIBADD(dot3StatsSingleCollisionFrames); + MIBADD(dot3StatsMultipleCollisionFrames); + MIBADD(dot3StatsDeferredTransmissions); + MIBADD(dot3StatsLateCollisions); + MIBADD(dot3StatsExcessiveCollisions); + MIBADD(dot3StatsInternalMacTransmitErrors); + MIBADD(dot3StatsCarrierSenseErrors); + sc->mibdata.dot3StatsFrameTooLongs += + be32toh(ns->RxLargeFramesDiscards) + + be32toh(ns->TxLargeFrameDiscards); + MIBADD(dot3StatsInternalMacReceiveErrors); + sc->mibdata.dot3StatsMissedFrames += + be32toh(ns->RxOverrunDiscards) + + be32toh(ns->RxUnderflowEntryDiscards); + + ifp->if_oerrors += + be32toh(ns->dot3StatsInternalMacTransmitErrors) + + be32toh(ns->dot3StatsCarrierSenseErrors) + + be32toh(ns->TxVLANIdFilterDiscards) + ; + ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors) + + be32toh(ns->dot3StatsInternalMacReceiveErrors) + + be32toh(ns->RxOverrunDiscards) + + be32toh(ns->RxUnderflowEntryDiscards) + ; + ifp->if_collisions += + be32toh(ns->dot3StatsSingleCollisionFrames) + + be32toh(ns->dot3StatsMultipleCollisionFrames) + ; +#undef MIBADD +} + +static void +npe_tick(void *xsc) +{ +#define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL) + struct npe_softc *sc = xsc; + struct mii_data *mii = device_get_softc(sc->sc_mii); + uint32_t msg[2]; + + NPE_ASSERT_LOCKED(sc); + + /* + * NB: to avoid sleeping with the softc lock held we + * split the NPE msg processing into two parts. The + * request for statistics is sent w/o waiting for a + * reply and then on the next tick we retrieve the + * results. This works because npe_tick is the only + * code that talks via the mailbox's (except at setup). + * This likely can be handled better. 
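+ * npe_updatestats() below posts the request; the ACK test at the
+ * top of this routine consumes the reply and folds the results in
+ * via npe_addstats().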
+ */ + if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) { + bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map, + BUS_DMASYNC_POSTREAD); + npe_addstats(sc); + } + npe_updatestats(sc); + mii_tick(mii); + + /* schedule next poll */ + callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc); +#undef ACK +} + +static void +npe_setmac(struct npe_softc *sc, u_char *eaddr) +{ + WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]); + WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]); + WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]); + WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]); + WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]); + WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]); + +} + +static void +npe_getmac(struct npe_softc *sc, u_char *eaddr) +{ + /* NB: the unicast address appears to be loaded from EEPROM on reset */ + eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff; + eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff; + eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff; + eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff; + eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff; + eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff; +} + +struct txdone { + struct npebuf *head; + struct npebuf **tail; + int count; +}; + +static __inline void +npe_txdone_finish(struct npe_softc *sc, const struct txdone *td) +{ + struct ifnet *ifp = sc->sc_ifp; + + NPE_LOCK(sc); + *td->tail = sc->tx_free; + sc->tx_free = td->head; + /* + * We're no longer busy, so clear the busy flag and call the + * start routine to xmit more packets. + */ + ifp->if_opackets += td->count; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifp->if_timer = 0; + npestart_locked(ifp); + NPE_UNLOCK(sc); +} + +/* + * Q manager callback on tx done queue. Reap mbufs + * and return tx buffers to the free list. Finally + * restart output. Note the microcode has only one + * txdone q wired into it so we must use the NPE ID + * returned with each npehwbuf to decide where to + * send buffers. + */ +static void +npe_txdone(int qid, void *arg) +{ +#define P2V(a, dma) \ + &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] + struct npe_softc *sc0 = arg; + struct npe_softc *sc; + struct npebuf *npe; + struct txdone *td, q[NPE_MAX]; + uint32_t entry; + + /* XXX no NPE-A support */ + q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0; + q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0; + /* XXX max # at a time? 
*/ + while (ixpqmgr_qread(qid, &entry) == 0) { + DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n", + __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry)); + + sc = npes[NPE_QM_Q_NPE(entry)]; + npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma); + m_freem(npe->ix_m); + npe->ix_m = NULL; + + td = &q[NPE_QM_Q_NPE(entry)]; + *td->tail = npe; + td->tail = &npe->ix_next; + td->count++; + } + + if (q[NPE_B].count) + npe_txdone_finish(npes[NPE_B], &q[NPE_B]); + if (q[NPE_C].count) + npe_txdone_finish(npes[NPE_C], &q[NPE_C]); +#undef P2V +} + +static int +npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m) +{ + bus_dma_segment_t segs[1]; + struct npedma *dma = &sc->rxdma; + struct npehwbuf *hw; + int error, nseg; + + if (m == NULL) { + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + return ENOBUFS; + } + KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN, + ("ext_size %d", m->m_ext.ext_size)); + m->m_pkthdr.len = m->m_len = 1536; + /* backload payload and align ip hdr */ + m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN)); + error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m, + segs, &nseg, 0); + if (error != 0) { + m_freem(m); + return error; + } + hw = npe->ix_hw; + hw->ix_ne[0].data = htobe32(segs[0].ds_addr); + /* NB: NPE requires length be a multiple of 64 */ + /* NB: buffer length is shifted in word */ + hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16); + hw->ix_ne[0].next = 0; + npe->ix_m = m; + /* Flush the memory in the mbuf */ + bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD); + return 0; +} + +/* + * RX q processing for a specific NPE. Claim entries + * from the hardware queue and pass the frames up the + * stack. Pass the rx buffers to the free list. + */ +static void +npe_rxdone(int qid, void *arg) +{ +#define P2V(a, dma) \ + &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] + struct npe_softc *sc = arg; + struct npedma *dma = &sc->rxdma; + uint32_t entry; + + while (ixpqmgr_qread(qid, &entry) == 0) { + struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma); + struct mbuf *m; + + DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n", + __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len); + /* + * Allocate a new mbuf to replenish the rx buffer. + * If doing so fails we drop the rx'd frame so we + * can reuse the previous mbuf. When we're able to + * allocate a new mbuf dispatch the mbuf w/ rx'd + * data up the stack and replace it with the newly + * allocated one. + */ + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m != NULL) { + struct mbuf *mrx = npe->ix_m; + struct npehwbuf *hw = npe->ix_hw; + struct ifnet *ifp = sc->sc_ifp; + + /* Flush mbuf memory for rx'd data */ + bus_dmamap_sync(dma->mtag, npe->ix_map, + BUS_DMASYNC_POSTREAD); + + /* XXX flush hw buffer; works now 'cuz coherent */ + /* set m_len etc. 
per rx frame size */ + mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff; + mrx->m_pkthdr.len = mrx->m_len; + mrx->m_pkthdr.rcvif = ifp; + mrx->m_flags |= M_HASFCS; + + ifp->if_ipackets++; + ifp->if_input(ifp, mrx); + } else { + /* discard frame and re-use mbuf */ + m = npe->ix_m; + } + if (npe_rxbuf_init(sc, npe, m) == 0) { + /* return npe buf to rx free list */ + ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); + } else { + /* XXX should not happen */ + } + } +#undef P2V +} + +#ifdef DEVICE_POLLING +static void +npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) +{ + struct npe_softc *sc = ifp->if_softc; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + npe_rxdone(sc->rx_qid, sc); + npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */ + } +} +#endif /* DEVICE_POLLING */ + +static void +npe_startxmit(struct npe_softc *sc) +{ + struct npedma *dma = &sc->txdma; + int i; + + NPE_ASSERT_LOCKED(sc); + sc->tx_free = NULL; + for (i = 0; i < dma->nbuf; i++) { + struct npebuf *npe = &dma->buf[i]; + if (npe->ix_m != NULL) { + /* NB: should not happen */ + device_printf(sc->sc_dev, + "%s: free mbuf at entry %u\n", __func__, i); + m_freem(npe->ix_m); + } + npe->ix_m = NULL; + npe->ix_next = sc->tx_free; + sc->tx_free = npe; + } +} + +static void +npe_startrecv(struct npe_softc *sc) +{ + struct npedma *dma = &sc->rxdma; + struct npebuf *npe; + int i; + + NPE_ASSERT_LOCKED(sc); + for (i = 0; i < dma->nbuf; i++) { + npe = &dma->buf[i]; + npe_rxbuf_init(sc, npe, npe->ix_m); + /* set npe buf on rx free list */ + ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); + } +} + +/* + * Reset and initialize the chip + */ +static void +npeinit_locked(void *xsc) +{ + struct npe_softc *sc = xsc; + struct ifnet *ifp = sc->sc_ifp; + + NPE_ASSERT_LOCKED(sc); +if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/ + + /* + * Reset MAC core. + */ + WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); + DELAY(NPE_MAC_RESET_DELAY); + /* configure MAC to generate MDC clock */ + WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); + + /* disable transmitter and reciver in the MAC */ + WR4(sc, NPE_MAC_RX_CNTRL1, + RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); + WR4(sc, NPE_MAC_TX_CNTRL1, + RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); + + /* + * Set the MAC core registers. + */ + WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1); /* clock ratio: for ipx4xx */ + WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */ + WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */ + /* thresholds determined by NPE firmware FS */ + WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12); + WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30); + WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8); /* tx fifo threshold (bytes) */ + WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */ + WR4(sc, NPE_MAC_RX_DEFER, 0x16); /* deferral on inter-frame gap*/ + WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */ + WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */ + WR4(sc, NPE_MAC_SLOT_TIME, 0x80); /* assumes MII mode */ + + WR4(sc, NPE_MAC_TX_CNTRL1, + NPE_TX_CNTRL1_RETRY /* retry failed xmits */ + | NPE_TX_CNTRL1_FCS_EN /* append FCS */ + | NPE_TX_CNTRL1_2DEFER /* 2-part deferal */ + | NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */ + /* XXX pad strip? 
*/ + WR4(sc, NPE_MAC_RX_CNTRL1, + NPE_RX_CNTRL1_CRC_EN /* include CRC/FCS */ + | NPE_RX_CNTRL1_PAUSE_EN); /* ena pause frame handling */ + WR4(sc, NPE_MAC_RX_CNTRL2, 0); + + npe_setmac(sc, IF_LLADDR(ifp)); + npe_setmcast(sc); + + npe_startxmit(sc); + npe_startrecv(sc); + + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + ifp->if_timer = 0; /* just in case */ + + /* enable transmitter and reciver in the MAC */ + WR4(sc, NPE_MAC_RX_CNTRL1, + RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN); + WR4(sc, NPE_MAC_TX_CNTRL1, + RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN); + + callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc); +} + +static void +npeinit(void *xsc) +{ + struct npe_softc *sc = xsc; + NPE_LOCK(sc); + npeinit_locked(sc); + NPE_UNLOCK(sc); +} + +/* + * Defragment an mbuf chain, returning at most maxfrags separate + * mbufs+clusters. If this is not possible NULL is returned and + * the original mbuf chain is left in it's present (potentially + * modified) state. We use two techniques: collapsing consecutive + * mbufs and replacing consecutive mbufs by a cluster. + */ +static struct mbuf * +npe_defrag(struct mbuf *m0, int how, int maxfrags) +{ + struct mbuf *m, *n, *n2, **prev; + u_int curfrags; + + /* + * Calculate the current number of frags. + */ + curfrags = 0; + for (m = m0; m != NULL; m = m->m_next) + curfrags++; + /* + * First, try to collapse mbufs. Note that we always collapse + * towards the front so we don't need to deal with moving the + * pkthdr. This may be suboptimal if the first mbuf has much + * less data than the following. + */ + m = m0; +again: + for (;;) { + n = m->m_next; + if (n == NULL) + break; + if ((m->m_flags & M_RDONLY) == 0 && + n->m_len < M_TRAILINGSPACE(m)) { + bcopy(mtod(n, void *), mtod(m, char *) + m->m_len, + n->m_len); + m->m_len += n->m_len; + m->m_next = n->m_next; + m_free(n); + if (--curfrags <= maxfrags) + return m0; + } else + m = n; + } + KASSERT(maxfrags > 1, + ("maxfrags %u, but normal collapse failed", maxfrags)); + /* + * Collapse consecutive mbufs to a cluster. + */ + prev = &m0->m_next; /* NB: not the first mbuf */ + while ((n = *prev) != NULL) { + if ((n2 = n->m_next) != NULL && + n->m_len + n2->m_len < MCLBYTES) { + m = m_getcl(how, MT_DATA, 0); + if (m == NULL) + goto bad; + bcopy(mtod(n, void *), mtod(m, void *), n->m_len); + bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len, + n2->m_len); + m->m_len = n->m_len + n2->m_len; + m->m_next = n2->m_next; + *prev = m; + m_free(n); + m_free(n2); + if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */ + return m0; + /* + * Still not there, try the normal collapse + * again before we allocate another cluster. + */ + goto again; + } + prev = &n->m_next; + } + /* + * No place where we can collapse to a cluster; punt. + * This can occur if, for example, you request 2 frags + * but the packet requires that both be clusters (we + * never reallocate the first mbuf to avoid moving the + * packet header). + */ +bad: + return NULL; +} + +/* + * Dequeue packets and place on the h/w transmit queue. + */ +static void +npestart_locked(struct ifnet *ifp) +{ + struct npe_softc *sc = ifp->if_softc; + struct npebuf *npe; + struct npehwbuf *hw; + struct mbuf *m, *n; + struct npedma *dma = &sc->txdma; + bus_dma_segment_t segs[NPE_MAXSEG]; + int nseg, len, error, i; + uint32_t next; + + NPE_ASSERT_LOCKED(sc); + /* XXX can this happen? 
*/ + if (ifp->if_drv_flags & IFF_DRV_OACTIVE) + return; + + while (sc->tx_free != NULL) { + IFQ_DRV_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) { + /* XXX? */ + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + return; + } + npe = sc->tx_free; + error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, + m, segs, &nseg, 0); + if (error == EFBIG) { + n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG); + if (n == NULL) { + if_printf(ifp, "%s: too many fragments %u\n", + __func__, nseg); + m_freem(m); + return; /* XXX? */ + } + m = n; + error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, + m, segs, &nseg, 0); + } + if (error != 0 || nseg == 0) { + if_printf(ifp, "%s: error %u nseg %u\n", + __func__, error, nseg); + m_freem(m); + return; /* XXX? */ + } + sc->tx_free = npe->ix_next; + + bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE); + + /* + * Tap off here if there is a bpf listener. + */ + BPF_MTAP(ifp, m); + + npe->ix_m = m; + hw = npe->ix_hw; + len = m->m_pkthdr.len; + next = npe->ix_neaddr + sizeof(hw->ix_ne[0]); + for (i = 0; i < nseg; i++) { + hw->ix_ne[i].data = htobe32(segs[i].ds_addr); + hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len); + hw->ix_ne[i].next = htobe32(next); + + len = 0; /* zero for segments > 1 */ + next += sizeof(hw->ix_ne[0]); + } + hw->ix_ne[i-1].next = 0; /* zero last in chain */ + /* XXX flush descriptor instead of using uncached memory */ + + DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n", + __func__, sc->tx_qid, npe->ix_neaddr, + hw->ix_ne[0].data, hw->ix_ne[0].len); + /* stick it on the tx q */ + /* XXX add vlan priority */ + ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr); + + ifp->if_timer = 5; + } + if (sc->tx_free == NULL) + ifp->if_drv_flags |= IFF_DRV_OACTIVE; +} + +void +npestart(struct ifnet *ifp) +{ + struct npe_softc *sc = ifp->if_softc; + NPE_LOCK(sc); + npestart_locked(ifp); + NPE_UNLOCK(sc); +} + +static void +npe_stopxmit(struct npe_softc *sc) +{ + struct npedma *dma = &sc->txdma; + int i; + + NPE_ASSERT_LOCKED(sc); + + /* XXX qmgr */ + for (i = 0; i < dma->nbuf; i++) { + struct npebuf *npe = &dma->buf[i]; + + if (npe->ix_m != NULL) { + bus_dmamap_unload(dma->mtag, npe->ix_map); + m_freem(npe->ix_m); + npe->ix_m = NULL; + } + } +} + +static void +npe_stoprecv(struct npe_softc *sc) +{ + struct npedma *dma = &sc->rxdma; + int i; + + NPE_ASSERT_LOCKED(sc); + + /* XXX qmgr */ + for (i = 0; i < dma->nbuf; i++) { + struct npebuf *npe = &dma->buf[i]; + + if (npe->ix_m != NULL) { + bus_dmamap_unload(dma->mtag, npe->ix_map); + m_freem(npe->ix_m); + npe->ix_m = NULL; + } + } +} + +/* + * Turn off interrupts, and stop the nic. + */ +void +npestop(struct npe_softc *sc) +{ + struct ifnet *ifp = sc->sc_ifp; + + /* disable transmitter and reciver in the MAC */ + WR4(sc, NPE_MAC_RX_CNTRL1, + RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); + WR4(sc, NPE_MAC_TX_CNTRL1, + RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); + + ifp->if_timer = 0; + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + callout_stop(&sc->tick_ch); + + npe_stopxmit(sc); + npe_stoprecv(sc); + /* XXX go into loopback & drain q's? */ + /* XXX but beware of disabling tx above */ + + /* + * The MAC core rx/tx disable may leave the MAC hardware in an + * unpredictable state. A hw reset is executed before resetting + * all the MAC parameters to a known value. 
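+ * Only the clock threshold and MDC enable are restored here;
+ * npeinit_locked() reprograms the remaining MAC registers when the
+ * interface is brought back up.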
+ */ + WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); + DELAY(NPE_MAC_RESET_DELAY); + WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT); + WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); +} + +void +npewatchdog(struct ifnet *ifp) +{ + struct npe_softc *sc = ifp->if_softc; + + NPE_LOCK(sc); + if_printf(ifp, "device timeout\n"); + ifp->if_oerrors++; + npeinit_locked(sc); + NPE_UNLOCK(sc); +} + +static int +npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct npe_softc *sc = ifp->if_softc; + struct mii_data *mii; + struct ifreq *ifr = (struct ifreq *)data; + int error = 0; +#ifdef DEVICE_POLLING + int mask; +#endif + + switch (cmd) { + case SIOCSIFFLAGS: + NPE_LOCK(sc); + if ((ifp->if_flags & IFF_UP) == 0 && + ifp->if_drv_flags & IFF_DRV_RUNNING) { + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + npestop(sc); + } else { + /* reinitialize card on any parameter change */ + npeinit_locked(sc); + } + NPE_UNLOCK(sc); + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + /* update multicast filter list. */ + NPE_LOCK(sc); + npe_setmcast(sc); + NPE_UNLOCK(sc); + error = 0; + break; + + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + mii = device_get_softc(sc->sc_mii); + error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); + break; + +#ifdef DEVICE_POLLING + case SIOCSIFCAP: + mask = ifp->if_capenable ^ ifr->ifr_reqcap; + if (mask & IFCAP_POLLING) { + if (ifr->ifr_reqcap & IFCAP_POLLING) { + error = ether_poll_register(npe_poll, ifp); + if (error) + return error; + NPE_LOCK(sc); + /* disable callbacks XXX txdone is shared */ + ixpqmgr_notify_disable(sc->rx_qid); + ixpqmgr_notify_disable(sc->tx_doneqid); + ifp->if_capenable |= IFCAP_POLLING; + NPE_UNLOCK(sc); + } else { + error = ether_poll_deregister(ifp); + /* NB: always enable qmgr callbacks */ + NPE_LOCK(sc); + /* enable qmgr callbacks */ + ixpqmgr_notify_enable(sc->rx_qid, + IX_QMGR_Q_SOURCE_ID_NOT_E); + ixpqmgr_notify_enable(sc->tx_doneqid, + IX_QMGR_Q_SOURCE_ID_NOT_E); + ifp->if_capenable &= ~IFCAP_POLLING; + NPE_UNLOCK(sc); + } + } + break; +#endif + default: + error = ether_ioctl(ifp, cmd, data); + break; + } + return error; +} + +/* + * Setup a traffic class -> rx queue mapping. + */ +static int +npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid) +{ + int npeid = npeconfig[device_get_unit(sc->sc_dev)].npeid; + uint32_t msg[2]; + + msg[0] = (NPE_SETRXQOSENTRY << 24) | (npeid << 20) | classix; + msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4); + return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); +} + +/* + * Update and reset the statistics in the NPE. + */ +static int +npe_updatestats(struct npe_softc *sc) +{ + uint32_t msg[2]; + + msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL; + msg[1] = sc->sc_stats_phys; /* physical address of stat block */ + return ixpnpe_sendmsg(sc->sc_npe, msg); /* NB: no recv */ +} + +#if 0 +/* + * Get the current statistics block. + */ +static int +npe_getstats(struct npe_softc *sc) +{ + uint32_t msg[2]; + + msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL; + msg[1] = sc->sc_stats_phys; /* physical address of stat block */ + return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); +} + +/* + * Query the image id of the loaded firmware. + */ +static uint32_t +npe_getimageid(struct npe_softc *sc) +{ + uint32_t msg[2]; + + msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL; + msg[1] = 0; + return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0; +} + +/* + * Enable/disable loopback. 
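+ * (Unreferenced for now; compiled out under #if 0 together with
+ * npe_getstats() and npe_getimageid() above.)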
+ */ +static int +npe_setloopback(struct npe_softc *sc, int ena) +{ + uint32_t msg[2]; + + msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0); + msg[1] = 0; + return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); +} +#endif + +static void +npe_child_detached(device_t dev, device_t child) +{ + struct npe_softc *sc; + + sc = device_get_softc(dev); + if (child == sc->sc_mii) + sc->sc_mii = NULL; +} + +/* + * MII bus support routines. + * + * NB: ixp425 has one PHY per NPE + */ +static uint32_t +npe_mii_mdio_read(struct npe_softc *sc, int reg) +{ +#define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg) + uint32_t v; + + /* NB: registers are known to be sequential */ + v = (MII_RD4(sc, reg+0) & 0xff) << 0; + v |= (MII_RD4(sc, reg+4) & 0xff) << 8; + v |= (MII_RD4(sc, reg+8) & 0xff) << 16; + v |= (MII_RD4(sc, reg+12) & 0xff) << 24; + return v; +#undef MII_RD4 +} + +static void +npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd) +{ +#define MII_WR4(sc, reg, v) \ + bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v) + + /* NB: registers are known to be sequential */ + MII_WR4(sc, reg+0, cmd & 0xff); + MII_WR4(sc, reg+4, (cmd >> 8) & 0xff); + MII_WR4(sc, reg+8, (cmd >> 16) & 0xff); + MII_WR4(sc, reg+12, (cmd >> 24) & 0xff); +#undef MII_WR4 +} + +static int +npe_mii_mdio_wait(struct npe_softc *sc) +{ +#define MAXTRIES 100 /* XXX */ + uint32_t v; + int i; + + for (i = 0; i < MAXTRIES; i++) { + v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD); + if ((v & NPE_MII_GO) == 0) + return 1; + } + return 0; /* NB: timeout */ +#undef MAXTRIES +} + +static int +npe_miibus_readreg(device_t dev, int phy, int reg) +{ + struct npe_softc *sc = device_get_softc(dev); + uint32_t v; + + if (phy != device_get_unit(dev)) /* XXX */ + return 0xffff; + v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) + | NPE_MII_GO; + npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); + if (npe_mii_mdio_wait(sc)) + v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS); + else + v = 0xffff | NPE_MII_READ_FAIL; + return (v & NPE_MII_READ_FAIL) ? 
0xffff : (v & 0xffff); +#undef MAXTRIES +} + +static void +npe_miibus_writereg(device_t dev, int phy, int reg, int data) +{ + struct npe_softc *sc = device_get_softc(dev); + uint32_t v; + + if (phy != device_get_unit(dev)) /* XXX */ + return; + v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) + | data | NPE_MII_WRITE + | NPE_MII_GO; + npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); + /* XXX complain about timeout */ + (void) npe_mii_mdio_wait(sc); +} + +static void +npe_miibus_statchg(device_t dev) +{ + struct npe_softc *sc = device_get_softc(dev); + struct mii_data *mii = device_get_softc(sc->sc_mii); + uint32_t tx1, rx1; + + /* sync MAC duplex state */ + tx1 = RD4(sc, NPE_MAC_TX_CNTRL1); + rx1 = RD4(sc, NPE_MAC_RX_CNTRL1); + if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { + tx1 &= ~NPE_TX_CNTRL1_DUPLEX; + rx1 |= NPE_RX_CNTRL1_PAUSE_EN; + } else { + tx1 |= NPE_TX_CNTRL1_DUPLEX; + rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN; + } + WR4(sc, NPE_MAC_RX_CNTRL1, rx1); + WR4(sc, NPE_MAC_TX_CNTRL1, tx1); +} + +static device_method_t npe_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, npe_probe), + DEVMETHOD(device_attach, npe_attach), + DEVMETHOD(device_detach, npe_detach), + + /* Bus interface */ + DEVMETHOD(bus_child_detached, npe_child_detached), + + /* MII interface */ + DEVMETHOD(miibus_readreg, npe_miibus_readreg), + DEVMETHOD(miibus_writereg, npe_miibus_writereg), + DEVMETHOD(miibus_statchg, npe_miibus_statchg), + + { 0, 0 } +}; + +static driver_t npe_driver = { + "npe", + npe_methods, + sizeof(struct npe_softc), +}; + +DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0); +DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0); +MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1); +MODULE_DEPEND(npe, miibus, 1, 1, 1); +MODULE_DEPEND(npe, ether, 1, 1, 1); diff --git a/sys/arm/xscale/ixp425/if_npereg.h b/sys/arm/xscale/ixp425/if_npereg.h new file mode 100644 index 000000000000..d41905d8840b --- /dev/null +++ b/sys/arm/xscale/ixp425/if_npereg.h @@ -0,0 +1,288 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. 
+ * + * $FreeBSD$ + */ + +/* + * Copyright (c) 2001-2005, Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef ARM_XSCALE_IF_NPEREG_H +#define ARM_XSCALE_IF_NPEREG_H + +/* + * NPE/NPE tx/rx descriptor format. This is just the area + * shared with ucode running in the NPE; the driver-specific + * state is defined in the driver. The shared area must be + * cacheline-aligned. We allocate NPE_MAXSEG "descriptors" + * per buffer; this allows us to do minimal s/g. The number + * of descriptors can be expanded but doing so uses memory + * so should be done with care. + * + * The driver sets up buffers in uncached memory. + */ +#define NPE_MAXSEG 3 /* empirically selected */ + +struct npehwbuf { + struct { /* NPE shared area, cacheline aligned */ + uint32_t next; /* phys addr of next segment */ + uint32_t len; /* buffer/segment length (bytes) */ + uint32_t data; /* phys addr of data segment */ + uint32_t pad[5]; /* pad to cacheline */ + } ix_ne[NPE_MAXSEG]; +}; + +/* NPE ID's */ +#define NPE_A 0 +#define NPE_B 1 +#define NPE_C 2 +#define NPE_MAX (NPE_C+1) + +#define NPE_PORTS_MAX 2 /* logical ports */ +#define NPE_FRAME_SIZE_DEFAULT 1536 +#define NPE_FRAME_SIZE_MAX (65536-64) +#define NPE_FRAME_SIZE_MIN 64 + +/* + * Queue Manager-related definitions. + * + * These define the layout of 32-bit Q entries passed + * between the host cpu and the NPE's. + */ +#define NPE_QM_Q_NPE(e) (((e)>>0)&0x3) /* NPE ID */ +#define NPE_QM_Q_PORT(e) (((e)>>3)&0x1) /* Port ID */ +#define NPE_QM_Q_PRIO(e) (((e)>>0)&0x3) /* 802.1d priority */ +#define NPE_QM_Q_ADDR(e) ((e)&0xfffffffe0) /* phys address */ + +/* + * Host->NPE requests written to the shared mailbox. + * The NPE writes the same value back as an ACK. 
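+ * A request is a pair of 32-bit words; the request id goes in the
+ * top byte of the first word (see NPE_MAC_MSGID_SHL below), with
+ * request-specific arguments in the remaining bits and the second
+ * word (e.g. the physical address of the stats block for
+ * NPE_RESETSTATS).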
+ */ +#define NPE_GETSTATUS 0x00 /* get firmware revision */ +#define NPE_SETPORTADDRESS 0x01 /* set port id and mac address */ +#define NPE_GETMACADDRDB 0x02 /* upload filter database */ +#define NPE_SETMACADDRDB 0x03 /* download filter database */ +#define NPE_GETSTATS 0x04 /* get statistics */ +#define NPE_RESETSTATS 0x05 /* reset stats + return result */ +#define NPE_SETMAXFRAME 0x06 /* configure max tx/rx frame lengths */ +#define NPE_SETRXTAGMODE 0x07 /* configure VLAN rx operating mode */ +#define NPE_SETDEFRXVID 0x08 /* set def VLAN tag + traffic class */ +#define NPE_SETRXQOSENTRY 0x0b /* map user pri -> QoS class+rx qid */ +#define NPE_SETFIREWALLMODE 0x0e /* config firewall services */ +#define NPE_SETLOOPBACK 0x12 /* enable/disable loopback */ +/* ... XXX more */ + +#define NPE_MAC_MSGID_SHL 24 +#define NPE_MAC_PORTID_SHL 16 + +/* + * MAC register definitions; see section + * 15.2 of the Intel Developers Manual. + */ +#define NPE_MAC_TX_CNTRL1 0x000 +#define NPE_MAC_TX_CNTRL2 0x004 +#define NPE_MAC_RX_CNTRL1 0x010 +#define NPE_MAC_RX_CNTRL2 0x014 +#define NPE_MAC_RANDOM_SEED 0x020 +#define NPE_MAC_THRESH_P_EMPTY 0x030 +#define NPE_MAC_THRESH_P_FULL 0x038 +#define NPE_MAC_BUF_SIZE_TX 0x040 +#define NPE_MAC_TX_DEFER 0x050 +#define NPE_MAC_RX_DEFER 0x054 +#define NPE_MAC_TX_TWO_DEFER_1 0x060 +#define NPE_MAC_TX_TWO_DEFER_2 0x064 +#define NPE_MAC_SLOT_TIME 0x070 +#define NPE_MAC_MDIO_CMD_1 0x080 +#define NPE_MAC_MDIO_CMD_2 0x084 +#define NPE_MAC_MDIO_CMD_3 0x088 +#define NPE_MAC_MDIO_CMD_4 0x08c +#define NPE_MAC_MDIO_STS_1 0x090 +#define NPE_MAC_MDIO_STS_2 0x094 +#define NPE_MAC_MDIO_STS_3 0x098 +#define NPE_MAC_MDIO_STS_4 0x09c +#define NPE_MAC_ADDR_MASK_1 0x0A0 +#define NPE_MAC_ADDR_MASK_2 0x0A4 +#define NPE_MAC_ADDR_MASK_3 0x0A8 +#define NPE_MAC_ADDR_MASK_4 0x0AC +#define NPE_MAC_ADDR_MASK_5 0x0B0 +#define NPE_MAC_ADDR_MASK_6 0x0B4 +#define NPE_MAC_ADDR_1 0x0C0 +#define NPE_MAC_ADDR_2 0x0C4 +#define NPE_MAC_ADDR_3 0x0C8 +#define NPE_MAC_ADDR_4 0x0CC +#define NPE_MAC_ADDR_5 0x0D0 +#define NPE_MAC_ADDR_6 0x0D4 +#define NPE_MAC_INT_CLK_THRESH 0x0E0 +#define NPE_MAC_UNI_ADDR_1 0x0F0 +#define NPE_MAC_UNI_ADDR_2 0x0F4 +#define NPE_MAC_UNI_ADDR_3 0x0F8 +#define NPE_MAC_UNI_ADDR_4 0x0FC +#define NPE_MAC_UNI_ADDR_5 0x100 +#define NPE_MAC_UNI_ADDR_6 0x104 +#define NPE_MAC_CORE_CNTRL 0x1FC + +#define NPE_MAC_ADDR_MASK(i) (NPE_MAC_ADDR_MASK_1 + ((i)<<2)) +#define NPE_MAC_ADDR(i) (NPE_MAC_ADDR_1 + ((i)<<2)) +#define NPE_MAC_UNI_ADDR(i) (NPE_MAC_UNI_ADDR_1 + ((i)<<2)) + +/* + * Bit definitions + */ + +/* TX Control Register 1*/ +#define NPE_TX_CNTRL1_TX_EN 0x01 /* enable TX engine */ +#define NPE_TX_CNTRL1_DUPLEX 0x02 /* select half duplex */ +#define NPE_TX_CNTRL1_RETRY 0x04 /* auto-retry on collision */ +#define NPE_TX_CNTRL1_PAD_EN 0x08 /* pad frames <64 bytes */ +#define NPE_TX_CNTRL1_FCS_EN 0x10 /* append FCS */ +#define NPE_TX_CNTRL1_2DEFER 0x20 /* select 2-part deferral */ +#define NPE_TX_CNTRL1_RMII 0x40 + +/* TX Control Register 2 */ +#define NPE_TX_CNTRL2_RETRIES_MASK 0xf /* max retry count */ + +/* RX Control Register 1 */ +#define NPE_RX_CNTRL1_RX_EN 0x01 /* enable RX engine */ +#define NPE_RX_CNTRL1_PADSTRIP_EN 0x02 /* strip frame padding */ +#define NPE_RX_CNTRL1_CRC_EN 0x04 /* include CRC in RX frame */ +#define NPE_RX_CNTRL1_PAUSE_EN 0x08 /* detect Pause frames */ +#define NPE_RX_CNTRL1_LOOP_EN 0x10 /* loopback tx/rx */ +#define NPE_RX_CNTRL1_ADDR_FLTR_EN 0x20 /* enable address filtering */ +#define NPE_RX_CNTRL1_RX_RUNT_EN 0x40 /* enable RX of runt frames */ +#define 
NPE_RX_CNTRL1_BCAST_DIS 0x80 /* discard broadcast frames */ + +/* RX Control Register 2 */ +#define NPE_RX_CNTRL2_DEFER_EN 0x01 + +/* Core Control Register */ +#define NPE_CORE_RESET 0x01 /* MAC reset state */ +#define NPE_CORE_RX_FIFO_FLUSH 0x02 /* flush RX FIFO */ +#define NPE_CORE_TX_FIFO_FLUSH 0x04 /* flush TX FIFO */ +#define NPE_CORE_SEND_JAM 0x08 /* send JAM on packet RX */ +#define NPE_CORE_MDC_EN 0x10 /* IXP42X drives MDC clock */ + +/* + * Stat block returned by NPE with NPE_GETSTATS msg. + */ +struct npestats { + uint32_t dot3StatsAlignmentErrors; + uint32_t dot3StatsFCSErrors; + uint32_t dot3StatsInternalMacReceiveErrors; + uint32_t RxOverrunDiscards; + uint32_t RxLearnedEntryDiscards; + uint32_t RxLargeFramesDiscards; + uint32_t RxSTPBlockedDiscards; + uint32_t RxVLANTypeFilterDiscards; + uint32_t RxVLANIdFilterDiscards; + uint32_t RxInvalidSourceDiscards; + uint32_t RxBlackListDiscards; + uint32_t RxWhiteListDiscards; + uint32_t RxUnderflowEntryDiscards; + uint32_t dot3StatsSingleCollisionFrames; + uint32_t dot3StatsMultipleCollisionFrames; + uint32_t dot3StatsDeferredTransmissions; + uint32_t dot3StatsLateCollisions; + uint32_t dot3StatsExcessiveCollisions; + uint32_t dot3StatsInternalMacTransmitErrors; + uint32_t dot3StatsCarrierSenseErrors; + uint32_t TxLargeFrameDiscards; + uint32_t TxVLANIdFilterDiscards; +}; + +/* + * Default values + */ +#define NPE_MAC_INT_CLK_THRESH_DEFAULT 0x1 + +#define NPE_MAC_RESET_DELAY 1 + +/* This value applies to RMII */ +#define NPE_MAC_SLOT_TIME_RMII_DEFAULT 0xFF + +/* + * MII definitions - these have been verified against the LXT971 and LXT972 PHYs + */ +#define NPE_MII_REG_SHL 16 +#define NPE_MII_ADDR_SHL 21 + +/* NB: shorthands for mii bus mdio routines */ +#define NPE_MAC_MDIO_CMD NPE_MAC_MDIO_CMD_1 +#define NPE_MAC_MDIO_STS NPE_MAC_MDIO_STS_1 + +#define NPE_MII_GO (1<<31) +#define NPE_MII_WRITE (1<<26) +#define NPE_MII_TIMEOUT_10TH_SECS 5 +#define NPE_MII_10TH_SEC_IN_MILLIS 100 +#define NPE_MII_READ_FAIL (1<<31) + +#define NPE_MII_PHY_DEF_DELAY 300 /* max delay before link up, etc. */ +#define NPE_MII_PHY_NO_DELAY 0x0 /* do not delay */ +#define NPE_MII_PHY_NULL 0xff /* PHY is not present */ +#define NPE_MII_PHY_DEF_ADDR 0x0 /* default PHY's logical address */ + +/* Register definition */ +#define NPE_MII_CTRL_REG 0x0 /* Control Register */ +#define NPE_MII_STAT_REG 0x1 /* Status Register */ +#define NPE_MII_PHY_ID1_REG 0x2 /* PHY identifier 1 Register */ +#define NPE_MII_PHY_ID2_REG 0x3 /* PHY identifier 2 Register */ +#define NPE_MII_AN_ADS_REG 0x4 /* Auto-Negotiation */ + /* Advertisement Register */ +#define NPE_MII_AN_PRTN_REG 0x5 /* Auto-Negotiation */ + /* partner ability Register */ +#define NPE_MII_AN_EXP_REG 0x6 /* Auto-Negotiation */ + /* Expansion Register */ +#define NPE_MII_AN_NEXT_REG 0x7 /* Auto-Negotiation */ + /* next-page transmit Register */ +#endif /* ARM_XSCALE_IF_NPEREG_H */ diff --git a/sys/arm/xscale/ixp425/ixdp425_pci.c b/sys/arm/xscale/ixp425/ixdp425_pci.c new file mode 100644 index 000000000000..7504baa59fea --- /dev/null +++ b/sys/arm/xscale/ixp425/ixdp425_pci.c @@ -0,0 +1,169 @@ +/* $NetBSD: ixdp425_pci.c,v 1.5 2005/12/11 12:17:09 christos Exp $ */ +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#define _ARM32_BUS_DMA_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +void +ixp425_md_attach(device_t dev) +{ + struct ixp425_softc *sc = device_get_softc(device_get_parent(dev)); + struct ixppcib_softc *pci_sc = device_get_softc(dev); + uint32_t reg; + + + /* PCI Reset Assert */ + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOUTR); + reg &= ~(1U << GPIO_PCI_RESET); + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPOUTR, reg & ~(1U << GPIO_PCI_RESET)); + + /* PCI Clock Disable */ + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPCLKR); + reg &= ~GPCLKR_MUX14; + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPCLKR, reg & ~GPCLKR_MUX14); + + /* + * set GPIO Direction + * Output: PCI_CLK, PCI_RESET + * Input: PCI_INTA, PCI_INTB, PCI_INTC, PCI_INTD + */ + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOER); + reg &= ~(1U << GPIO_PCI_CLK); + reg &= ~(1U << GPIO_PCI_RESET); + reg |= ((1U << GPIO_PCI_INTA) | (1U << GPIO_PCI_INTB) | + (1U << GPIO_PCI_INTC) | (1U << GPIO_PCI_INTD)); + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPOER, reg); + + /* + * Set GPIO interrupt type + * PCI_INT_A, PCI_INTB, PCI_INT_C, PCI_INT_D: Active Low + */ + reg = GPIO_CONF_READ_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTA)); + reg &= ~GPIO_TYPE(GPIO_PCI_INTA, GPIO_TYPE_MASK); + reg |= GPIO_TYPE(GPIO_PCI_INTA, GPIO_TYPE_ACT_LOW); + GPIO_CONF_WRITE_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTA), reg); + + reg = GPIO_CONF_READ_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTB)); + reg &= ~GPIO_TYPE(GPIO_PCI_INTB, GPIO_TYPE_MASK); + reg |= GPIO_TYPE(GPIO_PCI_INTB, GPIO_TYPE_ACT_LOW); + GPIO_CONF_WRITE_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTB), reg); + + reg = GPIO_CONF_READ_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTC)); + reg &= ~GPIO_TYPE(GPIO_PCI_INTC, GPIO_TYPE_MASK); + reg |= GPIO_TYPE(GPIO_PCI_INTC, GPIO_TYPE_ACT_LOW); + GPIO_CONF_WRITE_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTC), reg); + + reg = GPIO_CONF_READ_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTD)); + reg &= ~GPIO_TYPE(GPIO_PCI_INTD, GPIO_TYPE_MASK); + reg |= 
GPIO_TYPE(GPIO_PCI_INTD, GPIO_TYPE_ACT_LOW); + GPIO_CONF_WRITE_4(sc, GPIO_TYPE_REG(GPIO_PCI_INTD), reg); + + /* clear ISR */ + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPISR, + (1U << GPIO_PCI_INTA) | (1U << GPIO_PCI_INTB) | + (1U << GPIO_PCI_INTC) | (1U << GPIO_PCI_INTD)); + + /* wait 1ms to satisfy "minimum reset assertion time" of the PCI spec */ + DELAY(1000); + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPCLKR); + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPCLKR, reg | + (0xf << GPCLKR_CLK0DC_SHIFT) | (0xf << GPCLKR_CLK0TC_SHIFT)); + + /* PCI Clock Enable */ + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPCLKR); + reg |= GPCLKR_MUX14; + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPCLKR, reg | GPCLKR_MUX14); + + /* + * wait 100us to satisfy "minimum reset assertion time from clock stable + * requirement of the PCI spec + */ + DELAY(100); + /* PCI Reset deassert */ + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPOUTR); + reg |= 1U << GPIO_PCI_RESET; + GPIO_CONF_WRITE_4(sc, IXP425_GPIO_GPOUTR, reg | (1U << GPIO_PCI_RESET)); + pci_sc->sc_irq_rman.rm_type = RMAN_ARRAY; + pci_sc->sc_irq_rman.rm_descr = "IXP425 PCI IRQs"; + CTASSERT(PCI_INT_D < PCI_INT_A); + /* XXX this overlaps the irq's setup in ixp425_attach */ + if (rman_init(&pci_sc->sc_irq_rman) != 0 || + rman_manage_region(&pci_sc->sc_irq_rman, PCI_INT_D, PCI_INT_A) != 0) + panic("ixp425_md_attach: failed to set up IRQ rman"); +} + +#define IXP425_MAX_DEV 4 +#define IXP425_MAX_LINE 4 + +int +ixp425_md_route_interrupt(device_t bridge, device_t device, int pin) +{ + static int ixp425_pci_table[IXP425_MAX_DEV][IXP425_MAX_LINE] = + { + {PCI_INT_A, PCI_INT_B, PCI_INT_C, PCI_INT_D}, + {PCI_INT_B, PCI_INT_C, PCI_INT_D, PCI_INT_A}, + {PCI_INT_C, PCI_INT_D, PCI_INT_A, PCI_INT_B}, + {PCI_INT_D, PCI_INT_A, PCI_INT_B, PCI_INT_C}, + }; + int dev; + + dev = pci_get_slot(device); + if (bootverbose) + device_printf(bridge, "routing pin %d for %s\n", pin, + device_get_nameunit(device)); + if (pin >= 1 && pin <= IXP425_MAX_LINE && + dev >= 1 && dev <= IXP425_MAX_DEV) { + return (ixp425_pci_table[dev - 1][pin - 1]); + } else + printf("ixppcib: no mapping for %d/%d/%d\n", + pci_get_bus(device), dev, pci_get_function(device)); + + return (-1); +} diff --git a/sys/arm/xscale/ixp425/ixdp425reg.h b/sys/arm/xscale/ixp425/ixdp425reg.h new file mode 100644 index 000000000000..06d7ae99f10c --- /dev/null +++ b/sys/arm/xscale/ixp425/ixdp425reg.h @@ -0,0 +1,54 @@ +/* $NetBSD: ixdp425reg.h,v 1.6 2005/12/11 12:17:09 christos Exp $ */ +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* $FreeBSD$ */ +#ifndef _IXDP425REG_H_ +#define _IXDP425REG_H_ +/* GPIOs */ +#define GPIO_PCI_CLK 14 +#define GPIO_PCI_RESET 13 +#define GPIO_PCI_INTA 11 +#define GPIO_PCI_INTB 10 +#define GPIO_PCI_INTC 9 +#define GPIO_PCI_INTD 8 +#define GPIO_I2C_SDA 7 +#define GPIO_I2C_SDA_BIT (1U << 7) +#define GPIO_I2C_SCL 6 +#define GPIO_I2C_SCL_BIT (1U << 6) +/* Interrupt */ +#define PCI_INT_A IXP425_INT_GPIO_11 +#define PCI_INT_B IXP425_INT_GPIO_10 +#define PCI_INT_C IXP425_INT_GPIO_9 +#define PCI_INT_D IXP425_INT_GPIO_8 +#endif /* _IXDP425REG_H_ */ diff --git a/sys/arm/xscale/ixp425/ixp425.c b/sys/arm/xscale/ixp425/ixp425.c new file mode 100644 index 000000000000..48ac29ccf91f --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425.c @@ -0,0 +1,369 @@ +/* $NetBSD: ixp425.c,v 1.10 2005/12/11 12:16:51 christos Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#define _ARM32_BUS_DMA_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +volatile uint32_t intr_enabled; +uint32_t intr_steer = 0; + +struct ixp425_softc *ixp425_softc = NULL; + +static int ixp425_probe(device_t); +static void ixp425_identify(driver_t *, device_t); +static int ixp425_attach(device_t); + +static struct { + uint32_t hwbase; + uint32_t size; + uint32_t vbase; +} hwvtrans[] = { + { IXP425_IO_HWBASE, IXP425_IO_SIZE, IXP425_IO_VBASE }, + { IXP425_EXP_HWBASE, IXP425_EXP_SIZE, IXP425_EXP_VBASE }, + { IXP425_PCI_HWBASE, IXP425_PCI_SIZE, IXP425_PCI_VBASE }, + { IXP425_PCI_MEM_HWBASE,IXP425_PCI_MEM_SIZE, IXP425_PCI_MEM_VBASE }, +#if 0 + { IXP425_PCI_IO_HWBASE, IXP425_PCI_IO_SIZE, IXP425_PCI_IO_VBASE }, +#endif + { IXP425_MCU_HWBASE, IXP425_MCU_SIZE, IXP425_MCU_VBASE }, + { IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, IXP425_QMGR_VBASE }, + { IXP425_NPE_A_HWBASE, IXP425_NPE_A_SIZE, IXP425_NPE_A_VBASE }, + { IXP425_NPE_B_HWBASE, IXP425_NPE_B_SIZE, IXP425_NPE_B_VBASE }, + { IXP425_NPE_C_HWBASE, IXP425_NPE_C_SIZE, IXP425_NPE_C_VBASE }, + { IXP425_MAC_A_HWBASE, IXP425_MAC_A_SIZE, IXP425_MAC_A_VBASE }, + { IXP425_MAC_B_HWBASE, IXP425_MAC_B_SIZE, IXP425_MAC_B_VBASE }, + /* Gateworks Avila IDE/CF is mapped here */ + { IXP425_EXP_BUS_CS1_HWBASE, IXP425_EXP_BUS_SIZE, + IXP425_EXP_BUS_CS1_VBASE }, +}; + +static int +getvbase(uint32_t hwbase, uint32_t size, uint32_t *vbase) +{ + int i; + + for (i = 0; i < sizeof hwvtrans / sizeof *hwvtrans; i++) { + if (hwbase >= hwvtrans[i].hwbase && + hwbase + size <= hwvtrans[i].hwbase + hwvtrans[i].size) { + *vbase = hwbase - hwvtrans[i].hwbase + hwvtrans[i].vbase; + return (0); + } + } + + return (ENOENT); +} + +struct arm32_dma_range * +bus_dma_get_range(void) +{ + return (NULL); +} + +int +bus_dma_get_range_nb(void) +{ + return (0); +} + +static __inline u_int32_t +ixp425_irq2gpio_bit(int irq) +{ + + static const uint8_t int2gpio[32] __attribute__ ((aligned(32))) = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* INT#0 -> INT#5 */ + 0x00, 0x01, /* GPIO#0 -> GPIO#1 */ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* INT#8 -> INT#13 */ + 0xff, 0xff, 0xff, 0xff, 0xff, /* INT#14 -> INT#18 */ + 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* GPIO#2 -> GPIO#7 */ + 0x08, 0x09, 0x0a, 0x0b, 0x0c, /* GPIO#8 -> GPIO#12 */ + 0xff, 0xff /* INT#30 -> INT#31 */ + }; + + return (1U << int2gpio[irq]); +} + +void +arm_mask_irq(uintptr_t nb) +{ + intr_enabled &= ~(1 << nb); + ixp425_set_intrmask(); + /*XXX; If it's a GPIO interrupt, ACK it know. 
Can it be a problem ?*/ + if ((1 << nb) & IXP425_INT_GPIOMASK) + IXPREG(IXP425_GPIO_VBASE + IXP425_GPIO_GPISR) = + ixp425_irq2gpio_bit(nb); + + +} + +void +arm_unmask_irq(uintptr_t nb) +{ + intr_enabled |= (1 << nb); + ixp425_set_intrmask(); +} + +static __inline uint32_t +ixp425_irq_read(void) +{ + return IXPREG(IXP425_INT_STATUS) & intr_enabled; +} + +int +arm_get_next_irq(void) +{ + int irq; + + if ((irq = ixp425_irq_read())) + return (ffs(irq) - 1); + return (-1); +} + +void +cpu_reset(void) +{ + + bus_space_write_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, + IXP425_OST_WDOG_KEY, OST_WDOG_KEY_MAJICK); + bus_space_write_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, + IXP425_OST_WDOG, 0); + bus_space_write_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, + IXP425_OST_WDOG_ENAB, OST_WDOG_ENAB_RST_ENA | + OST_WDOG_ENAB_CNT_ENA); + printf("Reset failed!\n"); + for(;;); +} + +static void +ixp425_identify(driver_t *driver, device_t parent) +{ + BUS_ADD_CHILD(parent, 0, "ixp", 0); +} + +static int +ixp425_probe(device_t dev) +{ + device_set_desc(dev, "Intel IXP425"); + return (0); +} + +static int +ixp425_attach(device_t dev) +{ + struct ixp425_softc *sc; + + sc = device_get_softc(dev); + sc->sc_iot = &ixp425_bs_tag; + KASSERT(ixp425_softc == NULL, ("ixp425_attach called twice?")); + ixp425_softc = sc; + + intr_enabled = 0; + ixp425_set_intrmask(); + ixp425_set_intrsteer(); + + sc->sc_irq_rman.rm_type = RMAN_ARRAY; + sc->sc_irq_rman.rm_descr = "IXP425 IRQs"; + if (rman_init(&sc->sc_irq_rman) != 0 || + rman_manage_region(&sc->sc_irq_rman, 0, 31) != 0) + panic("ixp425_attach: failed to set up IRQ rman"); + + sc->sc_mem_rman.rm_type = RMAN_ARRAY; + sc->sc_mem_rman.rm_descr = "IXP425 Memory"; + if (rman_init(&sc->sc_mem_rman) != 0 || + rman_manage_region(&sc->sc_mem_rman, 0, ~0) != 0) + panic("ixp425_attach: failed to set up IRQ rman"); + + device_add_child(dev, "pcib", 0); + device_add_child(dev, "ixpclk", 0); + device_add_child(dev, "ixpwdog", 0); + device_add_child(dev, "ixpiic", 0); + device_add_child(dev, "uart", 0); + /* XXX these are optional, what if they are not configured? */ + device_add_child(dev, "ixpqmgr", 0); + device_add_child(dev, "npe", 0); /* NPE-B */ + device_add_child(dev, "npe", 1); /* NPE-C */ + device_add_child(dev, "ata_avila", 0); /* XXX */ + + if (bus_space_map(sc->sc_iot, IXP425_GPIO_HWBASE, IXP425_GPIO_SIZE, + 0, &sc->sc_gpio_ioh)) + panic("ixp425_attach: unable to map GPIO registers"); + if (bus_space_map(sc->sc_iot, IXP425_EXP_HWBASE, IXP425_EXP_SIZE, + 0, &sc->sc_exp_ioh)) + panic("ixp425_attach: unable to map Expansion Bus registers"); + + bus_generic_probe(dev); + bus_generic_attach(dev); + + return (0); +} + +static struct resource * +ixp425_alloc_resource(device_t dev, device_t child, int type, int *rid, + u_long start, u_long end, u_long count, u_int flags) +{ + struct ixp425_softc *sc = device_get_softc(dev); + struct rman *rmanp; + struct resource *rv; + uint32_t vbase; + int isuart = (start == 0 && end == ~0); /* XXX how to do this right? 
*/ + + rv = NULL; + + switch (type) { + case SYS_RES_IRQ: + rmanp = &sc->sc_irq_rman; + if (isuart) { + if (device_get_unit(dev) == 0) + start = IXP425_INT_UART0; + else + start = IXP425_INT_UART1; + end = start; + } + break; + + case SYS_RES_MEMORY: + rmanp = &sc->sc_mem_rman; + if (isuart) { + if (device_get_unit(dev) == 0) + start = IXP425_UART0_HWBASE; + else + start = IXP425_UART1_HWBASE; + end = start + 0x1000; + } + if (getvbase(start, end - start, &vbase)) + return (rv); + break; + + default: + return (rv); + } + + rv = rman_reserve_resource(rmanp, start, end, count, flags, child); + if (rv != NULL) { + rman_set_rid(rv, *rid); + if (type == SYS_RES_MEMORY) { + rman_set_bustag(rv, + isuart ? &ixp425_a4x_bs_tag : sc->sc_iot); + rman_set_bushandle(rv, vbase); + } + } + + return (rv); +} + +static int +ixp425_setup_intr(device_t dev, device_t child, + struct resource *ires, int flags, driver_intr_t *intr, void *arg, + void **cookiep) +{ + uint32_t mask; + int i; + + if (flags & INTR_TYPE_TTY) { + /* XXX: wrong. */ + if (device_get_unit(dev) == 0) + rman_set_start(ires, IXP425_INT_UART0); + else + rman_set_start(ires, IXP425_INT_UART1); + rman_set_end(ires, rman_get_start(ires)); + } + BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, intr, arg, + cookiep); + + mask = 0; + for (i = rman_get_start(ires); i <= rman_get_end(ires); i++) + mask |= 1 << i; + intr_enabled |= mask; + ixp425_set_intrmask(); + + return (0); +} + +static int +ixp425_teardown_intr(device_t dev, device_t child, struct resource *res, + void *cookie) +{ + uint32_t mask; + int i; + + mask = 0; + for (i = rman_get_start(res); i <= rman_get_end(res); i++) + mask |= 1 << i; + intr_enabled &= ~mask; + ixp425_set_intrmask(); + + return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, res, cookie)); +} + +static device_method_t ixp425_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ixp425_probe), + DEVMETHOD(device_attach, ixp425_attach), + DEVMETHOD(device_identify, ixp425_identify), + + /* Bus interface */ + DEVMETHOD(bus_alloc_resource, ixp425_alloc_resource), + DEVMETHOD(bus_setup_intr, ixp425_setup_intr), + DEVMETHOD(bus_teardown_intr, ixp425_teardown_intr), + + {0, 0}, +}; + +static driver_t ixp425_driver = { + "ixp", + ixp425_methods, + sizeof(struct ixp425_softc), +}; +static devclass_t ixp425_devclass; + +DRIVER_MODULE(ixp, nexus, ixp425_driver, ixp425_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/ixp425_a4x_io.S b/sys/arm/xscale/ixp425/ixp425_a4x_io.S new file mode 100644 index 000000000000..22ab1b3479b4 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_a4x_io.S @@ -0,0 +1,142 @@ +/* $NetBSD: ixp425_a4x_io.S,v 1.2 2005/12/11 12:16:51 christos Exp $ */ + +/* + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * There are simple bus space functions for IO registers mapped at + * 32-bit aligned positions. offset is multiplied by 4. + * + * Based loosely on pxa2x0_a2x_io.S + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * bus_space I/O functions with offset*4 + */ + +/* + * Read single + */ +ENTRY(a4x_bs_r_1) + ldr r0, [r1, r2, LSL #2] + and r0, r0, #0xff + mov pc, lr + +ENTRY(a4x_bs_r_2) + ldr r0, [r1, r2, LSL #2] + mov r1, #0xff + orr r1, r1, r1, lsl #8 + and r0, r0, r1 + mov pc, lr + +ENTRY(a4x_bs_r_4) + ldr r0, [r1, r2, LSL #2] + mov pc, lr + +/* + * Write single + */ +ENTRY(a4x_bs_w_1) + and r3, r3, #0xff + str r3, [r1, r2, LSL #2] + mov pc, lr + +ENTRY(a4x_bs_w_2) + mov r0, #0xff + orr r0, r0, r0, lsl #8 + and r3, r3, r0 + str r3, [r1, r2, LSL #2] + mov pc, lr + +ENTRY(a4x_bs_w_4) + str r3, [r1, r2, LSL #2] + mov pc, lr + +/* + * Read multiple + */ +ENTRY(a4x_bs_rm_1) + add r0, r1, r2, lsl #2 + ldr r2, [sp, #0] + mov r1, r3 + teq r2, #0 + moveq pc, lr +1: ldr r3, [r0] + subs r2, r2, #1 + strb r3, [r1], #1 + bne 1b + mov pc, lr + +ENTRY(a4x_bs_rm_2) + add r0, r1, r2, lsl #2 + ldr r2, [sp, #0] + mov r1, r3 + teq r2, #0 + moveq pc, lr +1: ldr r3, [r0] + subs r2, r2, #1 + strh r3, [r1], #2 + bne 1b + mov pc, lr + +/* + * Write multiple + */ +ENTRY(a4x_bs_wm_1) + add r0, r1, r2, lsl #2 + ldr r2, [sp, #0] + mov r1, r3 + teq r2, #0 + moveq pc, lr +1: ldrb r3, [r1], #1 + subs r2, r2, #1 + str r3, [r0] + bne 1b + mov pc, lr + +ENTRY(a4x_bs_wm_2) + add r0, r1, r2, lsl #2 + ldr r2, [sp, #0] + mov r1, r3 + teq r2, #0 + moveq pc, lr +1: ldrh r3, [r1], #2 + subs r2, r2, #1 + str r3, [r0] + bne 1b + mov pc, lr diff --git a/sys/arm/xscale/ixp425/ixp425_a4x_space.c b/sys/arm/xscale/ixp425/ixp425_a4x_space.c new file mode 100644 index 000000000000..1a6e28fd50ea --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_a4x_space.c @@ -0,0 +1,116 @@ +/* $NetBSD: ixp425_a4x_space.c,v 1.2 2005/12/11 12:16:51 christos Exp $ */ + +/* + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Bus space tag for 8/16-bit devices on 32-bit bus. + * all registers are located at the address of multiple of 4. + * + * Based on pxa2x0_a4x_space.c + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +/* Prototypes for all the bus_space structure functions */ +bs_protos(ixp425); +bs_protos(a4x); +bs_protos(generic); +bs_protos(generic_armv4); + +struct bus_space ixp425_a4x_bs_tag = { + /* cookie */ + .bs_cookie = (void *) 0, + + /* mapping/unmapping */ + .bs_map = ixp425_bs_map, + .bs_unmap = ixp425_bs_unmap, + .bs_subregion = ixp425_bs_subregion, + + /* allocation/deallocation */ + .bs_alloc = ixp425_bs_alloc, /* XXX not implemented */ + .bs_free = ixp425_bs_free, /* XXX not implemented */ + + /* barrier */ + .bs_barrier = ixp425_bs_barrier, + + /* read (single) */ + .bs_r_1 = a4x_bs_r_1, + .bs_r_2 = a4x_bs_r_2, + .bs_r_4 = a4x_bs_r_4, + + /* read multiple */ + .bs_rm_1 = a4x_bs_rm_1, + .bs_rm_2 = a4x_bs_rm_2, + + /* read region */ + /* XXX not implemented */ + + /* write (single) */ + .bs_w_1 = a4x_bs_w_1, + .bs_w_2 = a4x_bs_w_2, + .bs_w_4 = a4x_bs_w_4, + + /* write multiple */ + .bs_wm_1 = a4x_bs_wm_1, + .bs_wm_2 = a4x_bs_wm_2, + + /* write region */ + /* XXX not implemented */ + + /* set multiple */ + /* XXX not implemented */ + + /* set region */ + /* XXX not implemented */ + + /* copy */ + /* XXX not implemented */ +}; diff --git a/sys/arm/xscale/ixp425/ixp425_iic.c b/sys/arm/xscale/ixp425/ixp425_iic.c new file mode 100644 index 000000000000..9f8e35dd07d6 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_iic.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2006 Kevin Lo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "iicbb_if.h" + +#define I2C_DELAY 10 + +/* bit clr/set shorthands */ +#define GPIO_CONF_CLR(sc, reg, mask) \ + GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) &~ (mask)) +#define GPIO_CONF_SET(sc, reg, mask) \ + GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) | (mask)) + +struct ixpiic_softc { + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_gpio_ioh; + + device_t iicbb; +}; + +static struct ixpiic_softc *ixpiic_sc = NULL; + +static int +ixpiic_probe(device_t dev) +{ + device_set_desc(dev, "IXP425 GPIO-Based I2C Interface"); + return (0); +} + +static int +ixpiic_attach(device_t dev) +{ + struct ixpiic_softc *sc = device_get_softc(dev); + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + + ixpiic_sc = sc; + + sc->sc_dev = dev; + sc->sc_iot = sa->sc_iot; + sc->sc_gpio_ioh = sa->sc_gpio_ioh; + + GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, + GPIO_I2C_SCL_BIT | GPIO_I2C_SDA_BIT); + GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, + GPIO_I2C_SCL_BIT | GPIO_I2C_SDA_BIT); + + /* add generic bit-banging code */ + if ((sc->iicbb = device_add_child(dev, "iicbb", -1)) == NULL) + device_printf(dev, "could not add iicbb\n"); + + /* probe and attach the bit-banging code */ + device_probe_and_attach(sc->iicbb); + + return (0); +} + +static int +ixpiic_callback(device_t dev, int index, caddr_t *data) +{ + return (0); +} + +static int +ixpiic_getscl(device_t dev) +{ + struct ixpiic_softc *sc = ixpiic_sc; + uint32_t reg; + + GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT); + + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR); + return (reg & GPIO_I2C_SCL_BIT); +} + +static int +ixpiic_getsda(device_t dev) +{ + struct ixpiic_softc *sc = ixpiic_sc; + uint32_t reg; + + GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); + + reg = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR); + return (reg & GPIO_I2C_SDA_BIT); +} + +static void +ixpiic_setsda(device_t dev, char val) +{ + struct ixpiic_softc *sc = ixpiic_sc; + + GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SDA_BIT); + 
if (val) + GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); + else + GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); + DELAY(I2C_DELAY); +} + +static void +ixpiic_setscl(device_t dev, char val) +{ + struct ixpiic_softc *sc = ixpiic_sc; + + GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SCL_BIT); + if (val) + GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT); + else + GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT); + DELAY(I2C_DELAY); +} + +static int +ixpiic_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr) +{ + /* reset bus */ + ixpiic_setsda(dev, 1); + ixpiic_setscl(dev, 1); + + return (IIC_ENOADDR); +} + +static device_method_t ixpiic_methods[] = { + /* device interface */ + DEVMETHOD(device_probe, ixpiic_probe), + DEVMETHOD(device_attach, ixpiic_attach), + + /* iicbb interface */ + DEVMETHOD(iicbb_callback, ixpiic_callback), + DEVMETHOD(iicbb_setsda, ixpiic_setsda), + DEVMETHOD(iicbb_setscl, ixpiic_setscl), + DEVMETHOD(iicbb_getsda, ixpiic_getsda), + DEVMETHOD(iicbb_getscl, ixpiic_getscl), + DEVMETHOD(iicbb_reset, ixpiic_reset), + + { 0, 0 } +}; + +static driver_t ixpiic_driver = { + "ixpiic", + ixpiic_methods, + sizeof(struct ixpiic_softc), +}; +static devclass_t ixpiic_devclass; + +DRIVER_MODULE(ixpiic, ixp, ixpiic_driver, ixpiic_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/ixp425_intr.h b/sys/arm/xscale/ixp425/ixp425_intr.h new file mode 100644 index 000000000000..27217f467502 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_intr.h @@ -0,0 +1,149 @@ +/* $NetBSD: ixp425_intr.h,v 1.6 2005/12/24 20:06:52 perry Exp $ */ + +/* + * Copyright (c) 2001, 2002 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Jason R. Thorpe for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + * + */ + +#ifndef _IXP425_INTR_H_ +#define _IXP425_INTR_H_ + +#define ARM_IRQ_HANDLER _C_LABEL(ixp425_intr_dispatch) + +#ifndef _LOCORE + +#include +#include + +#include + +#define IXPREG(reg) *((__volatile u_int32_t*) (reg)) + +void ixp425_do_pending(void); + +extern __volatile uint32_t intr_enabled; +extern uint32_t intr_steer; + +static __inline void __attribute__((__unused__)) +ixp425_set_intrmask(void) +{ + IXPREG(IXP425_INT_ENABLE) = intr_enabled & IXP425_INT_HWMASK; +} + +static __inline void +ixp425_set_intrsteer(void) +{ + IXPREG(IXP425_INT_SELECT) = intr_steer & IXP425_INT_HWMASK; +} + +#define INT_SWMASK \ + ((1U << IXP425_INT_bit31) | (1U << IXP425_INT_bit30) | \ + (1U << IXP425_INT_bit14) | (1U << IXP425_INT_bit11)) + +#if 0 +static __inline void __attribute__((__unused__)) +ixp425_splx(int new) +{ + extern __volatile uint32_t intr_enabled; + extern __volatile int current_spl_level; + extern __volatile int ixp425_ipending; + extern void ixp425_do_pending(void); + int oldirqstate, hwpend; + + /* Don't let the compiler re-order this code with preceding code */ + __insn_barrier(); + + current_spl_level = new; + + hwpend = (ixp425_ipending & IXP425_INT_HWMASK) & ~new; + if (hwpend != 0) { + oldirqstate = disable_interrupts(I32_bit); + intr_enabled |= hwpend; + ixp425_set_intrmask(); + restore_interrupts(oldirqstate); + } + + if ((ixp425_ipending & INT_SWMASK) & ~new) + ixp425_do_pending(); +} + +static __inline int __attribute__((__unused__)) +ixp425_splraise(int ipl) +{ + extern __volatile int current_spl_level; + extern int ixp425_imask[]; + int old; + + old = current_spl_level; + current_spl_level |= ixp425_imask[ipl]; + + /* Don't let the compiler re-order this code with subsequent code */ + __insn_barrier(); + + return (old); +} + +static __inline int __attribute__((__unused__)) +ixp425_spllower(int ipl) +{ + extern __volatile int current_spl_level; + extern int ixp425_imask[]; + int old = current_spl_level; + + ixp425_splx(ixp425_imask[ipl]); + return(old); +} + +#endif +#if !defined(EVBARM_SPL_NOINLINE) + +#define splx(new) ixp425_splx(new) +#define _spllower(ipl) ixp425_spllower(ipl) +#define _splraise(ipl) ixp425_splraise(ipl) +void _setsoftintr(int); + +#else + +int _splraise(int); +int _spllower(int); +void splx(int); +void _setsoftintr(int); + +#endif /* ! EVBARM_SPL_NOINLINE */ + +#endif /* _LOCORE */ + +#endif /* _IXP425_INTR_H_ */ diff --git a/sys/arm/xscale/ixp425/ixp425_mem.c b/sys/arm/xscale/ixp425/ixp425_mem.c new file mode 100644 index 000000000000..df7f7e550545 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_mem.c @@ -0,0 +1,85 @@ +/* $NetBSD: ixp425_mem.c,v 1.2 2005/12/11 12:16:51 christos Exp $ */ + +/* + * Copyright (c) 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +#include +#include + +static uint32_t sdram_64bit[] = { + 0x00800000, /* 8M: One 2M x 32 chip */ + 0x01000000, /* 16M: Two 2M x 32 chips */ + 0x01000000, /* 16M: One 4M x 32 chip */ + 0x02000000, /* 32M: Two 4M x 32 chips */ + 0, 0, 0, 0 +}; + +static uint32_t sdram_other[] = { + 0x02000000, /* 32M: Two 8M x 16 chips */ + 0x04000000, /* 64M: Four 8M x 16 chips */ + 0x04000000, /* 64M: Two 16M x 16 chips */ + 0x08000000, /* 128M: Four 16M x 16 chips */ + 0x08000000, /* 128M: Two 32M x 16 chips */ + 0x10000000, /* 256M: Four 32M x 16 chips */ + 0, 0 +}; + +#define MCU_REG_READ(x) (*(volatile uint32_t *)(IXP425_MCU_VBASE + (x))) + +uint32_t +ixp425_sdram_size(void) +{ + uint32_t size, sdr_config; + + sdr_config = MCU_REG_READ(MCU_SDR_CONFIG); + + if (sdr_config & MCU_SDR_CONFIG_64MBIT) + size = sdram_64bit[MCU_SDR_CONFIG_MCONF(sdr_config)]; + else + size = sdram_other[MCU_SDR_CONFIG_MCONF(sdr_config)]; + + if (size == 0) { + printf("** SDR_CONFIG retuns unknown value, using 32M\n"); + size = 32 * 1024 * 1024; + } + + return (size); +} diff --git a/sys/arm/xscale/ixp425/ixp425_npe.c b/sys/arm/xscale/ixp425/ixp425_npe.c new file mode 100644 index 000000000000..0d1626ba3fc0 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_npe.c @@ -0,0 +1,1396 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ + +/*- + * Copyright (c) 2001-2005, Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. +*/ +#include +__FBSDID("$FreeBSD$"); + +/* + * Intel XScale Network Processing Engine (NPE) support. + * + * Each NPE has an ixpnpeX device associated with it that is + * attached at boot. Depending on the microcode loaded into + * an NPE there may be an Ethernet interface (npeX) or some + * other network interface (e.g. for ATM). This file has support + * for loading microcode images and the associated NPE CPU + * manipulations (start, stop, reset). + * + * The code here basically replaces the npeDl and npeMh classes + * in the Intel Access Library (IAL). + * + * NB: Microcode images are loaded with firmware(9). To + * include microcode in a static kernel include the + * ixpnpe_fw device. Otherwise the firmware will be + * automatically loaded from the filesystem. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ixpnpe_softc { + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_ioh; + bus_size_t sc_size; /* size of mapped register window */ + struct resource *sc_irq; /* IRQ resource */ + void *sc_ih; /* interrupt handler */ + struct mtx sc_mtx; /* mailbox lock */ + uint32_t sc_msg[2]; /* reply msg collected in ixpnpe_intr */ + int sc_msgwaiting; /* sc_msg holds valid data */ + + int validImage; /* valid ucode image loaded */ + int started; /* NPE is started */ + uint8_t functionalityId;/* ucode functionality ID */ + int insMemSize; /* size of instruction memory */ + int dataMemSize; /* size of data memory */ + uint32_t savedExecCount; + uint32_t savedEcsDbgCtxtReg2; +}; + +#define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff + +/* used to read download map from version in microcode image */ +#define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000 +#define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001 +#define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002 +#define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F + +/* + * masks used to extract address info from State information context + * register addresses as read from microcode image + */ +#define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F +#define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0 + +/* LSB offset of Context Number field in State-Info Context Address */ +#define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4 + +/* size (in words) of single State Information entry (ctxt reg address|data) */ +#define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2 + +typedef struct { + uint32_t type; + uint32_t offset; +} IxNpeDlNpeMgrDownloadMapBlockEntry; + +typedef union { + IxNpeDlNpeMgrDownloadMapBlockEntry block; + uint32_t eodmMarker; +} IxNpeDlNpeMgrDownloadMapEntry; + +typedef struct { + /* 1st entry in the download map (there may be more than one) */ + IxNpeDlNpeMgrDownloadMapEntry entry[1]; +} IxNpeDlNpeMgrDownloadMap; + +/* used to access an instruction or data block in a microcode image */ +typedef struct { + uint32_t npeMemAddress; + uint32_t size; + uint32_t data[1]; +} IxNpeDlNpeMgrCodeBlock; + +/* used to access each Context Reg entry state-information block */ +typedef struct { + uint32_t addressInfo; + uint32_t value; +} IxNpeDlNpeMgrStateInfoCtxtRegEntry; + +/* used to access a state-information block in a microcode image */ +typedef struct { + uint32_t size; + IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1]; +} IxNpeDlNpeMgrStateInfoBlock; + +static int npe_debug = 0; +SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RW, &npe_debug, + 0, "IXP425 NPE debug msgs"); +TUNABLE_INT("debug.ixp425npe", &npe_debug); +#define DPRINTF(dev, fmt, ...) do { \ + if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \ +} while (0) +#define DPRINTFn(n, dev, fmt, ...) 
do { \ + if (npe_debug >= n) printf(fmt, __VA_ARGS__); \ +} while (0) + +static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t); +static int npe_isstopped(struct ixpnpe_softc *); +static int npe_load_ins(struct ixpnpe_softc *, + const IxNpeDlNpeMgrCodeBlock *bp, int verify); +static int npe_load_data(struct ixpnpe_softc *, + const IxNpeDlNpeMgrCodeBlock *bp, int verify); +static int npe_load_stateinfo(struct ixpnpe_softc *, + const IxNpeDlNpeMgrStateInfoBlock *bp, int verify); +static int npe_load_image(struct ixpnpe_softc *, + const uint32_t *imageCodePtr, int verify); +static int npe_cpu_reset(struct ixpnpe_softc *); +static int npe_cpu_start(struct ixpnpe_softc *); +static int npe_cpu_stop(struct ixpnpe_softc *); +static void npe_cmd_issue_write(struct ixpnpe_softc *, + uint32_t cmd, uint32_t addr, uint32_t data); +static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *, + uint32_t cmd, uint32_t addr); +static int npe_ins_write(struct ixpnpe_softc *, + uint32_t addr, uint32_t data, int verify); +static int npe_data_write(struct ixpnpe_softc *, + uint32_t addr, uint32_t data, int verify); +static void npe_ecs_reg_write(struct ixpnpe_softc *, + uint32_t reg, uint32_t data); +static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg); +static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command); +static void npe_cpu_step_save(struct ixpnpe_softc *); +static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction, + uint32_t ctxtNum, uint32_t ldur); +static void npe_cpu_step_restore(struct ixpnpe_softc *); +static int npe_logical_reg_read(struct ixpnpe_softc *, + uint32_t regAddr, uint32_t regSize, + uint32_t ctxtNum, uint32_t *regVal); +static int npe_logical_reg_write(struct ixpnpe_softc *, + uint32_t regAddr, uint32_t regVal, + uint32_t regSize, uint32_t ctxtNum, int verify); +static int npe_physical_reg_write(struct ixpnpe_softc *, + uint32_t regAddr, uint32_t regValue, int verify); +static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum, + uint32_t ctxtReg, uint32_t ctxtRegVal, int verify); + +static void ixpnpe_intr(void *arg); + +static uint32_t +npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off) +{ + uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); + DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v); + return v; +} + +static void +npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val) +{ + DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val); + bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); +} + +struct ixpnpe_softc * +ixpnpe_attach(device_t dev) +{ + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + struct ixpnpe_softc *sc; + bus_addr_t base; + int rid, irq; + + /* XXX M_BUS */ + sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO); + sc->sc_dev = dev; + sc->sc_iot = sa->sc_iot; + mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF); + + if (device_get_unit(dev) == 0) { + base = IXP425_NPE_B_HWBASE; + sc->sc_size = IXP425_NPE_B_SIZE; + irq = IXP425_INT_NPE_B; + + /* size of instruction memory */ + sc->insMemSize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB; + /* size of data memory */ + sc->dataMemSize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB; + } else { + base = IXP425_NPE_C_HWBASE; + sc->sc_size = IXP425_NPE_C_SIZE; + irq = IXP425_INT_NPE_C; + + /* size of instruction memory */ + sc->insMemSize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC; + /* size of data memory */ + sc->dataMemSize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC; + } + if 
(bus_space_map(sc->sc_iot, base, sc->sc_size, 0, &sc->sc_ioh)) + panic("%s: Cannot map registers", device_get_name(dev)); + + /* + * Setup IRQ and handler for NPE message support. + */ + rid = 0; + sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, + irq, irq, 1, RF_ACTIVE); + if (!sc->sc_irq) + panic("%s: Unable to allocate irq %u", device_get_name(dev), irq); + /* XXX could be a source of entropy */ + bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, + ixpnpe_intr, sc, &sc->sc_ih); + /* enable output fifo interrupts (NB: must also set OFIFO Write Enable) */ + npe_reg_write(sc, IX_NPECTL, + npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE)); + + return sc; +} + +void +ixpnpe_detach(struct ixpnpe_softc *sc) +{ + /* disable output fifo interrupts */ + npe_reg_write(sc, IX_NPECTL, + npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE)); + + bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih); + bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size); + mtx_destroy(&sc->sc_mtx); + free(sc, M_TEMP); +} + +int +ixpnpe_stopandreset(struct ixpnpe_softc *sc) +{ + int error; + + mtx_lock(&sc->sc_mtx); + error = npe_cpu_stop(sc); /* stop NPE */ + if (error == 0) + error = npe_cpu_reset(sc); /* reset it */ + if (error == 0) + sc->started = 0; /* mark stopped */ + mtx_unlock(&sc->sc_mtx); + + DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); + return error; +} + +static int +ixpnpe_start_locked(struct ixpnpe_softc *sc) +{ + int error; + + if (!sc->started) { + error = npe_cpu_start(sc); + if (error == 0) + sc->started = 1; + } else + error = 0; + + DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); + return error; +} + +int +ixpnpe_start(struct ixpnpe_softc *sc) +{ + int ret; + + mtx_lock(&sc->sc_mtx); + ret = ixpnpe_start_locked(sc); + mtx_unlock(&sc->sc_mtx); + return (ret); +} + +int +ixpnpe_stop(struct ixpnpe_softc *sc) +{ + int error; + + mtx_lock(&sc->sc_mtx); + error = npe_cpu_stop(sc); + if (error == 0) + sc->started = 0; + mtx_unlock(&sc->sc_mtx); + + DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); + return error; +} + +/* + * Indicates the start of an NPE Image, in new NPE Image Library format. 
+ * 2 consecutive occurances indicates the end of the NPE Image Library + */ +#define NPE_IMAGE_MARKER 0xfeedf00d + +/* + * NPE Image Header definition, used in new NPE Image Library format + */ +typedef struct { + uint32_t marker; + uint32_t id; + uint32_t size; +} IxNpeDlImageMgrImageHeader; + +static int +npe_findimage(struct ixpnpe_softc *sc, + const uint32_t *imageLibrary, uint32_t imageId, + const uint32_t **imagePtr, uint32_t *imageSize) +{ + const IxNpeDlImageMgrImageHeader *image; + uint32_t offset = 0; + + while (imageLibrary[offset] == NPE_IMAGE_MARKER) { + image = (const IxNpeDlImageMgrImageHeader *)&imageLibrary[offset]; + offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t); + + DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n", + __func__, offset, image->marker, image->id, image->size); + if (image->id == imageId) { + *imagePtr = imageLibrary + offset; + *imageSize = image->size; + return 0; + } + /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */ + if (image->id == NPE_IMAGE_MARKER) { + device_printf(sc->sc_dev, + "imageId 0x%08x not found in image library header\n", imageId); + /* reached end of library, image not found */ + return EIO; + } + offset += image->size; + } + return EIO; +} + +int +ixpnpe_init(struct ixpnpe_softc *sc, const char *imageName, uint32_t imageId) +{ + uint32_t imageSize; + const uint32_t *imageCodePtr; + struct firmware *fw; + int error; + + DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId); + +#if 0 + IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId); + /* + * Checking if image being loaded is meant for device that is running. + * Image is forward compatible. i.e Image built for IXP42X should run + * on IXP46X but not vice versa. + */ + if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK)) + return EINVAL; +#endif + error = ixpnpe_stopandreset(sc); /* stop and reset the NPE */ + if (error != 0) + return error; + + fw = firmware_get(imageName); + if (fw == NULL) + return ENOENT; + + /* Locate desired image in files w/ combined images */ + error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize); + if (error != 0) + goto done; + + /* + * If download was successful, store image Id in list of + * currently loaded images. If a critical error occured + * during download, record that the NPE has an invalid image + */ + mtx_lock(&sc->sc_mtx); + error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/); + if (error == 0) { + sc->validImage = 1; + error = ixpnpe_start_locked(sc); + } else { + sc->validImage = 0; + } + sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId); + mtx_unlock(&sc->sc_mtx); +done: + firmware_put(fw, FIRMWARE_UNLOAD); + DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error); + return error; +} + +int +ixpnpe_getfunctionality(struct ixpnpe_softc *sc) +{ + return (sc->validImage ? 
sc->functionalityId : 0); +} + +static int +npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet) +{ + uint32_t val; + + val = npe_reg_read(sc, reg); + DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n", + __func__, reg, expectedBitsSet, val, + (val & expectedBitsSet) == expectedBitsSet); + return ((val & expectedBitsSet) == expectedBitsSet); +} + +static int +npe_isstopped(struct ixpnpe_softc *sc) +{ + return npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP); +} + +static int +npe_load_ins(struct ixpnpe_softc *sc, + const IxNpeDlNpeMgrCodeBlock *bp, int verify) +{ + uint32_t npeMemAddress; + int i, blockSize; + + npeMemAddress = bp->npeMemAddress; + blockSize = bp->size; /* NB: instruction/data count */ + if (npeMemAddress + blockSize > sc->insMemSize) { + device_printf(sc->sc_dev, "Block size too big for NPE memory\n"); + return EINVAL; /* XXX */ + } + for (i = 0; i < blockSize; i++, npeMemAddress++) { + if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) { + device_printf(sc->sc_dev, "NPE instruction write failed"); + return EIO; + } + } + return 0; +} + +static int +npe_load_data(struct ixpnpe_softc *sc, + const IxNpeDlNpeMgrCodeBlock *bp, int verify) +{ + uint32_t npeMemAddress; + int i, blockSize; + + npeMemAddress = bp->npeMemAddress; + blockSize = bp->size; /* NB: instruction/data count */ + if (npeMemAddress + blockSize > sc->dataMemSize) { + device_printf(sc->sc_dev, "Block size too big for NPE memory\n"); + return EINVAL; + } + for (i = 0; i < blockSize; i++, npeMemAddress++) { + if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) { + device_printf(sc->sc_dev, "NPE data write failed\n"); + return EIO; + } + } + return 0; +} + +static int +npe_load_stateinfo(struct ixpnpe_softc *sc, + const IxNpeDlNpeMgrStateInfoBlock *bp, int verify) +{ + int i, nentries, error; + + npe_cpu_step_save(sc); + + /* for each state-info context register entry in block */ + nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE; + error = 0; + for (i = 0; i < nentries; i++) { + /* each state-info entry is 2 words (address, value) in length */ + uint32_t regVal = bp->ctxtRegEntry[i].value; + uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo; + + uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG); + uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >> + IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM; + + /* error-check Context Register No. 
and Context Number values */ + if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) { + device_printf(sc->sc_dev, "invalid Context Register %u\n", reg); + error = EINVAL; + break; + } + if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) { + device_printf(sc->sc_dev, "invalid Context Number %u\n", cNum); + error = EINVAL; + break; + } + /* NOTE that there is no STEVT register for Context 0 */ + if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) { + device_printf(sc->sc_dev, "no STEVT for Context 0\n"); + error = EINVAL; + break; + } + + if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) { + device_printf(sc->sc_dev, "write of state-info to NPE failed\n"); + error = EIO; + break; + } + } + + npe_cpu_step_restore(sc); + return error; +} + +static int +npe_load_image(struct ixpnpe_softc *sc, + const uint32_t *imageCodePtr, int verify) +{ +#define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP) + const IxNpeDlNpeMgrDownloadMap *downloadMap; + int i, error; + + if (!npe_isstopped(sc)) { /* verify NPE is stopped */ + device_printf(sc->sc_dev, "cannot load image, NPE not stopped\n"); + return EIO; + } + + /* + * Read Download Map, checking each block type and calling + * appropriate function to perform download + */ + error = 0; + downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr; + for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) { + /* calculate pointer to block to be downloaded */ + const uint32_t *bp = imageCodePtr + downloadMap->entry[i].block.offset; + switch (downloadMap->entry[i].block.type) { + case IX_NPEDL_BLOCK_TYPE_INSTRUCTION: + error = npe_load_ins(sc, + (const IxNpeDlNpeMgrCodeBlock *) bp, verify); + DPRINTF(sc->sc_dev, "%s: inst, error %d\n", __func__, error); + break; + case IX_NPEDL_BLOCK_TYPE_DATA: + error = npe_load_data(sc, + (const IxNpeDlNpeMgrCodeBlock *) bp, verify); + DPRINTF(sc->sc_dev, "%s: data, error %d\n", __func__, error); + break; + case IX_NPEDL_BLOCK_TYPE_STATE: + error = npe_load_stateinfo(sc, + (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify); + DPRINTF(sc->sc_dev, "%s: state, error %d\n", __func__, error); + break; + default: + device_printf(sc->sc_dev, + "unknown block type 0x%x in download map\n", + downloadMap->entry[i].block.type); + error = EIO; /* XXX */ + break; + } + if (error != 0) + break; + } + return error; +#undef EOM +} + +/* contains Reset values for Context Store Registers */ +static const struct { + uint32_t regAddr; + uint32_t regResetVal; +} ixNpeDlEcsRegResetValues[] = { + { IX_NPEDL_ECS_BG_CTXT_REG_0, IX_NPEDL_ECS_BG_CTXT_REG_0_RESET }, + { IX_NPEDL_ECS_BG_CTXT_REG_1, IX_NPEDL_ECS_BG_CTXT_REG_1_RESET }, + { IX_NPEDL_ECS_BG_CTXT_REG_2, IX_NPEDL_ECS_BG_CTXT_REG_2_RESET }, + { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET }, + { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET }, + { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET }, + { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET }, + { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET }, + { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET }, + { IX_NPEDL_ECS_DBG_CTXT_REG_0, IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET }, + { IX_NPEDL_ECS_DBG_CTXT_REG_1, IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET }, + { IX_NPEDL_ECS_DBG_CTXT_REG_2, IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET }, + { IX_NPEDL_ECS_INSTRUCT_REG, IX_NPEDL_ECS_INSTRUCT_REG_RESET } +}; + +/* contains Reset values for Context Store Registers */ +static const uint32_t ixNpeDlCtxtRegResetValues[] = { + 
IX_NPEDL_CTXT_REG_RESET_STEVT, + IX_NPEDL_CTXT_REG_RESET_STARTPC, + IX_NPEDL_CTXT_REG_RESET_REGMAP, + IX_NPEDL_CTXT_REG_RESET_CINDEX, +}; + +#define IX_NPEDL_RESET_NPE_PARITY 0x0800 +#define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF +#define IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF + +static int +npe_cpu_reset(struct ixpnpe_softc *sc) +{ +#define N(a) (sizeof(a) / sizeof(a[0])) + struct ixp425_softc *sa = device_get_softc(device_get_parent(sc->sc_dev)); + uint32_t ctxtReg; /* identifies Context Store reg (0-3) */ + uint32_t regAddr; + uint32_t regVal; + uint32_t resetNpeParity; + uint32_t ixNpeConfigCtrlRegVal; + int i, error = 0; + + /* pre-store the NPE Config Control Register Value */ + ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL); + ixNpeConfigCtrlRegVal |= 0x3F000000; + + /* disable the parity interrupt */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL, + (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK)); + DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n", + __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK); + + npe_cpu_step_save(sc); + + /* + * Clear the FIFOs. + */ + while (npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) { + /* read from the Watch-point FIFO until empty */ + (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO); + } + + while (npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) { + /* read from the outFIFO until empty */ + (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO); + } + + while (npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) { + /* + * Step execution of the NPE intruction to read inFIFO using + * the Debug Executing Context stack. + */ + error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0); + if (error != 0) { + DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n", + __func__, error); + npe_cpu_step_restore(sc); + return error; + } + } + + /* + * Reset the mailbox reg + */ + /* ...from XScale side */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST); + /* ...from NPE side */ + error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0); + if (error != 0) { + DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n", __func__, error); + npe_cpu_step_restore(sc); + return error; + } + + /* + * Reset the physical registers in the NPE register file: + * Note: no need to save/restore REGMAP for Context 0 here + * since all Context Store regs are reset in subsequent code. 
+ */ + for (regAddr = 0; + regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0; + regAddr++) { + /* for each physical register in the NPE reg file, write 0 : */ + error = npe_physical_reg_write(sc, regAddr, 0, TRUE); + if (error != 0) { + DPRINTF(sc->sc_dev, "%s: cannot write phy reg, error %u\n", + __func__, error); + npe_cpu_step_restore(sc); + return error; /* abort reset */ + } + } + + /* + * Reset the context store: + */ + for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) { + /* set each context's Context Store registers to reset values: */ + for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) { + /* NOTE that there is no STEVT register for Context 0 */ + if (!(i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)) { + regVal = ixNpeDlCtxtRegResetValues[ctxtReg]; + error = npe_ctx_reg_write(sc, i, ctxtReg, regVal, TRUE); + if (error != 0) { + DPRINTF(sc->sc_dev, "%s: cannot write ctx reg, error %u\n", + __func__, error); + npe_cpu_step_restore(sc); + return error; /* abort reset */ + } + } + } + } + + npe_cpu_step_restore(sc); + + /* write Reset values to Execution Context Stack registers */ + for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++) + npe_ecs_reg_write(sc, + ixNpeDlEcsRegResetValues[i].regAddr, + ixNpeDlEcsRegResetValues[i].regResetVal); + + /* clear the profile counter */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT); + + /* clear registers EXCT, AP0, AP1, AP2 and AP3 */ + for (regAddr = IX_NPEDL_REG_OFFSET_EXCT; + regAddr <= IX_NPEDL_REG_OFFSET_AP3; + regAddr += sizeof(uint32_t)) + npe_reg_write(sc, regAddr, 0); + + /* Reset the Watch-count register */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0); + + /* + * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation + */ + + /* + * Reset the NPE and its coprocessor - to reset internal + * states and remove parity error. Note this makes no + * sense based on the documentation. The feature control + * register always reads back as 0 on the ixp425 and further + * the bit definition of NPEA/NPEB is off by 1 according to + * the Intel documention--so we're blindly following the + * Intel code w/o any real understanding. + */ + regVal = EXP_BUS_READ_4(sa, EXP_FCTRL_OFFSET); + DPRINTFn(2, sc->sc_dev, "%s: FCTRL 0x%x\n", __func__, regVal); + resetNpeParity = + IX_NPEDL_RESET_NPE_PARITY << (1 + device_get_unit(sc->sc_dev)); + DPRINTFn(2, sc->sc_dev, "%s: FCTRL fuse parity, write 0x%x\n", + __func__, regVal | resetNpeParity); + EXP_BUS_WRITE_4(sa, EXP_FCTRL_OFFSET, regVal | resetNpeParity); + + /* un-fuse and un-reset the NPE & coprocessor */ + DPRINTFn(2, sc->sc_dev, "%s: FCTRL unfuse parity, write 0x%x\n", + __func__, regVal & resetNpeParity); + EXP_BUS_WRITE_4(sa, EXP_FCTRL_OFFSET, regVal &~ resetNpeParity); + + /* + * Call NpeMgr function to stop the NPE again after the Feature Control + * has unfused and Un-Reset the NPE and its associated Coprocessors. + */ + error = npe_cpu_stop(sc); + + /* restore NPE configuration bus Control Register - Parity Settings */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL, + (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK)); + DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n", + __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL)); + + return error; +#undef N +} + +static int +npe_cpu_start(struct ixpnpe_softc *sc) +{ + uint32_t ecsRegVal; + + /* + * Ensure only Background Context Stack Level is Active by turning off + * the Active bit in each of the other Executing Context Stack levels. 
+ */ + ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0); + ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; + npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal); + + ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0); + ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; + npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal); + + ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0); + ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE; + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal); + + /* clear the pipeline */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); + + /* start NPE execution by issuing command through EXCTL register on NPE */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START); + + /* + * Check execution status of NPE to verify operation was successful. + */ + return npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 0 : EIO; +} + +static int +npe_cpu_stop(struct ixpnpe_softc *sc) +{ + /* stop NPE execution by issuing command through EXCTL register on NPE */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP); + + /* verify that NPE Stop was successful */ + return npe_checkbits(sc, + IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO; +} + +#define IX_NPEDL_REG_SIZE_BYTE 8 +#define IX_NPEDL_REG_SIZE_SHORT 16 +#define IX_NPEDL_REG_SIZE_WORD 32 + +/* + * Introduce extra read cycles after issuing read command to NPE + * so that we read the register after the NPE has updated it + * This is to overcome race condition between XScale and NPE + */ +#define IX_NPEDL_DELAY_READ_CYCLES 2 +/* + * To mask top three MSBs of 32bit word to download into NPE IMEM + */ +#define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF; + +static void +npe_cmd_issue_write(struct ixpnpe_softc *sc, + uint32_t cmd, uint32_t addr, uint32_t data) +{ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data); + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); +} + +static uint32_t +npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr) +{ + uint32_t data; + int i; + + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr); + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd); + for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++) + data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA); + return data; +} + +static int +npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify) +{ + DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data); + npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data); + if (verify) { + uint32_t rdata; + + /* + * Write invalid data to this reg, so we can see if we're reading + * the EXDATA register too early. + */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data); + + /* Disabled since top 3 MSB are not used for Azusa hardware Refer WR:IXA00053900*/ + data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS; + + rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM, addr); + rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS; + + if (data != rdata) + return EIO; + } + return 0; +} + +static int +npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify) +{ + DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data); + npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data); + if (verify) { + /* + * Write invalid data to this reg, so we can see if we're reading + * the EXDATA register too early. 
+ */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data); + if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr)) + return EIO; + } + return 0; +} + +static void +npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data) +{ + npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data); +} + +static uint32_t +npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg) +{ + return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg); +} + +static void +npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command) +{ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command); +} + +static void +npe_cpu_step_save(struct ixpnpe_softc *sc) +{ + /* turn off the halt bit by clearing Execution Count register. */ + /* save reg contents 1st and restore later */ + sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT); + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0); + + /* ensure that IF and IE are on (temporarily), so that we don't end up + * stepping forever */ + sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2); + + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, + (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF | + IX_NPEDL_MASK_ECS_DBG_REG_2_IE)); +} + +static int +npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction, + uint32_t ctxtNum, uint32_t ldur) +{ +#define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000 + uint32_t ecsDbgRegVal; + uint32_t oldWatchcount, newWatchcount; + int tries; + + /* set the Active bit, and the LDUR, in the debug level */ + ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE | + (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR); + + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal); + + /* + * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the + * instruction, and set SELCTXT at ECS DEBUG Level to specify which context + * store to access. + * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number + */ + ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) | + (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT); + + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal); + + /* clear the pipeline */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); + + /* load NPE instruction into the instruction register */ + npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction); + + /* we need this value later to wait for completion of NPE execution step */ + oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); + + /* issue a Step One command via the Execution Control register */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP); + + /* + * Force the XScale to wait until the NPE has finished execution step + * NOTE that this delay will be very small, just long enough to allow a + * single NPE instruction to complete execution; if instruction execution + * is not completed before timeout retries, exit the while loop. + */ + newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); + for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES && + newWatchcount == oldWatchcount; tries++) { + /* Watch Count register increments when NPE completes an instruction */ + newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC); + } + return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 
0 : EIO; +#undef IX_NPE_DL_MAX_NUM_OF_RETRIES +} + +static void +npe_cpu_step_restore(struct ixpnpe_softc *sc) +{ + /* clear active bit in debug level */ + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0); + + /* clear the pipeline */ + npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); + + /* restore Execution Count register contents. */ + npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount); + + /* restore IF and IE bits to original values */ + npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2); +} + +static int +npe_logical_reg_read(struct ixpnpe_softc *sc, + uint32_t regAddr, uint32_t regSize, + uint32_t ctxtNum, uint32_t *regVal) +{ + uint32_t npeInstruction, mask; + int error; + + switch (regSize) { + case IX_NPEDL_REG_SIZE_BYTE: + npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE; + mask = 0xff; + break; + case IX_NPEDL_REG_SIZE_SHORT: + npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT; + mask = 0xffff; + break; + case IX_NPEDL_REG_SIZE_WORD: + npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD; + mask = 0xffffffff; + break; + default: + return EINVAL; + } + + /* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */ + npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) | + (regAddr << IX_NPEDL_OFFSET_INSTR_DEST); + + /* step execution of NPE intruction using Debug Executing Context stack */ + error = npe_cpu_step(sc, npeInstruction, ctxtNum, IX_NPEDL_RD_INSTR_LDUR); + if (error != 0) { + DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n", + __func__, regAddr, regSize, ctxtNum, error); + return error; + } + /* read value of register from Execution Data register */ + *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA); + + /* align value from left to right */ + *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask; + + return 0; +} + +static int +npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal, + uint32_t regSize, uint32_t ctxtNum, int verify) +{ + int error; + + DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n", + __func__, regAddr, regVal, regSize, ctxtNum); + if (regSize == IX_NPEDL_REG_SIZE_WORD) { + /* NPE register addressing is left-to-right: e.g. 
|d0|d1|d2|d3| */ + /* Write upper half-word (short) to |d0|d1| */ + error = npe_logical_reg_write(sc, regAddr, + regVal >> IX_NPEDL_REG_SIZE_SHORT, + IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify); + if (error != 0) + return error; + + /* Write lower half-word (short) to |d2|d3| */ + error = npe_logical_reg_write(sc, + regAddr + sizeof(uint16_t), + regVal & 0xffff, + IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify); + } else { + uint32_t npeInstruction; + + switch (regSize) { + case IX_NPEDL_REG_SIZE_BYTE: + npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE; + regVal &= 0xff; + break; + case IX_NPEDL_REG_SIZE_SHORT: + npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT; + regVal &= 0xffff; + break; + default: + return EINVAL; + } + /* fill dest operand field of instruction with destination reg addr */ + npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST); + + /* fill src operand field of instruction with least-sig 5 bits of val*/ + npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) << + IX_NPEDL_OFFSET_INSTR_SRC); + + /* fill coprocessor field of instruction with most-sig 11 bits of val*/ + npeInstruction |= ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) << + IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA); + + /* step execution of NPE intruction using Debug ECS */ + error = npe_cpu_step(sc, npeInstruction, + ctxtNum, IX_NPEDL_WR_INSTR_LDUR); + } + if (error != 0) { + DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u writing reg\n", + __func__, regAddr, regVal, regSize, ctxtNum, error); + return error; + } + if (verify) { + uint32_t retRegVal; + + error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum, &retRegVal); + if (error == 0 && regVal != retRegVal) + error = EIO; /* XXX ambiguous */ + } + return error; +} + +/* + * There are 32 physical registers used in an NPE. These are + * treated as 16 pairs of 32-bit registers. To write one of the pair, + * write the pair number (0-16) to the REGMAP for Context 0. Then write + * the value to register 0 or 4 in the regfile, depending on which + * register of the pair is to be written + */ +static int +npe_physical_reg_write(struct ixpnpe_softc *sc, + uint32_t regAddr, uint32_t regValue, int verify) +{ + int error; + + /* + * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair (0-16) + * of physical registers to write . + */ + error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP, + (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP), + IX_NPEDL_REG_SIZE_SHORT, 0, verify); + if (error == 0) { + /* regAddr = 0 or 4 */ + regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) * + sizeof(uint32_t); + error = npe_logical_reg_write(sc, regAddr, regValue, + IX_NPEDL_REG_SIZE_WORD, 0, verify); + } + return error; +} + +static int +npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum, + uint32_t ctxtReg, uint32_t ctxtRegVal, int verify) +{ + DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n", + __func__, ctxtNum, ctxtReg, ctxtRegVal); + /* + * Context 0 has no STARTPC. 
Instead, this value is used to set + * NextPC for Background ECS, to set where NPE starts executing code + */ + if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) { + /* read BG_CTXT_REG_0, update NEXTPC bits, and write back to reg */ + uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0); + v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC; + v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) & + IX_NPEDL_MASK_ECS_REG_0_NEXTPC; + + npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v); + return 0; + } else { + static const struct { + uint32_t regAddress; + uint32_t regSize; + } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = { + { IX_NPEDL_CTXT_REG_ADDR_STEVT, IX_NPEDL_REG_SIZE_BYTE }, + { IX_NPEDL_CTXT_REG_ADDR_STARTPC, IX_NPEDL_REG_SIZE_SHORT }, + { IX_NPEDL_CTXT_REG_ADDR_REGMAP, IX_NPEDL_REG_SIZE_SHORT }, + { IX_NPEDL_CTXT_REG_ADDR_CINDEX, IX_NPEDL_REG_SIZE_BYTE } + }; + return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress, + ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify); + } +} + +/* + * NPE Mailbox support. + */ +#define IX_NPEMH_MAXTRIES 100000 + +static int +ixpnpe_ofifo_wait(struct ixpnpe_softc *sc) +{ + int i; + + for (i = 0; i < IX_NPEMH_MAXTRIES; i++) { + if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE) + return 1; + DELAY(10); + } + device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n", + __func__, npe_reg_read(sc, IX_NPESTAT)); + return 0; +} + +static void +ixpnpe_intr(void *arg) +{ + struct ixpnpe_softc *sc = arg; + uint32_t status; + + status = npe_reg_read(sc, IX_NPESTAT); + if ((status & IX_NPESTAT_OFINT) == 0) { + /* NB: should not happen */ + device_printf(sc->sc_dev, "%s: status 0x%x\n", __func__, status); + /* XXX must silence interrupt? */ + return; + } + /* + * A message is waiting in the output FIFO, copy it so + * the interrupt will be silenced; then signal anyone + * waiting to collect the result. + */ + sc->sc_msgwaiting = -1; /* NB: error indicator */ + if (ixpnpe_ofifo_wait(sc)) { + sc->sc_msg[0] = npe_reg_read(sc, IX_NPEFIFO); + if (ixpnpe_ofifo_wait(sc)) { + sc->sc_msg[1] = npe_reg_read(sc, IX_NPEFIFO); + sc->sc_msgwaiting = 1; /* successful fetch */ + } + } + wakeup_one(sc); +} + +static int +ixpnpe_ififo_wait(struct ixpnpe_softc *sc) +{ + int i; + + for (i = 0; i < IX_NPEMH_MAXTRIES; i++) { + if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF) + return 1; + DELAY(10); + } + return 0; +} + +static int +ixpnpe_sendmsg_locked(struct ixpnpe_softc *sc, const uint32_t msg[2]) +{ + int error = 0; + + mtx_assert(&sc->sc_mtx, MA_OWNED); + + sc->sc_msgwaiting = 0; + if (ixpnpe_ififo_wait(sc)) { + npe_reg_write(sc, IX_NPEFIFO, msg[0]); + if (ixpnpe_ififo_wait(sc)) + npe_reg_write(sc, IX_NPEFIFO, msg[1]); + else + error = EIO; + } else + error = EIO; + + if (error) + device_printf(sc->sc_dev, "input FIFO timeout, msg [0x%x,0x%x]\n", + msg[0], msg[1]); + return error; +} + +static int +ixpnpe_recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2]) +{ + mtx_assert(&sc->sc_mtx, MA_OWNED); + + if (!sc->sc_msgwaiting) + msleep(sc, &sc->sc_mtx, 0, "npemh", 0); + bcopy(sc->sc_msg, msg, sizeof(sc->sc_msg)); + /* NB: sc_msgwaiting != 1 means the ack fetch failed */ + return sc->sc_msgwaiting != 1 ? EIO : 0; +} + +/* + * Send a msg to the NPE and wait for a reply. We use the + * private mutex and sleep until an interrupt is received + * signalling the availability of data in the output FIFO + * so the caller cannot be holding a mutex. May be better + * piggyback on the caller's mutex instead but that would + * make other locking confusing. 
+ */ +int +ixpnpe_sendandrecvmsg(struct ixpnpe_softc *sc, + const uint32_t send[2], uint32_t recv[2]) +{ + int error; + + mtx_lock(&sc->sc_mtx); + error = ixpnpe_sendmsg_locked(sc, send); + if (error == 0) + error = ixpnpe_recvmsg_locked(sc, recv); + mtx_unlock(&sc->sc_mtx); + + return error; +} + +/* XXX temporary, not reliable */ + +int +ixpnpe_sendmsg(struct ixpnpe_softc *sc, const uint32_t msg[2]) +{ + int error; + + mtx_lock(&sc->sc_mtx); + error = ixpnpe_sendmsg_locked(sc, msg); + mtx_unlock(&sc->sc_mtx); + + return error; +} + +int +ixpnpe_recvmsg(struct ixpnpe_softc *sc, uint32_t msg[2]) +{ + int error; + + mtx_lock(&sc->sc_mtx); + if (sc->sc_msgwaiting) + bcopy(sc->sc_msg, msg, sizeof(sc->sc_msg)); + /* NB: sc_msgwaiting != 1 means the ack fetch failed */ + error = sc->sc_msgwaiting != 1 ? EIO : 0; + mtx_unlock(&sc->sc_mtx); + + return error; +} diff --git a/sys/arm/xscale/ixp425/ixp425_npereg.h b/sys/arm/xscale/ixp425/ixp425_npereg.h new file mode 100644 index 000000000000..b3e684cbbe08 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_npereg.h @@ -0,0 +1,434 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ + +/*- + * Copyright (c) 2001-2005, Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. +*/ + +#ifndef _IXP425_NPEREG_H_ +#define _IXP425_NPEREG_H_ + +/* signature found as 1st word in a microcode image library */ +#define IX_NPEDL_IMAGEMGR_SIGNATURE 0xDEADBEEF +/* marks end of header in a microcode image library */ +#define IX_NPEDL_IMAGEMGR_END_OF_HEADER 0xFFFFFFFF + +/* + * Intel (R) IXP400 Software NPE Image ID Definition + * + * Definition of NPE Image ID to be passed to ixNpeDlNpeInitAndStart() + * as input of type uint32_t which has the following fields format: + * + * Field [Bit Location] + * ----------------------------------- + * Device ID [31 - 28] + * NPE ID [27 - 24] + * NPE Functionality ID [23 - 16] + * Major Release Number [15 - 8] + * Minor Release Number [7 - 0] + */ +#define IX_NPEDL_NPEID_FROM_IMAGEID_GET(imageId) \ + (((imageId) >> 24) & 0xf) +#define IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId) \ + (((imageId) >> 28) & 0xf) +#define IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId) \ + (((imageId) >> 16) & 0xff) +#define IX_NPEDL_MAJOR_FROM_IMAGEID_GET(imageId) \ + (((imageId) >> 8) & 0xff) +#define IX_NPEDL_MINOR_FROM_IMAGEID_GET(imageId) \ + (((imageId) >> 0) & 0xff) + +/* + * Instruction and Data Memory Size (in words) for each NPE + */ +#ifndef __ixp46X +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEA 4096 +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEB 2048 +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEC 2048 + +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA 2048 +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB 2048 +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC 2048 +#else +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEA 4096 +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEB 4096 +#define IX_NPEDL_INS_MEMSIZE_WORDS_NPEC 4096 + +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA 4096 +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB 4096 +#define IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC 4096 +#endif + +/* BAR offsets */ +#define IX_NPEDL_REG_OFFSET_EXAD 0x00000000 /* Execution Address */ +#define IX_NPEDL_REG_OFFSET_EXDATA 0x00000004 /* Execution Data */ +#define IX_NPEDL_REG_OFFSET_EXCTL 0x00000008 /* Execution Control */ +#define IX_NPEDL_REG_OFFSET_EXCT 0x0000000C /* Execution Count */ +#define IX_NPEDL_REG_OFFSET_AP0 0x00000010 /* Action Point 0 */ +#define IX_NPEDL_REG_OFFSET_AP1 0x00000014 /* Action Point 1 */ +#define IX_NPEDL_REG_OFFSET_AP2 0x00000018 /* Action Point 2 */ +#define IX_NPEDL_REG_OFFSET_AP3 0x0000001C /* Action Point 3 */ +#define IX_NPEDL_REG_OFFSET_WFIFO 0x00000020 /* Watchpoint FIFO */ +#define IX_NPEDL_REG_OFFSET_WC 0x00000024 /* Watch Count */ +#define IX_NPEDL_REG_OFFSET_PROFCT 0x00000028 /* Profile Count */ +#define IX_NPEDL_REG_OFFSET_STAT 0x0000002C /* Messaging Status */ +#define IX_NPEDL_REG_OFFSET_CTL 0x00000030 /* Messaging Control */ +#define IX_NPEDL_REG_OFFSET_MBST 0x00000034 /* Mailbox Status */ +#define 
IX_NPEDL_REG_OFFSET_FIFO 0x00000038 /* Message FIFO */ + +/* + * Reset value for Mailbox (MBST) register + * NOTE that if used, it should be complemented with an NPE intruction + * to clear the Mailbox at the NPE side as well + */ +#define IX_NPEDL_REG_RESET_MBST 0x0000F0F0 + +#define IX_NPEDL_MASK_WFIFO_VALID 0x80000000 /* VALID bit */ +#define IX_NPEDL_MASK_STAT_OFNE 0x00010000 /* OFNE bit */ +#define IX_NPEDL_MASK_STAT_IFNE 0x00080000 /* IFNE bit */ + +/* + * EXCTL (Execution Control) Register commands +*/ +#define IX_NPEDL_EXCTL_CMD_NPE_STEP 0x01 /* Step 1 instruction */ +#define IX_NPEDL_EXCTL_CMD_NPE_START 0x02 /* Start execution */ +#define IX_NPEDL_EXCTL_CMD_NPE_STOP 0x03 /* Stop execution */ +#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE 0x04 /* Clear ins pipeline */ + +/* + * Read/write operations use address in EXAD and data in EXDATA. + */ +#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM 0x10 /* Read ins memory */ +#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM 0x11 /* Write ins memory */ +#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM 0x12 /* Read data memory */ +#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM 0x13 /* Write data memory */ +#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG 0x14 /* Read ECS register */ +#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG 0x15 /* Write ECS register */ + +#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT 0x0C /* Clear Profile Count register */ + + +/* + * EXCTL (Execution Control) Register status bit masks + */ +#define IX_NPEDL_EXCTL_STATUS_RUN 0x80000000 +#define IX_NPEDL_EXCTL_STATUS_STOP 0x40000000 +#define IX_NPEDL_EXCTL_STATUS_CLEAR 0x20000000 +#define IX_NPEDL_EXCTL_STATUS_ECS_K 0x00800000 /* pipeline Klean */ + +/* + * Executing Context Stack (ECS) level registers + */ +#define IX_NPEDL_ECS_BG_CTXT_REG_0 0x00 /* reg 0 @ bg ctx */ +#define IX_NPEDL_ECS_BG_CTXT_REG_1 0x01 /* reg 1 @ bg ctx */ +#define IX_NPEDL_ECS_BG_CTXT_REG_2 0x02 /* reg 2 @ bg ctx */ + +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0 0x04 /* reg 0 @ pri 1 ctx */ +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_1 0x05 /* reg 1 @ pri 1 ctx */ +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_2 0x06 /* reg 2 @ pri 1 ctx */ + +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0 0x08 /* reg 0 @ pri 2 ctx */ +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_1 0x09 /* reg 1 @ pri 2 ctx */ +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_2 0x0A /* reg 2 @ pri 2 ctx */ + +#define IX_NPEDL_ECS_DBG_CTXT_REG_0 0x0C /* reg 0 @ debug ctx */ +#define IX_NPEDL_ECS_DBG_CTXT_REG_1 0x0D /* reg 1 @ debug ctx */ +#define IX_NPEDL_ECS_DBG_CTXT_REG_2 0x0E /* reg 2 @ debug ctx */ + +#define IX_NPEDL_ECS_INSTRUCT_REG 0x11 /* Instruction reg */ + +/* + * Execution Access register reset values + */ +#define IX_NPEDL_ECS_BG_CTXT_REG_0_RESET 0xA0000000 +#define IX_NPEDL_ECS_BG_CTXT_REG_1_RESET 0x01000000 +#define IX_NPEDL_ECS_BG_CTXT_REG_2_RESET 0x00008000 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET 0x20000080 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET 0x01000000 +#define IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET 0x00008000 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET 0x20000080 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET 0x01000000 +#define IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET 0x00008000 +#define IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET 0x20000000 +#define IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET 0x00000000 +#define IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET 0x001E0000 +#define IX_NPEDL_ECS_INSTRUCT_REG_RESET 0x1003C00F + +/* + * Masks used to read/write particular bits in Execution Access registers + */ + +#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE 0x80000000 /* Active bit */ +#define IX_NPEDL_MASK_ECS_REG_0_NEXTPC 0x1FFF0000 /* NextPC bits */ +#define 
IX_NPEDL_MASK_ECS_REG_0_LDUR 0x00000700 /* LDUR bits */ + +#define IX_NPEDL_MASK_ECS_REG_1_CCTXT 0x000F0000 /* NextPC bits */ +#define IX_NPEDL_MASK_ECS_REG_1_SELCTXT 0x0000000F + +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF 0x00100000 /* IF bit */ +#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE 0x00080000 /* IE bit */ + + +/* + * Bit-Offsets from LSB of particular bit-fields in Execution Access registers. + */ + +#define IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC 16 +#define IX_NPEDL_OFFSET_ECS_REG_0_LDUR 8 + +#define IX_NPEDL_OFFSET_ECS_REG_1_CCTXT 16 +#define IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT 0 + +/* + * NPE core & co-processor instruction templates to load into NPE Instruction + * Register, for read/write of NPE register file registers. + */ + +/* + * Read an 8-bit NPE internal logical register + * and return the value in the EXDATA register (aligned to MSB). + * NPE Assembler instruction: "mov8 d0, d0 &&& DBG_WrExec" + */ +#define IX_NPEDL_INSTR_RD_REG_BYTE 0x0FC00000 + +/* + * Read a 16-bit NPE internal logical register + * and return the value in the EXDATA register (aligned to MSB). + * NPE Assembler instruction: "mov16 d0, d0 &&& DBG_WrExec" + */ +#define IX_NPEDL_INSTR_RD_REG_SHORT 0x0FC08010 + +/* + * Read a 16-bit NPE internal logical register + * and return the value in the EXDATA register. + * NPE Assembler instruction: "mov32 d0, d0 &&& DBG_WrExec" + */ +#define IX_NPEDL_INSTR_RD_REG_WORD 0x0FC08210 + +/* + * Write an 8-bit NPE internal logical register. + * NPE Assembler instruction: "mov8 d0, #0" + */ +#define IX_NPEDL_INSTR_WR_REG_BYTE 0x00004000 + +/* + * Write a 16-bit NPE internal logical register. + * NPE Assembler instruction: "mov16 d0, #0" + */ +#define IX_NPEDL_INSTR_WR_REG_SHORT 0x0000C000 + +/* + * Write a 16-bit NPE internal logical register. 
+ * NPE Assembler instruction: "cprd32 d0 &&& DBG_RdInFIFO" + */ +#define IX_NPEDL_INSTR_RD_FIFO 0x0F888220 + +/* + * Reset Mailbox (MBST) register + * NPE Assembler instruction: "mov32 d0, d0 &&& DBG_ClearM" + */ +#define IX_NPEDL_INSTR_RESET_MBOX 0x0FAC8210 + + +/* + * Bit-offsets from LSB, of particular bit-fields in an NPE instruction + */ +#define IX_NPEDL_OFFSET_INSTR_SRC 4 /* src operand */ +#define IX_NPEDL_OFFSET_INSTR_DEST 9 /* dest operand */ +#define IX_NPEDL_OFFSET_INSTR_COPROC 18 /* coprocessor ins */ + +/* + * Masks used to read/write particular bits of an NPE Instruction + */ + +/** + * Mask the bits of 16-bit data value (least-sig 5 bits) to be used in + * SRC field of immediate-mode NPE instruction + */ +#define IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA 0x1F + +/** + * Mask the bits of 16-bit data value (most-sig 11 bits) to be used in + * COPROC field of immediate-mode NPE instruction + */ +#define IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA 0xFFE0 + +/** + * LSB offset of the bit-field of 16-bit data value (most-sig 11 bits) + * to be used in COPROC field of immediate-mode NPE instruction + */ +#define IX_NPEDL_OFFSET_IMMED_INSTR_COPROC_DATA 5 + +/** + * Number of left-shifts required to align most-sig 11 bits of 16-bit + * data value into COPROC field of immediate-mode NPE instruction + */ +#define IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA \ + (IX_NPEDL_OFFSET_INSTR_COPROC - IX_NPEDL_OFFSET_IMMED_INSTR_COPROC_DATA) + +/** + * LDUR value used with immediate-mode NPE Instructions by the NpeDl + * for writing to NPE internal logical registers + */ +#define IX_NPEDL_WR_INSTR_LDUR 1 + +/** + * LDUR value used with NON-immediate-mode NPE Instructions by the NpeDl + * for reading from NPE internal logical registers + */ +#define IX_NPEDL_RD_INSTR_LDUR 0 + + +/** + * NPE internal Context Store registers. + */ +typedef enum +{ + IX_NPEDL_CTXT_REG_STEVT = 0, /**< identifies STEVT */ + IX_NPEDL_CTXT_REG_STARTPC, /**< identifies STARTPC */ + IX_NPEDL_CTXT_REG_REGMAP, /**< identifies REGMAP */ + IX_NPEDL_CTXT_REG_CINDEX, /**< identifies CINDEX */ + IX_NPEDL_CTXT_REG_MAX /**< Total number of Context Store registers */ +} IxNpeDlCtxtRegNum; + + +/* + * NPE Context Store register logical addresses + */ +#define IX_NPEDL_CTXT_REG_ADDR_STEVT 0x0000001B +#define IX_NPEDL_CTXT_REG_ADDR_STARTPC 0x0000001C +#define IX_NPEDL_CTXT_REG_ADDR_REGMAP 0x0000001E +#define IX_NPEDL_CTXT_REG_ADDR_CINDEX 0x0000001F + +/* + * NPE Context Store register reset values + */ + +/** + * Reset value of STEVT NPE internal Context Store register + * (STEVT = off, 0x80) + */ +#define IX_NPEDL_CTXT_REG_RESET_STEVT 0x80 + +/** + * Reset value of STARTPC NPE internal Context Store register + * (STARTPC = 0x0000) + */ +#define IX_NPEDL_CTXT_REG_RESET_STARTPC 0x0000 + +/** + * Reset value of REGMAP NPE internal Context Store register + * (REGMAP = d0->p0, d8->p2, d16->p4) + */ +#define IX_NPEDL_CTXT_REG_RESET_REGMAP 0x0820 + +/** + * Reset value of CINDEX NPE internal Context Store register + * (CINDEX = 0) + */ +#define IX_NPEDL_CTXT_REG_RESET_CINDEX 0x00 + + +/* + * Numeric range of context levels available on an NPE + */ +#define IX_NPEDL_CTXT_NUM_MIN 0 +#define IX_NPEDL_CTXT_NUM_MAX 15 + + +/** + * Number of Physical registers currently supported + * Initial NPE implementations will have a 32-word register file. + * Later implementations may have a 64-word register file. 
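+ *
+ * The register file is addressed as pairs of 32-bit registers: the upper
+ * bits of a physical register address select the pair (written to the
+ * REGMAP context-store register) and the low bit selects the member of
+ * that pair.  A sketch, mirroring npe_physical_reg_write():
+ *
+ *	pair   = physAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP;
+ *	member = physAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR;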
+ */ +#define IX_NPEDL_TOTAL_NUM_PHYS_REG 32 + +/** + * LSB-offset of Regmap number in Physical NPE register address, used + * for Physical To Logical register address mapping in the NPE + */ +#define IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP 1 + +/** + * Mask to extract a logical NPE register address from a physical + * register address, used for Physical To Logical address mapping + */ +#define IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR 0x1 + +/* + * NPE Message/Mailbox interface. + */ +#define IX_NPESTAT IX_NPEDL_REG_OFFSET_STAT /* status register */ +#define IX_NPECTL IX_NPEDL_REG_OFFSET_CTL /* control register */ +#define IX_NPEFIFO IX_NPEDL_REG_OFFSET_FIFO /* FIFO register */ + +/* control register */ +#define IX_NPECTL_OFE 0x00010000 /* output fifo enable */ +#define IX_NPECTL_IFE 0x00020000 /* input fifo enable */ +#define IX_NPECTL_OFWE 0x01000000 /* output fifo write enable */ +#define IX_NPECTL_IFWE 0x02000000 /* input fifo write enable */ + +/* status register */ +#define IX_NPESTAT_OFNE 0x00010000 /* output fifo not empty */ +#define IX_NPESTAT_IFNF 0x00020000 /* input fifo not full */ +#define IX_NPESTAT_OFNF 0x00040000 /* output fifo not full */ +#define IX_NPESTAT_IFNE 0x00080000 /* input fifo not empty */ +#define IX_NPESTAT_MBINT 0x00100000 /* Mailbox interrupt */ +#define IX_NPESTAT_IFINT 0x00200000 /* input fifo interrupt */ +#define IX_NPESTAT_OFINT 0x00400000 /* output fifo interrupt */ +#define IX_NPESTAT_WFINT 0x00800000 /* watch fifo interrupt */ +#endif /* _IXP425_NPEREG_H_ */ diff --git a/sys/arm/xscale/ixp425/ixp425_npevar.h b/sys/arm/xscale/ixp425/ixp425_npevar.h new file mode 100644 index 000000000000..c30ff5f55117 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_npevar.h @@ -0,0 +1,96 @@ +/*- + * Copyright (c) 2006 Sam Leffler. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _IXP425_NPEVAR_H_ +#define _IXP425_NPEVAR_H_ + +/* + * Intel (R) IXP400 Software NPE Image ID Definition + * + * Firmware Id's for current firmware image. These are typed by + * NPE ID and the feature set. Not all features are available + * on all NPE's. + * + * HSS-0: supports 32 channelized and 4 packetized. + * HSS-0 + ATM + SPHY: + * For HSS, 16/32 channelized and 4/0 packetized. 
+ * For ATM, AAL5, AAL0 and OAM for UTOPIA SPHY, 1 logical port, 32 VCs. + * Fast Path support. + * HSS-0 + ATM + MPHY: + * For HSS, 16/32 channelized and 4/0 packetized. + * For ATM, AAL5, AAL0 and OAM for UTOPIA MPHY, 1 logical port, 32 VCs. + * Fast Path support. + * ATM-Only: + * AAL5, AAL0 and OAM for UTOPIA MPHY, 12 logical ports, 32 VCs. + * Fast Path support. + * HSS-2: + * HSS-0 and HSS-1. + * Each HSS port supports 32 channelized and 4 packetized. + * ETH: Ethernet Rx/Tx which includes: + * MAC_FILTERING, MAC_LEARNING, SPANNING_TREE, FIREWALL + * ETH+VLAN Ethernet Rx/Tx which includes: + * MAC_FILTERING, MAC_LEARNING, SPANNING_TREE, FIREWALL, VLAN_QOS + * ETH+VLAN+HDR: Ethernet Rx/Tx which includes: + * SPANNING_TREE, FIREWALL, VLAN_QOS, HEADER_CONVERSION + */ +/* XXX not right, revise */ +/* NPE A Firmware Image Id's */ +#define NPEFW_A_HSS0 0x00010000 /* HSS-0: 32 chan+4 packet */ +#define NPEFW_A_HSS0_ATM_S_1 0x00020000 /* HSS-0+ATM UTOPIA SPHY (1 port) */ +#define NPEFW_A_HSS0_ATM_M_1 0x00020000 /* HSS-0+ATM UTOPIA MPHY (1 port) */ +#define NPEFW_A_ATM_M_12 0x00040000 /* ATM UTOPIA MPHY (12 ports) */ +#define NPEFW_A_DMA 0x00150100 /* DMA only */ +#define NPEFW_A_HSS2 0x00090000 /* HSS-0 + HSS-1 */ +#define NPEFW_A_ETH 0x10800200 /* Basic Ethernet */ +#define NPEFW_A_ETH_VLAN 0x10810200 /* NPEFW_A_ETH + VLAN QoS */ +#define NPEFW_A_ETH_VLAN_HDR 0x10820200 /* NPEFW_A_ETH_VLAN + Hdr conv */ +/* XXX ... more not included */ + +/* NPE B Firmware Image Id's */ +#define NPEFW_B_ETH 0x01000200 /* Basic Ethernet */ +#define NPEFW_B_ETH_VLAN 0x01010200 /* NPEFW_B_ETH + VLAN QoS */ +#define NPEFW_B_ETH_VLAN_HDR 0x01020201 /* NPEFW_B_ETH_VLAN + Hdr conv */ +#define NPEFW_B_DMA 0x01020100 /* DMA only */ +/* XXX ... more not include */ + +#define IXP425_NPE_B_IMAGEID 0x01000200 +#define IXP425_NPE_C_IMAGEID 0x02000200 + +struct ixpnpe_softc; +struct ixpnpe_softc *ixpnpe_attach(device_t); +void ixpnpe_detach(struct ixpnpe_softc *); +int ixpnpe_stopandreset(struct ixpnpe_softc *); +int ixpnpe_start(struct ixpnpe_softc *); +int ixpnpe_stop(struct ixpnpe_softc *); +int ixpnpe_init(struct ixpnpe_softc *, + const char *imageName, uint32_t imageId); +int ixpnpe_getfunctionality(struct ixpnpe_softc *sc); + +int ixpnpe_sendmsg(struct ixpnpe_softc *, const uint32_t msg[2]); +int ixpnpe_recvmsg(struct ixpnpe_softc *, uint32_t msg[2]); +int ixpnpe_sendandrecvmsg(struct ixpnpe_softc *, const uint32_t send[2], + uint32_t recv[2]); +#endif /* _IXP425_NPEVAR_H_ */ diff --git a/sys/arm/xscale/ixp425/ixp425_pci.c b/sys/arm/xscale/ixp425/ixp425_pci.c new file mode 100644 index 000000000000..adb9b16d93eb --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_pci.c @@ -0,0 +1,455 @@ +/* $NetBSD: ixp425_pci.c,v 1.5 2006/04/10 03:36:03 simonb Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. 
+ * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include "pcib_if.h" + +#include +extern struct ixp425_softc *ixp425_softc; + +#define PCI_CSR_WRITE_4(sc, reg, data) \ + bus_write_4(sc->sc_csr, reg, data) + +#define PCI_CSR_READ_4(sc, reg) \ + bus_read_4(sc->sc_csr, reg) + +#define PCI_CONF_LOCK(s) (s) = disable_interrupts(I32_bit) +#define PCI_CONF_UNLOCK(s) restore_interrupts((s)) + +static device_probe_t ixppcib_probe; +static device_attach_t ixppcib_attach; +static bus_read_ivar_t ixppcib_read_ivar; +static bus_write_ivar_t ixppcib_write_ivar; +static bus_setup_intr_t ixppcib_setup_intr; +static bus_teardown_intr_t ixppcib_teardown_intr; +static bus_alloc_resource_t ixppcib_alloc_resource; +static bus_activate_resource_t ixppcib_activate_resource; +static bus_deactivate_resource_t ixppcib_deactivate_resource; +static bus_release_resource_t ixppcib_release_resource; +static pcib_maxslots_t ixppcib_maxslots; +static pcib_read_config_t ixppcib_read_config; +static pcib_write_config_t ixppcib_write_config; +static pcib_route_interrupt_t ixppcib_route_interrupt; + +static int +ixppcib_probe(device_t dev) +{ + + device_set_desc(dev, "IXP425 PCI Bus"); + return (0); +} + +static void +ixp425_pci_conf_reg_write(struct ixppcib_softc *sc, uint32_t reg, + uint32_t data) +{ + PCI_CSR_WRITE_4(sc, + PCI_CRP_AD_CBE, ((reg & ~3) | COMMAND_CRP_WRITE)); + PCI_CSR_WRITE_4(sc, + PCI_CRP_AD_WDATA, data); +} + +static int +ixppcib_attach(device_t dev) +{ + int rid; + struct ixppcib_softc *sc; + + sc = device_get_softc(dev); + + rid = 0; + sc->sc_csr = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, + IXP425_PCI_HWBASE, IXP425_PCI_HWBASE + IXP425_PCI_SIZE, + IXP425_PCI_SIZE, RF_ACTIVE); + if (sc->sc_csr == NULL) + panic("cannot allocate PCI CSR registers"); + + ixp425_md_attach(dev); + /* always setup the base, incase another OS messes w/ it */ + PCI_CSR_WRITE_4(sc, PCI_PCIMEMBASE, 0x48494a4b); + + rid = 0; + sc->sc_mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, + IXP425_PCI_MEM_HWBASE, IXP425_PCI_MEM_HWBASE + IXP425_PCI_MEM_SIZE, + IXP425_PCI_MEM_SIZE, RF_ACTIVE); + if (sc->sc_mem == NULL) + panic("cannot allocate PCI MEM space"); + + /* + * Initialize the bus space tags. + */ + ixp425_io_bs_init(&sc->sc_pci_iot, sc); + ixp425_mem_bs_init(&sc->sc_pci_memt, sc); + + sc->sc_dev = dev; + + /* Initialize memory and i/o rmans. 
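+	 * Child PCI devices allocate their port and memory ranges from
+	 * these rmans via bus_alloc_resource(); ixppcib_alloc_resource
+	 * below hands back regions tagged with the PCI I/O or PCI memory
+	 * bus space tag as appropriate.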
*/ + sc->sc_io_rman.rm_type = RMAN_ARRAY; + sc->sc_io_rman.rm_descr = "IXP425 PCI I/O Ports"; + if (rman_init(&sc->sc_io_rman) != 0 || + rman_manage_region(&sc->sc_io_rman, 0, + IXP425_PCI_IO_SIZE) != 0) { + panic("ixppcib_probe: failed to set up I/O rman"); + } + + sc->sc_mem_rman.rm_type = RMAN_ARRAY; + sc->sc_mem_rman.rm_descr = "IXP425 PCI Memory"; + if (rman_init(&sc->sc_mem_rman) != 0 || + rman_manage_region(&sc->sc_mem_rman, IXP425_PCI_MEM_HWBASE, + IXP425_PCI_MEM_HWBASE + IXP425_PCI_MEM_SIZE) != 0) { + panic("ixppcib_probe: failed to set up memory rman"); + } + + /* + * PCI->AHB address translation + * begin at the physical memory start + OFFSET + */ +#define AHB_OFFSET 0x10000000UL + PCI_CSR_WRITE_4(sc, PCI_AHBMEMBASE, + (AHB_OFFSET & 0xFF000000) + + ((AHB_OFFSET & 0xFF000000) >> 8) + + ((AHB_OFFSET & 0xFF000000) >> 16) + + ((AHB_OFFSET & 0xFF000000) >> 24) + + 0x00010203); + +#define IXPPCIB_WRITE_CONF(sc, reg, val) \ + ixp425_pci_conf_reg_write(sc, reg, val) + /* Write Mapping registers PCI Configuration Registers */ + /* Base Address 0 - 3 */ + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR0, AHB_OFFSET + 0x00000000); + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR1, AHB_OFFSET + 0x01000000); + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR2, AHB_OFFSET + 0x02000000); + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR3, AHB_OFFSET + 0x03000000); + + /* Base Address 4 */ + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR4, 0xffffffff); + + /* Base Address 5 */ + IXPPCIB_WRITE_CONF(sc, PCI_MAPREG_BAR5, 0x00000000); + + /* Assert some PCI errors */ + PCI_CSR_WRITE_4(sc, PCI_ISR, ISR_AHBE | ISR_PPE | ISR_PFE | ISR_PSE); + +#ifdef __ARMEB__ + /* + * Set up byte lane swapping between little-endian PCI + * and the big-endian AHB bus + */ + PCI_CSR_WRITE_4(sc, PCI_CSR, CSR_IC | CSR_ABE | CSR_PDS); +#else + PCI_CSR_WRITE_4(sc, PCI_CSR, CSR_IC | CSR_ABE); +#endif + + /* + * Enable bus mastering and I/O,memory access + */ + IXPPCIB_WRITE_CONF(sc, PCIR_COMMAND, + PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); + + /* + * Wait some more to ensure PCI devices have stabilised. 
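+	 * (DELAY() counts microseconds, so the 50000 below amounts to a
+	 * 50 ms settling time.)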
+ */ + DELAY(50000); + + device_add_child(dev, "pci", -1); + return (bus_generic_attach(dev)); +} + +static int +ixppcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) +{ + struct ixppcib_softc *sc; + + sc = device_get_softc(dev); + switch (which) { + case PCIB_IVAR_BUS: + *result = sc->sc_bus; + return (0); + } + + return (ENOENT); +} + +static int +ixppcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) +{ + struct ixppcib_softc *sc; + + sc = device_get_softc(dev); + switch (which) { + case PCIB_IVAR_BUS: + sc->sc_bus = value; + return (0); + } + + return (ENOENT); +} + +static int +ixppcib_setup_intr(device_t dev, device_t child, struct resource *ires, + int flags, driver_intr_t *intr, void *arg, void **cookiep) +{ + + return (BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, + intr, arg, cookiep)); +} + +static int +ixppcib_teardown_intr(device_t dev, device_t child, struct resource *vec, + void *cookie) +{ + + return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, vec, cookie)); +} + +static struct resource * +ixppcib_alloc_resource(device_t bus, device_t child, int type, int *rid, + u_long start, u_long end, u_long count, u_int flags) +{ + bus_space_tag_t tag; + struct ixppcib_softc *sc = device_get_softc(bus); + struct rman *rmanp; + struct resource *rv; + + tag = NULL; /* shut up stupid gcc */ + rv = NULL; + switch (type) { + case SYS_RES_IRQ: + rmanp = &sc->sc_irq_rman; + break; + + case SYS_RES_IOPORT: + rmanp = &sc->sc_io_rman; + tag = &sc->sc_pci_iot; + break; + + case SYS_RES_MEMORY: + rmanp = &sc->sc_mem_rman; + tag = &sc->sc_pci_memt; + break; + + default: + return (rv); + } + + rv = rman_reserve_resource(rmanp, start, end, count, flags, child); + if (rv != NULL) { + rman_set_rid(rv, *rid); + if (type == SYS_RES_IOPORT) { + rman_set_bustag(rv, tag); + rman_set_bushandle(rv, rman_get_start(rv)); + } else if (type == SYS_RES_MEMORY) { + rman_set_bustag(rv, tag); + rman_set_bushandle(rv, rman_get_bushandle(sc->sc_mem) + + (rman_get_start(rv) - IXP425_PCI_MEM_HWBASE)); + } + } + + return (rv); +} + +static int +ixppcib_activate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + + device_printf(bus, "%s called activate_resource\n", device_get_nameunit(child)); + return (ENXIO); +} + +static int +ixppcib_deactivate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + + device_printf(bus, "%s called deactivate_resource\n", device_get_nameunit(child)); + return (ENXIO); +} + +static int +ixppcib_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + + device_printf(bus, "%s called release_resource\n", device_get_nameunit(child)); + return (ENXIO); +} + +static void +ixppcib_conf_setup(struct ixppcib_softc *sc, int bus, int slot, int func, + int reg) +{ + if (bus == 0) { + if (slot == 0 && func == 0) { + PCI_CSR_WRITE_4(sc, PCI_NP_AD, (reg & ~3)); + } else { + bus &= 0xff; + slot &= 0x1f; + func &= 0x07; + /* configuration type 0 */ + PCI_CSR_WRITE_4(sc, PCI_NP_AD, (1U << (32 - slot)) | + (func << 8) | (reg & ~3)); + } + } else { + /* configuration type 1 */ + PCI_CSR_WRITE_4(sc, PCI_NP_AD, + (bus << 16) | (slot << 11) | + (func << 8) | (reg & ~3) | 1); + } + +} + +static int +ixppcib_maxslots(device_t dev) +{ + + return (PCI_SLOTMAX); +} + +static u_int32_t +ixppcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, + int bytes) +{ + struct ixppcib_softc *sc = device_get_softc(dev); + u_int32_t data, ret; 
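+	/*
+	 * Indirect configuration read: ixppcib_conf_setup() latches the
+	 * target address in NP_AD, a configuration-read command is issued
+	 * through NP_CBE, and the dword from NP_RDATA is shifted and
+	 * masked down to the requested byte lanes.  A PCI abort is
+	 * flagged in ISR_PFE and converted to an all-ones return.
+	 */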
+ + ixppcib_conf_setup(sc, bus, slot, func, reg & ~3); + + PCI_CSR_WRITE_4(sc, PCI_NP_CBE, COMMAND_NP_CONF_READ); + ret = PCI_CSR_READ_4(sc, PCI_NP_RDATA); + ret >>= (reg & 3) * 8; + ret &= 0xffffffff >> ((4 - bytes) * 8); +#if 0 + device_printf(dev, "read config: %u:%u:%u %#x(%d) = %#x\n", bus, slot, func, reg, bytes, ret); +#endif + + /* check & clear PCI abort */ + data = PCI_CSR_READ_4(sc, PCI_ISR); + if (data & ISR_PFE) { + PCI_CSR_WRITE_4(sc, PCI_ISR, ISR_PFE); + return (-1); + } + return (ret); +} + +static const int byteenables[] = { 0, 0x10, 0x30, 0x70, 0xf0 }; + +static void +ixppcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, + u_int32_t val, int bytes) +{ + struct ixppcib_softc *sc = device_get_softc(dev); + u_int32_t data; + +#if 0 + device_printf(dev, "write config: %u:%u:%u %#x(%d) = %#x\n", bus, slot, func, reg, bytes, val); +#endif + + ixppcib_conf_setup(sc, bus, slot, func, reg & ~3); + + /* Byte enables are active low, so not them first */ + PCI_CSR_WRITE_4(sc, PCI_NP_CBE, COMMAND_NP_CONF_WRITE | + (~(byteenables[bytes] << (reg & 3)) & 0xf0)); + PCI_CSR_WRITE_4(sc, PCI_NP_WDATA, val << ((reg & 3) * 8)); + + /* check & clear PCI abort */ + data = PCI_CSR_READ_4(sc, PCI_ISR); + if (data & ISR_PFE) + PCI_CSR_WRITE_4(sc, PCI_ISR, ISR_PFE); +} + +static int +ixppcib_route_interrupt(device_t bridge, device_t device, int pin) +{ + + return (ixp425_md_route_interrupt(bridge, device, pin)); +} + +static device_method_t ixppcib_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ixppcib_probe), + DEVMETHOD(device_attach, ixppcib_attach), + + /* Bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_read_ivar, ixppcib_read_ivar), + DEVMETHOD(bus_write_ivar, ixppcib_write_ivar), + DEVMETHOD(bus_setup_intr, ixppcib_setup_intr), + DEVMETHOD(bus_teardown_intr, ixppcib_teardown_intr), + DEVMETHOD(bus_alloc_resource, ixppcib_alloc_resource), + DEVMETHOD(bus_activate_resource, ixppcib_activate_resource), + DEVMETHOD(bus_deactivate_resource, ixppcib_deactivate_resource), + DEVMETHOD(bus_release_resource, ixppcib_release_resource), + /* DEVMETHOD(bus_get_dma_tag, ixppcib_get_dma_tag), */ + + /* pcib interface */ + DEVMETHOD(pcib_maxslots, ixppcib_maxslots), + DEVMETHOD(pcib_read_config, ixppcib_read_config), + DEVMETHOD(pcib_write_config, ixppcib_write_config), + DEVMETHOD(pcib_route_interrupt, ixppcib_route_interrupt), + + {0, 0}, +}; + +static driver_t ixppcib_driver = { + "pcib", + ixppcib_methods, + sizeof(struct ixppcib_softc), +}; +static devclass_t ixppcib_devclass; + +DRIVER_MODULE(ixppcib, ixp, ixppcib_driver, ixppcib_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/ixp425_pci_asm.S b/sys/arm/xscale/ixp425/ixp425_pci_asm.S new file mode 100644 index 000000000000..f686d6577f09 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_pci_asm.S @@ -0,0 +1,102 @@ +/* $NetBSD: ixp425_pci_asm.S,v 1.2 2005/12/11 12:16:51 christos Exp $ */ + +/* + * Copyright (c) 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Jason R. Thorpe for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + * + */ + +#include + +/* + * Bus space functions for IXP425 PCI space access. We have to swizzle + * the address for 1 and 2 byte accesses when in big-endian mode. + */ + +/* + * read single + */ + +ENTRY(ixp425_pci_mem_bs_r_1) +#ifdef __ARMEB__ + add r1, r1, r2 + eor r1, r1, #0x3 + ldrb r0, [r1] +#else + ldrb r0, [r1, r2] +#endif /* __ARMEB__ */ + mov pc, lr + +ENTRY(ixp425_pci_mem_bs_r_2) +#ifdef __ARMEB__ + add r1, r1, r2 + eor r1, r1, #0x2 + ldrh r0, [r1] +#else + ldrh r0, [r1, r2] +#endif /* __ARMEB__ */ + mov pc, lr + +ENTRY(ixp425_pci_mem_bs_r_4) + ldr r0, [r1, r2] + mov pc, lr + +/* + * write single + */ + +ENTRY(ixp425_pci_mem_bs_w_1) +#ifdef __ARMEB__ + add r1, r1, r2 + eor r1, r1, #0x3 + strb r3, [r1] +#else + strb r3, [r1, r2] +#endif /* __ARMEB__ */ + mov pc, lr + +ENTRY(ixp425_pci_mem_bs_w_2) +#ifdef __ARMEB__ + add r1, r1, r2 + eor r1, r1, #0x2 + strh r3, [r1] +#else + strh r3, [r1, r2] +#endif /* __ARMEB__ */ + mov pc, lr + +ENTRY(ixp425_pci_mem_bs_w_4) + str r3, [r1, r2] + mov pc, lr diff --git a/sys/arm/xscale/ixp425/ixp425_pci_space.c b/sys/arm/xscale/ixp425/ixp425_pci_space.c new file mode 100644 index 000000000000..6ee2a1d33180 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_pci_space.c @@ -0,0 +1,496 @@ +/* $NetBSD: ixp425_pci_space.c,v 1.6 2006/04/10 03:36:03 simonb Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. 
The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * bus_space PCI functions for ixp425 + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +/* + * Macros to read/write registers +*/ +#define CSR_READ_4(x) *(volatile uint32_t *) \ + (IXP425_PCI_CSR_BASE + (x)) +#define CSR_WRITE_4(x, v) *(volatile uint32_t *) \ + (IXP425_PCI_CSR_BASE + (x)) = (v) + +/* Proto types for all the bus_space structure functions */ +bs_protos(ixp425_pci); +bs_protos(ixp425_pci_io); +bs_protos(ixp425_pci_mem); + +/* special I/O functions */ +static u_int8_t _pci_io_bs_r_1(void *, bus_space_handle_t, bus_size_t); +static u_int16_t _pci_io_bs_r_2(void *, bus_space_handle_t, bus_size_t); +static u_int32_t _pci_io_bs_r_4(void *, bus_space_handle_t, bus_size_t); + +static void _pci_io_bs_w_1(void *, bus_space_handle_t, bus_size_t, u_int8_t); +static void _pci_io_bs_w_2(void *, bus_space_handle_t, bus_size_t, u_int16_t); +static void _pci_io_bs_w_4(void *, bus_space_handle_t, bus_size_t, u_int32_t); + +#ifdef __ARMEB__ +static u_int8_t _pci_io_bs_r_1_s(void *, bus_space_handle_t, bus_size_t); +static u_int16_t _pci_io_bs_r_2_s(void *, bus_space_handle_t, bus_size_t); +static u_int32_t _pci_io_bs_r_4_s(void *, bus_space_handle_t, bus_size_t); + +static void _pci_io_bs_w_1_s(void *, bus_space_handle_t, bus_size_t, u_int8_t); +static void _pci_io_bs_w_2_s(void *, bus_space_handle_t, bus_size_t, u_int16_t); +static void _pci_io_bs_w_4_s(void *, bus_space_handle_t, bus_size_t, u_int32_t); + +static u_int8_t _pci_mem_bs_r_1(void *, bus_space_handle_t, bus_size_t); +static u_int16_t _pci_mem_bs_r_2(void *, bus_space_handle_t, bus_size_t); +static u_int32_t _pci_mem_bs_r_4(void *, bus_space_handle_t, bus_size_t); + +static void _pci_mem_bs_w_1(void *, bus_space_handle_t, bus_size_t, u_int8_t); +static void _pci_mem_bs_w_2(void *, bus_space_handle_t, bus_size_t, u_int16_t); +static void _pci_mem_bs_w_4(void *, bus_space_handle_t, bus_size_t, u_int32_t); +#endif + +struct bus_space ixp425_pci_io_bs_tag_template = { + /* mapping/unmapping */ + .bs_map = ixp425_pci_io_bs_map, + .bs_unmap = ixp425_pci_io_bs_unmap, + .bs_subregion = ixp425_pci_bs_subregion, + + .bs_alloc = ixp425_pci_io_bs_alloc, + .bs_free = ixp425_pci_io_bs_free, + + /* barrier */ + .bs_barrier = ixp425_pci_bs_barrier, + + /* + * IXP425 processor does not have PCI I/O windows + */ + /* read (single) */ + .bs_r_1 = _pci_io_bs_r_1, + .bs_r_2 = _pci_io_bs_r_2, + .bs_r_4 = _pci_io_bs_r_4, + + /* write (single) */ + .bs_w_1 = 
_pci_io_bs_w_1, + .bs_w_2 = _pci_io_bs_w_2, + .bs_w_4 = _pci_io_bs_w_4, + +#ifdef __ARMEB__ + .bs_r_1_s = _pci_io_bs_r_1_s, + .bs_r_2_s = _pci_io_bs_r_2_s, + .bs_r_4_s = _pci_io_bs_r_4_s, + + .bs_w_1_s = _pci_io_bs_w_1_s, + .bs_w_2_s = _pci_io_bs_w_2_s, + .bs_w_4_s = _pci_io_bs_w_4_s, +#else + .bs_r_1_s = _pci_io_bs_r_1, + .bs_r_2_s = _pci_io_bs_r_2, + .bs_r_4_s = _pci_io_bs_r_4, + + .bs_w_1_s = _pci_io_bs_w_1, + .bs_w_2_s = _pci_io_bs_w_2, + .bs_w_4_s = _pci_io_bs_w_4, +#endif +}; + +void +ixp425_io_bs_init(bus_space_tag_t bs, void *cookie) +{ + *bs = ixp425_pci_io_bs_tag_template; + bs->bs_cookie = cookie; +} + +struct bus_space ixp425_pci_mem_bs_tag_template = { + /* mapping/unmapping */ + .bs_map = ixp425_pci_mem_bs_map, + .bs_unmap = ixp425_pci_mem_bs_unmap, + .bs_subregion = ixp425_pci_bs_subregion, + + .bs_alloc = ixp425_pci_mem_bs_alloc, + .bs_free = ixp425_pci_mem_bs_free, + + /* barrier */ + .bs_barrier = ixp425_pci_bs_barrier, + +#ifdef __ARMEB__ + /* read (single) */ + .bs_r_1_s = _pci_mem_bs_r_1, + .bs_r_2_s = _pci_mem_bs_r_2, + .bs_r_4_s = _pci_mem_bs_r_4, + + .bs_r_1 = ixp425_pci_mem_bs_r_1, + .bs_r_2 = ixp425_pci_mem_bs_r_2, + .bs_r_4 = ixp425_pci_mem_bs_r_4, + + /* write (single) */ + .bs_w_1_s = _pci_mem_bs_w_1, + .bs_w_2_s = _pci_mem_bs_w_2, + .bs_w_4_s = _pci_mem_bs_w_4, + + .bs_w_1 = ixp425_pci_mem_bs_w_1, + .bs_w_2 = ixp425_pci_mem_bs_w_2, + .bs_w_4 = ixp425_pci_mem_bs_w_4, +#else + /* read (single) */ + .bs_r_1 = ixp425_pci_mem_bs_r_1, + .bs_r_2 = ixp425_pci_mem_bs_r_2, + .bs_r_4 = ixp425_pci_mem_bs_r_4, + .bs_r_1_s = ixp425_pci_mem_bs_r_1, + .bs_r_2_s = ixp425_pci_mem_bs_r_2, + .bs_r_4_s = ixp425_pci_mem_bs_r_4, + + /* write (single) */ + .bs_w_1 = ixp425_pci_mem_bs_w_1, + .bs_w_2 = ixp425_pci_mem_bs_w_2, + .bs_w_4 = ixp425_pci_mem_bs_w_4, + .bs_w_1_s = ixp425_pci_mem_bs_w_1, + .bs_w_2_s = ixp425_pci_mem_bs_w_2, + .bs_w_4_s = ixp425_pci_mem_bs_w_4, +#endif +}; + +void +ixp425_mem_bs_init(bus_space_tag_t bs, void *cookie) +{ + *bs = ixp425_pci_mem_bs_tag_template; + bs->bs_cookie = cookie; +} + +/* common routine */ +int +ixp425_pci_bs_subregion(void *t, bus_space_handle_t bsh, bus_size_t offset, + bus_size_t size, bus_space_handle_t *nbshp) +{ + *nbshp = bsh + offset; + return (0); +} + +void +ixp425_pci_bs_barrier(void *t, bus_space_handle_t bsh, bus_size_t offset, + bus_size_t len, int flags) +{ + /* NULL */ +} + +/* io bs */ +int +ixp425_pci_io_bs_map(void *t, bus_addr_t bpa, bus_size_t size, + int cacheable, bus_space_handle_t *bshp) +{ + *bshp = bpa; + return (0); +} + +void +ixp425_pci_io_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size) +{ + /* Nothing to do. 
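+	 * I/O handles are simply PCI bus addresses (see the _bs_map
+	 * routine above), so there is no virtual mapping to release here.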
*/ +} + +int +ixp425_pci_io_bs_alloc(void *t, bus_addr_t rstart, bus_addr_t rend, + bus_size_t size, bus_size_t alignment, bus_size_t boundary, int cacheable, + bus_addr_t *bpap, bus_space_handle_t *bshp) +{ + panic("ixp425_pci_io_bs_alloc(): not implemented\n"); +} + +void +ixp425_pci_io_bs_free(void *t, bus_space_handle_t bsh, bus_size_t size) +{ + panic("ixp425_pci_io_bs_free(): not implemented\n"); +} + +/* special I/O functions */ +static __inline u_int32_t +_bs_r(void *v, bus_space_handle_t ioh, bus_size_t off, u_int32_t be) +{ + u_int32_t data; + + CSR_WRITE_4(PCI_NP_AD, (ioh + off) & ~3); + CSR_WRITE_4(PCI_NP_CBE, be | COMMAND_NP_IO_READ); + data = CSR_READ_4(PCI_NP_RDATA); + if (CSR_READ_4(PCI_ISR) & ISR_PFE) + CSR_WRITE_4(PCI_ISR, ISR_PFE); + + return data; +} + +static u_int8_t +_pci_io_bs_r_1(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~(1U << n)) << NP_CBE_SHIFT; + data = _bs_r(v, ioh, off, be); + + return data >> (8 * n); +} + +static u_int16_t +_pci_io_bs_r_2(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~((1U << n) | (1U << (n + 1)))) << NP_CBE_SHIFT; + data = _bs_r(v, ioh, off, be); + + return data >> (8 * n); +} + +static u_int32_t +_pci_io_bs_r_4(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data; + + data = _bs_r(v, ioh, off, 0); + return data; +} + +#ifdef __ARMEB__ +static u_int8_t +_pci_io_bs_r_1_s(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~(1U << n)) << NP_CBE_SHIFT; + data = _bs_r(v, ioh, off, be); + + return data >> (8 * n); +} + +static u_int16_t +_pci_io_bs_r_2_s(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~((1U << n) | (1U << (n + 1)))) << NP_CBE_SHIFT; + data = _bs_r(v, ioh, off, be); + + return data >> (8 * n); +} + +static u_int32_t +_pci_io_bs_r_4_s(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data; + + data = _bs_r(v, ioh, off, 0); + return le32toh(data); +} +#endif /* __ARMEB__ */ + +static __inline void +_bs_w(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int32_t be, u_int32_t data) +{ + CSR_WRITE_4(PCI_NP_AD, (ioh + off) & ~3); + CSR_WRITE_4(PCI_NP_CBE, be | COMMAND_NP_IO_WRITE); + CSR_WRITE_4(PCI_NP_WDATA, data); + if (CSR_READ_4(PCI_ISR) & ISR_PFE) + CSR_WRITE_4(PCI_ISR, ISR_PFE); +} + +static void +_pci_io_bs_w_1(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int8_t val) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~(1U << n)) << NP_CBE_SHIFT; + data = val << (8 * n); + _bs_w(v, ioh, off, be, data); +} + +static void +_pci_io_bs_w_2(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int16_t val) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~((1U << n) | (1U << (n + 1)))) << NP_CBE_SHIFT; + data = val << (8 * n); + _bs_w(v, ioh, off, be, data); +} + +static void +_pci_io_bs_w_4(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int32_t val) +{ + _bs_w(v, ioh, off, 0, val); +} + +#ifdef __ARMEB__ +static void +_pci_io_bs_w_1_s(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int8_t val) +{ + u_int32_t data, n, be; + + n = (ioh + off) % 4; + be = (0xf & ~(1U << n)) << NP_CBE_SHIFT; + data = val << (8 * n); + _bs_w(v, ioh, off, be, data); +} + +static void +_pci_io_bs_w_2_s(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int16_t val) +{ + u_int32_t data, n, be; + + n 
= (ioh + off) % 4; + be = (0xf & ~((1U << n) | (1U << (n + 1)))) << NP_CBE_SHIFT; + data = val << (8 * n); + _bs_w(v, ioh, off, be, data); +} + +static void +_pci_io_bs_w_4_s(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int32_t val) +{ + _bs_w(v, ioh, off, 0, htole32(val)); +} +#endif /* __ARMEB__ */ + +/* mem bs */ +int +ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, + int cacheable, bus_space_handle_t *bshp) +{ + vm_paddr_t pa, endpa; + + pa = trunc_page(bpa); + endpa = round_page(bpa + size); + + *bshp = (vm_offset_t)pmap_mapdev(pa, endpa - pa); + + return (0); +} + +void +ixp425_pci_mem_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size) +{ + vm_offset_t va, endva; + + va = trunc_page((vm_offset_t)t); + endva = va + round_page(size); + + /* Free the kernel virtual mapping. */ + kmem_free(kernel_map, va, endva - va); +} + +int +ixp425_pci_mem_bs_alloc(void *t, bus_addr_t rstart, bus_addr_t rend, + bus_size_t size, bus_size_t alignment, bus_size_t boundary, int cacheable, + bus_addr_t *bpap, bus_space_handle_t *bshp) +{ + panic("ixp425_mem_bs_alloc(): not implemented\n"); +} + +void +ixp425_pci_mem_bs_free(void *t, bus_space_handle_t bsh, bus_size_t size) +{ + panic("ixp425_mem_bs_free(): not implemented\n"); +} + +#ifdef __ARMEB__ +static u_int8_t +_pci_mem_bs_r_1(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + return ixp425_pci_mem_bs_r_1(v, ioh, off); +} + +static u_int16_t +_pci_mem_bs_r_2(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + return (ixp425_pci_mem_bs_r_2(v, ioh, off)); +} + +static u_int32_t +_pci_mem_bs_r_4(void *v, bus_space_handle_t ioh, bus_size_t off) +{ + u_int32_t data; + + data = ixp425_pci_mem_bs_r_4(v, ioh, off); + return (le32toh(data)); +} + +static void +_pci_mem_bs_w_1(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int8_t val) +{ + ixp425_pci_mem_bs_w_1(v, ioh, off, val); +} + +static void +_pci_mem_bs_w_2(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int16_t val) +{ + ixp425_pci_mem_bs_w_2(v, ioh, off, val); +} + +static void +_pci_mem_bs_w_4(void *v, bus_space_handle_t ioh, bus_size_t off, + u_int32_t val) +{ + ixp425_pci_mem_bs_w_4(v, ioh, off, htole32(val)); +} +#endif /* __ARMEB__ */ + +/* End of ixp425_pci_space.c */ diff --git a/sys/arm/xscale/ixp425/ixp425_qmgr.c b/sys/arm/xscale/ixp425/ixp425_qmgr.c new file mode 100644 index 000000000000..fcf2da257fad --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_qmgr.c @@ -0,0 +1,1077 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + */ + +/*- + * Copyright (c) 2001-2005, Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. +*/ +#include +__FBSDID("$FreeBSD$"); + +/* + * Intel XScale Queue Manager support. + * + * Each IXP4XXX device has a hardware block that implements a priority + * queue manager that is shared between the XScale cpu and the backend + * devices (such as the NPE). Queues are accessed by reading/writing + * special memory locations. The queue contents are mapped into a shared + * SRAM region with entries managed in a circular buffer. The XScale + * processor can receive interrupts based on queue contents (a condition + * code determines when interrupts should be delivered). + * + * The code here basically replaces the qmgr class in the Intel Access + * Library (IAL). + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * State per AQM hw queue. + * This structure holds q configuration and dispatch state. 
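+ *
+ * One of these exists for each of the 64 AQM queues.  Register addresses
+ * and status bit masks are precomputed at attach time so the queue
+ * read/write/status paths do not have to rederive them, and cb/cbarg
+ * name the callback dispatched for the queue from the queue manager
+ * interrupt.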
+ */ +struct qmgrInfo { + int qSizeInWords; /* queue size in words */ + + uint32_t qOflowStatBitMask; /* overflow status mask */ + int qWriteCount; /* queue write count */ + + bus_size_t qAccRegAddr; /* access register */ + bus_size_t qUOStatRegAddr; /* status register */ + bus_size_t qConfigRegAddr; /* config register */ + int qSizeInEntries; /* queue size in entries */ + + uint32_t qUflowStatBitMask; /* underflow status mask */ + int qReadCount; /* queue read count */ + + /* XXX union */ + uint32_t qStatRegAddr; + uint32_t qStatBitsOffset; + uint32_t qStat0BitMask; + uint32_t qStat1BitMask; + + uint32_t intRegCheckMask; /* interrupt reg check mask */ + void (*cb)(int, void *); /* callback function */ + void *cbarg; /* callback argument */ + int priority; /* dispatch priority */ +#if 0 + /* NB: needed only for A0 parts */ + u_int statusWordOffset; /* status word offset */ + uint32_t statusMask; /* status mask */ + uint32_t statusCheckValue; /* status check value */ +#endif +}; + +struct ixpqmgr_softc { + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_ioh; + struct resource *sc_irq; /* IRQ resource */ + void *sc_ih; /* interrupt handler */ + int sc_rid; /* resource id for irq */ + + struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES]; + /* + * This array contains a list of queue identifiers ordered by + * priority. The table is split logically between queue + * identifiers 0-31 and 32-63. To optimize lookups bit masks + * are kept for the first-32 and last-32 q's. When the + * table needs to be rebuilt mark rebuildTable and it'll + * happen after the next interrupt. + */ + int priorityTable[IX_QMGR_MAX_NUM_QUEUES]; + uint32_t lowPriorityTableFirstHalfMask; + uint32_t uppPriorityTableFirstHalfMask; + int rebuildTable; /* rebuild priorityTable */ + + uint32_t aqmFreeSramAddress; /* SRAM free space */ +}; + +static int qmgr_debug = 0; +SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug, + 0, "IXP425 Q-Manager debug msgs"); +TUNABLE_INT("debug.qmgr", &qmgr_debug); +#define DPRINTF(dev, fmt, ...) do { \ + if (qmgr_debug) printf(fmt, __VA_ARGS__); \ +} while (0) +#define DPRINTFn(n, dev, fmt, ...) 
do { \ + if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \ +} while (0) + +static struct ixpqmgr_softc *ixpqmgr_sc = NULL; + +static void ixpqmgr_rebuild(struct ixpqmgr_softc *); +static void ixpqmgr_intr(void *); + +static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId); +static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId); +static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf); +static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId); +static void aqm_reset(struct ixpqmgr_softc *sc); + +static void +dummyCallback(int qId, void *arg) +{ + /* XXX complain */ +} + +static uint32_t +aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off) +{ + DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off); + return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); +} + +static void +aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val) +{ + DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val); + bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); +} + +static int +ixpqmgr_probe(device_t dev) +{ + device_set_desc(dev, "IXP425 Q-Manager"); + return 0; +} + +static void +ixpqmgr_attach(device_t dev) +{ + struct ixpqmgr_softc *sc = device_get_softc(dev); + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + int i; + + ixpqmgr_sc = sc; + + sc->sc_dev = dev; + sc->sc_iot = sa->sc_iot; + if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, + 0, &sc->sc_ioh)) + panic("%s: Cannot map registers", device_get_name(dev)); + + /* NB: we only use the lower 32 q's */ + sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid, + IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE); + if (!sc->sc_irq) + panic("Unable to allocate the qmgr irqs.\n"); + /* XXX could be a source of entropy */ + bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, + ixpqmgr_intr, NULL, &sc->sc_ih); + + /* NB: softc is pre-zero'd */ + for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) { + struct qmgrInfo *qi = &sc->qinfo[i]; + + qi->cb = dummyCallback; + qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */ + /* + * There are two interrupt registers, 32 bits each. One + * for the lower queues(0-31) and one for the upper + * queues(32-63). Therefore need to mod by 32 i.e the + * min upper queue identifier. + */ + qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID))); + + /* + * Register addresses and bit masks are calculated and + * stored here to optimize QRead, QWrite and QStatusGet + * functions. 
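+ *
+ * For example, for queue 13 (a lower-group queue) the 4-bit status
+ * nibble lives in QUELOWSTAT1 (offset 0x404) at bit offset
+ * (13 & 7) * 4 = 20, while the shared underflow/overflow word is
+ * QUEUOSTAT0 (offset 0x410) with the underflow bit at position 26
+ * and the overflow bit at position 27.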
+ */ + + /* AQM Queue access reg addresses, per queue */ + qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); + qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); + qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i); + + /* AQM Queue lower-group (0-31), only */ + if (i < IX_QMGR_MIN_QUEUPP_QID) { + /* AQM Q underflow/overflow status reg address, per queue */ + qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET + + ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) * + sizeof(uint32_t)); + + /* AQM Q underflow status bit masks for status reg per queue */ + qi->qUflowStatBitMask = + (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) << + ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * + (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); + + /* AQM Q overflow status bit masks for status reg, per queue */ + qi->qOflowStatBitMask = + (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) << + ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * + (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); + + /* AQM Q lower-group (0-31) status reg addresses, per queue */ + qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET + + ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * + sizeof(uint32_t)); + + /* AQM Q lower-group (0-31) status register bit offset */ + qi->qStatBitsOffset = + (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) * + (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD); + } else { /* AQM Q upper-group (32-63), only */ + qi->qUOStatRegAddr = 0; /* XXX */ + + /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */ + qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); + + /* AQM Q upper-group (32-63) Full status register bitmasks */ + qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); + } + } + + sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */ + + ixpqmgr_rebuild(sc); /* build inital priority table */ + aqm_reset(sc); /* reset h/w */ +} + +static void +ixpqmgr_detach(device_t dev) +{ + struct ixpqmgr_softc *sc = device_get_softc(dev); + + aqm_reset(sc); /* disable interrupts */ + bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); + bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq); + bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE); +} + +int +ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel, + void (*cb)(int, void *), void *cbarg) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + struct qmgrInfo *qi = &sc->qinfo[qId]; + + DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n", + __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg); + + /* NB: entry size is always 1 */ + qi->qSizeInWords = qEntries; + + qi->qReadCount = 0; + qi->qWriteCount = 0; + qi->qSizeInEntries = qEntries; /* XXX kept for code clarity */ + + if (cb == NULL) { + /* Reset to dummy callback */ + qi->cb = dummyCallback; + qi->cbarg = 0; + } else { + qi->cb = cb; + qi->cbarg = cbarg; + } + + /* Write the config register; NB must be AFTER qinfo setup */ + aqm_qcfg(sc, qId, ne, nf); + /* + * Account for space just allocated to queue. 
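+ * aqmFreeSramAddress is a byte offset from the start of the queue
+ * configuration/buffer region (0x2000); it starts at 0x100 so the
+ * first buffer lands at 0x2100, and each configured queue advances
+ * it by qSizeInWords * 4 bytes (e.g. 0x200 bytes for a 128-entry
+ * queue with 1-word entries).  aqm_qcfg converted it to the
+ * 16-word-aligned pointer written into the BADDR field above.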
+ */ + sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t)); + + /* Set the interupt source if this queue is in the range 0-31 */ + if (qId < IX_QMGR_MIN_QUEUPP_QID) + aqm_srcsel_write(sc, qId, srcSel); + + if (cb != NULL) /* Enable the interrupt */ + aqm_int_enable(sc, qId); + + sc->rebuildTable = TRUE; + + return 0; /* XXX */ +} + +int +ixpqmgr_qwrite(int qId, uint32_t entry) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + struct qmgrInfo *qi = &sc->qinfo[qId]; + + DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n", + __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries); + + /* write the entry */ + aqm_reg_write(sc, qi->qAccRegAddr, entry); + + /* NB: overflow is available for lower queues only */ + if (qId < IX_QMGR_MIN_QUEUPP_QID) { + int qSize = qi->qSizeInEntries; + /* + * Increment the current number of entries in the queue + * and check for overflow . + */ + if (qi->qWriteCount++ == qSize) { /* check for overflow */ + uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); + int qPtrs; + + /* + * Read the status twice because the status may + * not be immediately ready after the write operation + */ + if ((status & qi->qOflowStatBitMask) || + ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) { + /* + * The queue is full, clear the overflow status bit if set. + */ + aqm_reg_write(sc, qi->qUOStatRegAddr, + status & ~qi->qOflowStatBitMask); + qi->qWriteCount = qSize; + DPRINTFn(5, sc->sc_dev, + "%s(%u, 0x%x) Q full, overflow status cleared\n", + __func__, qId, entry); + return ENOSPC; + } + /* + * No overflow occured : someone is draining the queue + * and the current counter needs to be + * updated from the current number of entries in the queue + */ + + /* calculate number of words in q */ + qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr); + DPRINTFn(2, sc->sc_dev, + "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n", + __func__, qId, entry, qPtrs); + qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f; + + if (qPtrs == 0) { + /* + * The queue may be full at the time of the + * snapshot. Next access will check + * the overflow status again. + */ + qi->qWriteCount = qSize; + } else { + /* convert the number of words to a number of entries */ + qi->qWriteCount = qPtrs & (qSize - 1); + } + } + } + return 0; +} + +int +ixpqmgr_qread(int qId, uint32_t *entry) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + struct qmgrInfo *qi = &sc->qinfo[qId]; + bus_size_t off = qi->qAccRegAddr; + + *entry = aqm_reg_read(sc, off); + + /* + * Reset the current read count : next access to the read function + * will force a underflow status check. + */ + qi->qReadCount = 0; + + /* Check if underflow occurred on the read */ + if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { + /* get the queue status */ + uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); + + if (status & qi->qUflowStatBitMask) { /* clear underflow status */ + aqm_reg_write(sc, qi->qUOStatRegAddr, + status &~ qi->qUflowStatBitMask); + return ENOSPC; + } + } + return 0; +} + +int +ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + struct qmgrInfo *qi = &sc->qinfo[qId]; + uint32_t entry; + bus_size_t off = qi->qAccRegAddr; + + entry = aqm_reg_read(sc, off); + while (--n) { + if (entry == 0) { + /* if we read a NULL entry, stop. We have underflowed */ + break; + } + *p++ = entry; /* store */ + entry = aqm_reg_read(sc, off); + } + *p = entry; + + /* + * Reset the current read count : next access to the read function + * will force a underflow status check. 
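+ * A zero value read back is used as a hint of possible underflow:
+ * the hardware underflow status bit is then consulted and, if set,
+ * cleared and ENOSPC returned to the caller.  Only the lower-group
+ * queues (0-31) have underflow/overflow status.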
+ */ + qi->qReadCount = 0; + + /* Check if underflow occurred on the read */ + if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { + /* get the queue status */ + uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); + + if (status & qi->qUflowStatBitMask) { /* clear underflow status */ + aqm_reg_write(sc, qi->qUOStatRegAddr, + status &~ qi->qUflowStatBitMask); + return ENOSPC; + } + } + return 0; +} + +uint32_t +ixpqmgr_getqstatus(int qId) +{ +#define QLOWSTATMASK \ + ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1) + struct ixpqmgr_softc *sc = ixpqmgr_sc; + const struct qmgrInfo *qi = &sc->qinfo[qId]; + uint32_t status; + + if (qId < IX_QMGR_MIN_QUEUPP_QID) { + /* read the status of a queue in the range 0-31 */ + status = aqm_reg_read(sc, qi->qStatRegAddr); + + /* mask out the status bits relevant only to this queue */ + status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK; + } else { /* read status of a queue in the range 32-63 */ + status = 0; + if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask) + status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */ + if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask) + status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */ + } + return status; +#undef QLOWSTATMASK +} + +uint32_t +ixpqmgr_getqconfig(int qId) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + + return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); +} + +void +ixpqmgr_dump(void) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + int i, a; + + /* status registers */ + printf("0x%04x: %08x %08x %08x %08x\n" + , 0x400 + , aqm_reg_read(sc, 0x400) + , aqm_reg_read(sc, 0x400+4) + , aqm_reg_read(sc, 0x400+8) + , aqm_reg_read(sc, 0x400+12) + ); + printf("0x%04x: %08x %08x %08x %08x\n" + , 0x410 + , aqm_reg_read(sc, 0x410) + , aqm_reg_read(sc, 0x410+4) + , aqm_reg_read(sc, 0x410+8) + , aqm_reg_read(sc, 0x410+12) + ); + printf("0x%04x: %08x %08x %08x %08x\n" + , 0x420 + , aqm_reg_read(sc, 0x420) + , aqm_reg_read(sc, 0x420+4) + , aqm_reg_read(sc, 0x420+8) + , aqm_reg_read(sc, 0x420+12) + ); + printf("0x%04x: %08x %08x %08x %08x\n" + , 0x430 + , aqm_reg_read(sc, 0x430) + , aqm_reg_read(sc, 0x430+4) + , aqm_reg_read(sc, 0x430+8) + , aqm_reg_read(sc, 0x430+12) + ); + /* q configuration registers */ + for (a = 0x2000; a < 0x20ff; a += 32) + printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" + , a + , aqm_reg_read(sc, a) + , aqm_reg_read(sc, a+4) + , aqm_reg_read(sc, a+8) + , aqm_reg_read(sc, a+12) + , aqm_reg_read(sc, a+16) + , aqm_reg_read(sc, a+20) + , aqm_reg_read(sc, a+24) + , aqm_reg_read(sc, a+28) + ); + /* allocated SRAM */ + for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) { + a = 0x2000 + i; + printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" + , a + , aqm_reg_read(sc, a) + , aqm_reg_read(sc, a+4) + , aqm_reg_read(sc, a+8) + , aqm_reg_read(sc, a+12) + , aqm_reg_read(sc, a+16) + , aqm_reg_read(sc, a+20) + , aqm_reg_read(sc, a+24) + , aqm_reg_read(sc, a+28) + ); + } + for (i = 0; i < 16; i++) { + printf("Q[%2d] config 0x%08x status 0x%02x " + "Q[%2d] config 0x%08x status 0x%02x\n" + , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i) + , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16) + ); + } +} + +void +ixpqmgr_notify_enable(int qId, int srcSel) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; +#if 0 + /* Calculate the checkMask and checkValue for this q */ + aqm_calc_statuscheck(sc, qId, srcSel); +#endif + /* Set the interupt source if this queue is in the range 0-31 */ + if (qId < IX_QMGR_MIN_QUEUPP_QID) + aqm_srcsel_write(sc, qId, 
srcSel); + + /* Enable the interrupt */ + aqm_int_enable(sc, qId); +} + +void +ixpqmgr_notify_disable(int qId) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + + aqm_int_disable(sc, qId); +} + +/* + * Rebuild the priority table used by the dispatcher. + */ +static void +ixpqmgr_rebuild(struct ixpqmgr_softc *sc) +{ + int q, pri; + int lowQuePriorityTableIndex, uppQuePriorityTableIndex; + struct qmgrInfo *qi; + + sc->lowPriorityTableFirstHalfMask = 0; + sc->uppPriorityTableFirstHalfMask = 0; + + lowQuePriorityTableIndex = 0; + uppQuePriorityTableIndex = 32; + for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) { + /* low priority q's */ + for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) { + qi = &sc->qinfo[q]; + if (qi->priority == pri) { + /* + * Build the priority table bitmask which match the + * queues of the first half of the priority table. + */ + if (lowQuePriorityTableIndex < 16) { + sc->lowPriorityTableFirstHalfMask |= + qi->intRegCheckMask; + } + sc->priorityTable[lowQuePriorityTableIndex++] = q; + } + } + /* high priority q's */ + for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) { + qi = &sc->qinfo[q]; + if (qi->priority == pri) { + /* + * Build the priority table bitmask which match the + * queues of the first half of the priority table . + */ + if (uppQuePriorityTableIndex < 48) { + sc->uppPriorityTableFirstHalfMask |= + qi->intRegCheckMask; + } + sc->priorityTable[uppQuePriorityTableIndex++] = q; + } + } + } + sc->rebuildTable = FALSE; +} + +/* + * Count the number of leading zero bits in a word, + * and return the same value than the CLZ instruction. + * Note this is similar to the standard ffs function but + * it counts zero's from the MSB instead of the LSB. + * + * word (in) return value (out) + * 0x80000000 0 + * 0x40000000 1 + * ,,, ,,, + * 0x00000002 30 + * 0x00000001 31 + * 0x00000000 32 + * + * The C version of this function is used as a replacement + * for system not providing the equivalent of the CLZ + * assembly language instruction. + * + * Note that this version is big-endian + */ +static unsigned int +_lzcount(uint32_t word) +{ + unsigned int lzcount = 0; + + if (word == 0) + return 32; + while ((word & 0x80000000) == 0) { + word <<= 1; + lzcount++; + } + return lzcount; +} + +static void +ixpqmgr_intr(void *arg) +{ + struct ixpqmgr_softc *sc = ixpqmgr_sc; + uint32_t intRegVal; /* Interrupt reg val */ + struct qmgrInfo *qi; + int priorityTableIndex; /* Priority table index */ + int qIndex; /* Current queue being processed */ + + /* Read the interrupt register */ + intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET); + /* Write back to clear interrupt */ + aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal); + + DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n", + __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET)); + + /* No queue has interrupt register set */ + if (intRegVal != 0) { + /* get the first queue Id from the interrupt register value */ + qIndex = (32 - 1) - _lzcount(intRegVal); + + DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n", + __func__, intRegVal, qIndex); + + /* + * Optimize for single callback case. + */ + qi = &sc->qinfo[qIndex]; + if (intRegVal == qi->intRegCheckMask) { + /* + * Only 1 queue event triggered a notification. + * Call the callback function for this queue + */ + qi->cb(qIndex, qi->cbarg); + } else { + /* + * The event is triggered by more than 1 queue, + * the queue search will start from the beginning + * or the middle of the priority table. 
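+ *
+ * lowPriorityTableFirstHalfMask collects the interrupt-register bits
+ * of the queues placed in entries 0-15 of the priority table, so a
+ * single test decides whether the scan starts at index 0 or 16.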
+ * + * The search will end when all the bits of the interrupt + * register are cleared. There is no need to maintain + * a seperate value and test it at each iteration. + */ + if (intRegVal & sc->lowPriorityTableFirstHalfMask) { + priorityTableIndex = 0; + } else { + priorityTableIndex = 16; + } + /* + * Iterate over the priority table until all the bits + * of the interrupt register are cleared. + */ + do { + qIndex = sc->priorityTable[priorityTableIndex++]; + qi = &sc->qinfo[qIndex]; + + /* If this queue caused this interrupt to be raised */ + if (intRegVal & qi->intRegCheckMask) { + /* Call the callback function for this queue */ + qi->cb(qIndex, qi->cbarg); + /* Clear the interrupt register bit */ + intRegVal &= ~qi->intRegCheckMask; + } + } while (intRegVal); + } + } + + /* Rebuild the priority table if needed */ + if (sc->rebuildTable) + ixpqmgr_rebuild(sc); +} + +#if 0 +/* + * Generate the parameters used to check if a Q's status matches + * the specified source select. We calculate which status word + * to check (statusWordOffset), the value to check the status + * against (statusCheckValue) and the mask (statusMask) to mask + * out all but the bits to check in the status word. + */ +static void +aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel) +{ + struct qmgrInfo *qi = &qinfo[qId]; + uint32_t shiftVal; + + if (qId < IX_QMGR_MIN_QUEUPP_QID) { + switch (srcSel) { + case IX_QMGR_Q_SOURCE_ID_E: + qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK; + qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NE: + qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK; + qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NF: + qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK; + qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_F: + qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK; + qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NOT_E: + qi->statusCheckValue = 0; + qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NOT_NE: + qi->statusCheckValue = 0; + qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NOT_NF: + qi->statusCheckValue = 0; + qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; + break; + case IX_QMGR_Q_SOURCE_ID_NOT_F: + qi->statusCheckValue = 0; + qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; + break; + default: + /* Should never hit */ + IX_OSAL_ASSERT(0); + break; + } + + /* One nibble of status per queue so need to shift the + * check value and mask out to the correct position. + */ + shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * + IX_QMGR_QUELOWSTAT_BITS_PER_Q; + + /* Calculate the which status word to check from the qId, + * 8 Qs status per word + */ + qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD; + + qi->statusCheckValue <<= shiftVal; + qi->statusMask <<= shiftVal; + } else { + /* One status word */ + qi->statusWordOffset = 0; + /* Single bits per queue and int source bit hardwired NE, + * Qs start at 32. 
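+ *
+ * There is no source select for the upper queues: each has just a
+ * Nearly Empty bit in QUEUPPSTAT0 and a Full bit in QUEUPPSTAT1, with
+ * the interrupt source fixed to Nearly Empty, which is why
+ * aqm_srcsel_write() is only called for queues 0-31.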
+ */ + qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID); + qi->statusCheckValue = qi->statusMask; + } +} +#endif + +static void +aqm_int_enable(struct ixpqmgr_softc *sc, int qId) +{ + bus_size_t reg; + uint32_t v; + + if (qId < IX_QMGR_MIN_QUEUPP_QID) + reg = IX_QMGR_QUEIEREG0_OFFSET; + else + reg = IX_QMGR_QUEIEREG1_OFFSET; + v = aqm_reg_read(sc, reg); + aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); + + DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", + __func__, qId, reg, v, aqm_reg_read(sc, reg)); +} + +static void +aqm_int_disable(struct ixpqmgr_softc *sc, int qId) +{ + bus_size_t reg; + uint32_t v; + + if (qId < IX_QMGR_MIN_QUEUPP_QID) + reg = IX_QMGR_QUEIEREG0_OFFSET; + else + reg = IX_QMGR_QUEIEREG1_OFFSET; + v = aqm_reg_read(sc, reg); + aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); + + DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", + __func__, qId, reg, v, aqm_reg_read(sc, reg)); +} + +static unsigned +log2(unsigned n) +{ + unsigned count; + /* + * N.B. this function will return 0 if supplied 0. + */ + for (count = 0; n/2; count++) + n /= 2; + return count; +} + +static __inline unsigned +toAqmEntrySize(int entrySize) +{ + /* entrySize 1("00"),2("01"),4("10") */ + return log2(entrySize); +} + +static __inline unsigned +toAqmBufferSize(unsigned bufferSizeInWords) +{ + /* bufferSize 16("00"),32("01),64("10"),128("11") */ + return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE); +} + +static __inline unsigned +toAqmWatermark(int watermark) +{ + /* + * Watermarks 0("000"),1("001"),2("010"),4("011"), + * 8("100"),16("101"),32("110"),64("111") + */ + return log2(2 * watermark); +} + +static void +aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf) +{ + const struct qmgrInfo *qi = &sc->qinfo[qId]; + uint32_t qCfg; + uint32_t baseAddress; + + /* Build config register */ + qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) << + IX_QMGR_Q_CONFIG_ESIZE_OFFSET) + | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) << + IX_QMGR_Q_CONFIG_BSIZE_OFFSET); + + /* baseAddress, calculated relative to start address */ + baseAddress = sc->aqmFreeSramAddress; + + /* base address must be word-aligned */ + KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0, + ("address not word-aligned")); + + /* Now convert to a 16 word pointer as required by QUECONFIG register */ + baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT; + qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET; + + /* set watermarks */ + qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET) + | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET); + + DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n", + __func__, qId, ne, nf, + aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)), + qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); + + aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg); +} + +static void +aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId) +{ + bus_size_t off; + uint32_t v; + + /* + * Calculate the register offset; multiple queues split across registers + */ + off = IX_QMGR_INT0SRCSELREG0_OFFSET + + ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t)); + + v = aqm_reg_read(sc, off); + if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) { + /* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */ + v |= 0x7; + } else { + const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD; + uint32_t mask; + int qshift; + + qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq; + mask = ((1 << bpq) - 1) << qshift; 
/* q's status mask */ + + /* merge sourceId */ + v = (v &~ mask) | ((sourceId << qshift) & mask); + } + + DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n", + __func__, qId, sourceId, aqm_reg_read(sc, off), v, off); + aqm_reg_write(sc, off, v); +} + +/* + * Reset AQM registers to default values. + */ +static void +aqm_reset(struct ixpqmgr_softc *sc) +{ + int i; + + /* Reset queues 0..31 status registers 0..3 */ + aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET, + IX_QMGR_QUELOWSTAT_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET, + IX_QMGR_QUELOWSTAT_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET, + IX_QMGR_QUELOWSTAT_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET, + IX_QMGR_QUELOWSTAT_RESET_VALUE); + + /* Reset underflow/overflow status registers 0..1 */ + aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET, + IX_QMGR_QUEUOSTAT_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET, + IX_QMGR_QUEUOSTAT_RESET_VALUE); + + /* Reset queues 32..63 nearly empty status registers */ + aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET, + IX_QMGR_QUEUPPSTAT0_RESET_VALUE); + + /* Reset queues 32..63 full status registers */ + aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET, + IX_QMGR_QUEUPPSTAT1_RESET_VALUE); + + /* Reset int0 status flag source select registers 0..3 */ + aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET, + IX_QMGR_INT0SRCSELREG_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET, + IX_QMGR_INT0SRCSELREG_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET, + IX_QMGR_INT0SRCSELREG_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET, + IX_QMGR_INT0SRCSELREG_RESET_VALUE); + + /* Reset queue interrupt enable register 0..1 */ + aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET, + IX_QMGR_QUEIEREG_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET, + IX_QMGR_QUEIEREG_RESET_VALUE); + + /* Reset queue interrupt register 0..1 */ + aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); + aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); + + /* Reset queue configuration words 0..63 */ + for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) + aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr, + IX_QMGR_QUECONFIG_RESET_VALUE); + + /* XXX zero SRAM to simplify debugging */ + for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET; + i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t)) + aqm_reg_write(sc, i, 0); +} + +static device_method_t ixpqmgr_methods[] = { + DEVMETHOD(device_probe, ixpqmgr_probe), + DEVMETHOD(device_attach, ixpqmgr_attach), + DEVMETHOD(device_detach, ixpqmgr_detach), + + { 0, 0 } +}; + +static driver_t ixpqmgr_driver = { + "ixpqmgr", + ixpqmgr_methods, + sizeof(struct ixpqmgr_softc), +}; +static devclass_t ixpqmgr_devclass; + +DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/ixp425_qmgr.h b/sys/arm/xscale/ixp425/ixp425_qmgr.h new file mode 100644 index 000000000000..af021d553990 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_qmgr.h @@ -0,0 +1,243 @@ +/*- + * Copyright (c) 2006 Sam Leffler, Errno Consulting + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * without modification. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any + * redistribution must be conditioned upon including a substantially + * similar Disclaimer requirement for further binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, + * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGES. + * + * $FreeBSD$ + */ + +/*- + * Copyright (c) 2001-2005, Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+*/ + +#ifndef ARM_XSCALE_IXP425_QMGR_H +#define ARM_XSCALE_IXP425_QMGR_H + +#define IX_QMGR_MAX_NUM_QUEUES 64 +#define IX_QMGR_MIN_QUEUPP_QID 32 + +#define IX_QMGR_MIN_ENTRY_SIZE_IN_WORDS 16 + +/* Total size of SRAM */ +#define IX_QMGR_AQM_SRAM_SIZE_IN_BYTES 0x4000 + +#define IX_QMGR_Q_PRIORITY_0 0 +#define IX_QMGR_Q_PRIORITY_1 1 +#define IX_QMGR_Q_PRIORITY_2 2 +#define IX_QMGR_NUM_PRIORITY_LEVELS 3 /* number of priority levels */ + +#define IX_QMGR_Q_STATUS_E_BIT_MASK 0x1 /* Empty */ +#define IX_QMGR_Q_STATUS_NE_BIT_MASK 0x2 /* Nearly Empty */ +#define IX_QMGR_Q_STATUS_NF_BIT_MASK 0x4 /* Nearly Full */ +#define IX_QMGR_Q_STATUS_F_BIT_MASK 0x8 /* Full */ +#define IX_QMGR_Q_STATUS_UF_BIT_MASK 0x10 /* Underflow */ +#define IX_QMGR_Q_STATUS_OF_BIT_MASK 0x20 /* Overflow */ + +#define IX_QMGR_Q_SOURCE_ID_E 0 /* Q Empty after last read */ +#define IX_QMGR_Q_SOURCE_ID_NE 1 /* Q Nearly Empty after last read */ +#define IX_QMGR_Q_SOURCE_ID_NF 2 /* Q Nearly Full after last write */ +#define IX_QMGR_Q_SOURCE_ID_F 3 /* Q Full after last write */ +#define IX_QMGR_Q_SOURCE_ID_NOT_E 4 /* Q !Empty after last write */ +#define IX_QMGR_Q_SOURCE_ID_NOT_NE 5 /* Q !Nearly Empty after last write */ +#define IX_QMGR_Q_SOURCE_ID_NOT_NF 6 /* Q !Nearly Full after last read */ +#define IX_QMGR_Q_SOURCE_ID_NOT_F 7 /* Q !Full after last read */ + +#define IX_QMGR_UNDERFLOW_BIT_OFFSET 0x0 /* underflow bit mask */ +#define IX_QMGR_OVERFLOW_BIT_OFFSET 0x1 /* overflow bit mask */ + +#define IX_QMGR_QUEACC0_OFFSET 0x0000 /* q 0 access register */ +#define IX_QMGR_QUEACC_SIZE 0x4/*words*/ + +#define IX_QMGR_QUELOWSTAT0_OFFSET 0x400 /* Q status, q's 0-7 */ +#define IX_QMGR_QUELOWSTAT1_OFFSET 0x404 /* Q status, q's 8-15 */ +#define IX_QMGR_QUELOWSTAT2_OFFSET 0x408 /* Q status, q's 16-23 */ +#define IX_QMGR_QUELOWSTAT3_OFFSET 0x40c /* Q status, q's 24-31 */ + +/* Queue status register Q status bits mask */ +#define IX_QMGR_QUELOWSTAT_QUE_STS_BITS_MASK 0xF +/* Size of queue 0-31 status register */ +#define IX_QMGR_QUELOWSTAT_SIZE 0x4 /*words*/ +#define IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD 8 /* # status/word */ + +#define IX_QMGR_QUEUOSTAT0_OFFSET 0x410 /* Q UF/OF status, q's 0-15 */ +#define IX_QMGR_QUEUOSTAT1_OFFSET 0x414 /* Q UF/OF status, q's 16-31 */ + +#define IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD 16 /* # UF/OF status/word */ + +#define IX_QMGR_QUEUPPSTAT0_OFFSET 0x418 /* NE status, q's 32-63 */ +#define IX_QMGR_QUEUPPSTAT1_OFFSET 0x41c /* F status, q's 32-63 */ + +#define IX_QMGR_INT0SRCSELREG0_OFFSET 0x420 /* INT src select, q's 0-7 */ +#define IX_QMGR_INT0SRCSELREG1_OFFSET 0x424 /* INT src select, q's 8-15 */ +#define IX_QMGR_INT0SRCSELREG2_OFFSET 0x428 /* INT src select, q's 16-23 */ +#define IX_QMGR_INT0SRCSELREG3_OFFSET 0x42c /* INT src select, q's 24-31 */ + +#define IX_QMGR_INTSRC_NUM_QUE_PER_WORD 8 /* # INT src select/word */ + +#define IX_QMGR_QUEIEREG0_OFFSET 0x430 /* INT enable, q's 0-31 */ +#define IX_QMGR_QUEIEREG1_OFFSET 0x434 /* INT enable, q's 32-63 */ +#define IX_QMGR_QINTREG0_OFFSET 0x438 /* INT status, q's 0-31 */ +#define IX_QMGR_QINTREG1_OFFSET 0x43c /* INT status, q's 32-63 */ + +#define IX_QMGR_QUECONFIG_BASE_OFFSET 0x2000 /* Q config register, q 0 */ + +#define IX_QMGR_QUECONFIG_SIZE 0x100 /* total size of Q config regs*/ + +#define IX_QMGR_QUEBUFFER_SPACE_OFFSET 0x2100 /* start of SRAM */ + +/* Total bits in a word */ +#define BITS_PER_WORD 32 + +/* Size of queue buffer space */ +#define IX_QMGR_QUE_BUFFER_SPACE_SIZE 0x1F00 + +/* + * This macro will return the address of the access register for the + 
* queue specified by qId + */ +#define IX_QMGR_Q_ACCESS_ADDR_GET(qId)\ + (((qId) * (IX_QMGR_QUEACC_SIZE * sizeof(uint32_t)))\ + + IX_QMGR_QUEACC0_OFFSET) + +/* + * Bit location of bit-3 of INT0SRCSELREG0 register to enabled + * sticky interrupt register. + */ +#define IX_QMGR_INT0SRCSELREG0_BIT3 3 + +/* + * These defines are the bit offsets of the various fields of + * the queue configuration register. + */ +#if 0 +#define IX_QMGR_Q_CONFIG_WRPTR_OFFSET 0x00 +#define IX_QMGR_Q_CONFIG_RDPTR_OFFSET 0x07 +#define IX_QMGR_Q_CONFIG_BADDR_OFFSET 0x0E +#define IX_QMGR_Q_CONFIG_ESIZE_OFFSET 0x16 +#define IX_QMGR_Q_CONFIG_BSIZE_OFFSET 0x18 +#define IX_QMGR_Q_CONFIG_NE_OFFSET 0x1A +#define IX_QMGR_Q_CONFIG_NF_OFFSET 0x1D + +#define IX_QMGR_NE_NF_CLEAR_MASK 0x03FFFFFF +#define IX_QMGR_NE_MASK 0x7 +#define IX_QMGR_NF_MASK 0x7 +#define IX_QMGR_SIZE_MASK 0x3 +#define IX_QMGR_ENTRY_SIZE_MASK 0x3 +#define IX_QMGR_BADDR_MASK 0x003FC000 +#define IX_QMGR_RDPTR_MASK 0x7F +#define IX_QMGR_WRPTR_MASK 0x7F +#define IX_QMGR_RDWRPTR_MASK 0x00003FFF +#else +#define IX_QMGR_Q_CONFIG_WRPTR_OFFSET 0 +#define IX_QMGR_WRPTR_MASK 0x7F +#define IX_QMGR_Q_CONFIG_RDPTR_OFFSET 7 +#define IX_QMGR_RDPTR_MASK 0x7F +#define IX_QMGR_Q_CONFIG_BADDR_OFFSET 14 +#define IX_QMGR_BADDR_MASK 0x3FC000 /* XXX not used */ +#define IX_QMGR_Q_CONFIG_ESIZE_OFFSET 22 +#define IX_QMGR_ENTRY_SIZE_MASK 0x3 +#define IX_QMGR_Q_CONFIG_BSIZE_OFFSET 24 +#define IX_QMGR_SIZE_MASK 0x3 +#define IX_QMGR_Q_CONFIG_NE_OFFSET 26 +#define IX_QMGR_NE_MASK 0x7 +#define IX_QMGR_Q_CONFIG_NF_OFFSET 29 +#define IX_QMGR_NF_MASK 0x7 + +#define IX_QMGR_RDWRPTR_MASK 0x00003FFF +#define IX_QMGR_NE_NF_CLEAR_MASK 0x03FFFFFF +#endif + +#define IX_QMGR_BASE_ADDR_16_WORD_ALIGN 64 +#define IX_QMGR_BASE_ADDR_16_WORD_SHIFT 6 + +#define IX_QMGR_AQM_ADDRESS_SPACE_SIZE_IN_WORDS 0x1000 + +/* Base address of AQM SRAM */ +#define IX_QMGR_AQM_SRAM_BASE_ADDRESS_OFFSET \ +((IX_QMGR_QUECONFIG_BASE_OFFSET) + (IX_QMGR_QUECONFIG_SIZE)) + +/* Min buffer size used for generating buffer size in QUECONFIG */ +#define IX_QMGR_MIN_BUFFER_SIZE 16 + +/* Reset values of QMgr hardware registers */ +#define IX_QMGR_QUELOWSTAT_RESET_VALUE 0x33333333 +#define IX_QMGR_QUEUOSTAT_RESET_VALUE 0x00000000 +#define IX_QMGR_QUEUPPSTAT0_RESET_VALUE 0xFFFFFFFF +#define IX_QMGR_QUEUPPSTAT1_RESET_VALUE 0x00000000 +#define IX_QMGR_INT0SRCSELREG_RESET_VALUE 0x00000000 +#define IX_QMGR_QUEIEREG_RESET_VALUE 0x00000000 +#define IX_QMGR_QINTREG_RESET_VALUE 0xFFFFFFFF +#define IX_QMGR_QUECONFIG_RESET_VALUE 0x00000000 + +#define IX_QMGR_QUELOWSTAT_BITS_PER_Q \ + (BITS_PER_WORD/IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) + +#define IX_QMGR_QUELOWSTAT_QID_MASK 0x7 +#define IX_QMGR_Q_CONFIG_ADDR_GET(qId)\ + (((qId) * sizeof(uint32_t)) + IX_QMGR_QUECONFIG_BASE_OFFSET) + +#define IX_QMGR_ENTRY1_OFFSET 0 +#define IX_QMGR_ENTRY2_OFFSET 1 +#define IX_QMGR_ENTRY4_OFFSET 3 + +int ixpqmgr_qconfig(int qId, int qSizeInWords, int ne, int nf, int srcSel, + void (*cb)(int, void *), void *cbarg); +int ixpqmgr_qwrite(int qId, uint32_t entry); +int ixpqmgr_qread(int qId, uint32_t *entry); +int ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p); +uint32_t ixpqmgr_getqstatus(int qId); +uint32_t ixpqmgr_getqconfig(int qId); +void ixpqmgr_notify_enable(int qId, int srcSel); +void ixpqmgr_notify_disable(int qId); +void ixpqmgr_dump(void); + +#endif /* ARM_XSCALE_IXP425_QMGR_H */ diff --git a/sys/arm/xscale/ixp425/ixp425_space.c b/sys/arm/xscale/ixp425/ixp425_space.c new file mode 100644 index 000000000000..816ff5055680 --- /dev/null +++ 
b/sys/arm/xscale/ixp425/ixp425_space.c @@ -0,0 +1,215 @@ +/* $NetBSD: ixp425_space.c,v 1.6 2006/04/10 03:36:03 simonb Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * bus_space I/O functions for ixp425 + */ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +/* Proto types for all the bus_space structure functions */ +bs_protos(ixp425); +bs_protos(generic); +bs_protos(generic_armv4); + +struct bus_space ixp425_bs_tag = { + /* cookie */ + .bs_cookie = (void *) 0, + + /* mapping/unmapping */ + .bs_map = ixp425_bs_map, + .bs_unmap = ixp425_bs_unmap, + .bs_subregion = ixp425_bs_subregion, + + /* allocation/deallocation */ + .bs_alloc = ixp425_bs_alloc, + .bs_free = ixp425_bs_free, + + /* barrier */ + .bs_barrier = ixp425_bs_barrier, + + /* read (single) */ + .bs_r_1 = generic_bs_r_1, + .bs_r_2 = generic_armv4_bs_r_2, + .bs_r_4 = generic_bs_r_4, + .bs_r_8 = NULL, + + /* read multiple */ + .bs_rm_1 = generic_bs_rm_1, + .bs_rm_2 = generic_armv4_bs_rm_2, + .bs_rm_4 = generic_bs_rm_4, + .bs_rm_8 = NULL, + + /* read region */ + .bs_rr_1 = generic_bs_rr_1, + .bs_rr_2 = generic_armv4_bs_rr_2, + .bs_rr_4 = generic_bs_rr_4, + .bs_rr_8 = NULL, + + /* write (single) */ + .bs_w_1 = generic_bs_w_1, + .bs_w_2 = generic_armv4_bs_w_2, + .bs_w_4 = generic_bs_w_4, + .bs_w_8 = NULL, + + /* write multiple */ + .bs_wm_1 = generic_bs_wm_1, + .bs_wm_2 = generic_armv4_bs_wm_2, + .bs_wm_4 = generic_bs_wm_4, + .bs_wm_8 = NULL, + + /* write region */ + .bs_wr_1 = generic_bs_wr_1, + .bs_wr_2 = generic_armv4_bs_wr_2, + .bs_wr_4 = generic_bs_wr_4, + .bs_wr_8 = NULL, + + /* set multiple */ + /* XXX not implemented */ + + /* set region */ + .bs_sr_1 = NULL, + .bs_sr_2 = generic_armv4_bs_sr_2, + .bs_sr_4 = generic_bs_sr_4, + .bs_sr_8 = NULL, + + /* copy */ + .bs_c_1 = NULL, + .bs_c_2 = generic_armv4_bs_c_2, + .bs_c_4 = NULL, + .bs_c_8 = NULL, +}; + +int +ixp425_bs_map(void *t, bus_addr_t bpa, bus_size_t size, + int cacheable, bus_space_handle_t *bshp) +{ + const struct pmap_devmap *pd; + vm_paddr_t startpa, endpa, pa, offset; + vm_offset_t va; + pt_entry_t *pte; + + if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) { + /* Device was statically mapped. */ + *bshp = pd->pd_va + (bpa - pd->pd_pa); + return (0); + } + + endpa = round_page(bpa + size); + offset = bpa & PAGE_MASK; + startpa = trunc_page(bpa); + + va = kmem_alloc(kernel_map, endpa - startpa); + if (va == 0) + return (ENOMEM); + + *bshp = va + offset; + + for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) { + pmap_kenter(va, pa); + pte = vtopte(va); + *pte &= ~L2_S_CACHE_MASK; + PTE_SYNC(pte); + } + + return (0); +} + +void +ixp425_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size) +{ + vm_offset_t va, endva; + + if (pmap_devmap_find_va((vm_offset_t)t, size) != NULL) { + /* Device was statically mapped; nothing to do. 
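+ * (The range falls inside the pmap_devmap table consulted by
+ * ixp425_bs_map() above; those mappings are established at bootstrap
+ * and are never torn down.)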
*/ + return; + } + + endva = round_page((vm_offset_t)t + size); + va = trunc_page((vm_offset_t)t); + + while (va < endva) { + pmap_kremove(va); + va += PAGE_SIZE; + } + kmem_free(kernel_map, va, endva - va); +} + +int +ixp425_bs_alloc(void *t, bus_addr_t rstart, bus_addr_t rend, + bus_size_t size, bus_size_t alignment, bus_size_t boundary, int cacheable, + bus_addr_t *bpap, bus_space_handle_t *bshp) +{ + panic("ixp425_bs_alloc(): not implemented"); +} + +void +ixp425_bs_free(void *t, bus_space_handle_t bsh, bus_size_t size) +{ + panic("ixp425_bs_free(): not implemented"); +} + +int +ixp425_bs_subregion(void *t, bus_space_handle_t bsh, bus_size_t offset, + bus_size_t size, bus_space_handle_t *nbshp) +{ + *nbshp = bsh + offset; + return (0); +} + +void +ixp425_bs_barrier(void *t, bus_space_handle_t bsh, bus_size_t offset, + bus_size_t len, int flags) +{ + /* Nothing to do. */ +} diff --git a/sys/arm/xscale/ixp425/ixp425_timer.c b/sys/arm/xscale/ixp425/ixp425_timer.c new file mode 100644 index 000000000000..0fa9d1f1419b --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_timer.c @@ -0,0 +1,267 @@ +/* $NetBSD: ixp425_timer.c,v 1.11 2006/04/10 03:36:03 simonb Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static uint32_t counts_per_hz; + +/* callback functions for intr_functions */ +void ixpclk_intr(void *); + +struct ixpclk_softc { + device_t sc_dev; + bus_addr_t sc_baseaddr; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_ioh; +}; + +static unsigned ixp425_timer_get_timecount(struct timecounter *tc); + +#ifndef IXP425_CLOCK_FREQ +#define COUNTS_PER_SEC 66666600 /* 66MHz */ +#else +#define COUNTS_PER_SEC IXP425_CLOCK_FREQ +#endif +#define COUNTS_PER_USEC ((COUNTS_PER_SEC / 1000000) + 1) + +static struct ixpclk_softc *ixpclk_sc = NULL; + +#define GET_TS_VALUE(sc) (*(volatile u_int32_t *) \ + (IXP425_TIMER_VBASE + IXP425_OST_TS)) + +static struct timecounter ixp425_timer_timecounter = { + ixp425_timer_get_timecount, /* get_timecount */ + NULL, /* no poll_pps */ + ~0u, /* counter_mask */ + COUNTS_PER_SEC, /* frequency */ + "IXP425 Timer", /* name */ + 1000, /* quality */ +}; + +static int +ixpclk_probe(device_t dev) +{ + device_set_desc(dev, "IXP425 Timer"); + return (0); +} + +static int +ixpclk_attach(device_t dev) +{ + struct ixpclk_softc *sc = device_get_softc(dev); + struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); + + ixpclk_sc = sc; + + sc->sc_dev = dev; + sc->sc_iot = sa->sc_iot; + sc->sc_baseaddr = IXP425_TIMER_HWBASE; + + if (bus_space_map(sc->sc_iot, sc->sc_baseaddr, 8, 0, + &sc->sc_ioh)) + panic("%s: Cannot map registers", device_get_name(dev)); + + return (0); +} + +static device_method_t ixpclk_methods[] = { + DEVMETHOD(device_probe, ixpclk_probe), + DEVMETHOD(device_attach, ixpclk_attach), + {0, 0}, +}; + +static driver_t ixpclk_driver = { + "ixpclk", + ixpclk_methods, + sizeof(struct ixpclk_softc), +}; +static devclass_t ixpclk_devclass; + +DRIVER_MODULE(ixpclk, ixp, ixpclk_driver, ixpclk_devclass, 0, 0); +static unsigned +ixp425_timer_get_timecount(struct timecounter *tc) +{ + uint32_t ret; + + ret = GET_TS_VALUE(sc); + return (ret); +} + +/* + * cpu_initclocks: + * + * Initialize the clock and get them going. + */ +void +cpu_initclocks(void) +{ + struct ixpclk_softc* sc = ixpclk_sc; + struct resource *irq; + device_t dev = sc->sc_dev; + u_int oldirqstate; + int rid = 0; + void *ihl; + + if (hz < 50 || COUNTS_PER_SEC % hz) { + printf("Cannot get %d Hz clock; using 100 Hz\n", hz); + hz = 100; + } + tick = 1000000 / hz; /* number of microseconds between interrupts */ + + /* + * We only have one timer available; stathz and profhz are + * always left as 0 (the upper-layer clock code deals with + * this situation). + */ + if (stathz != 0) + printf("Cannot get %d Hz statclock\n", stathz); + stathz = 0; + + if (profhz != 0) + printf("Cannot get %d Hz profclock\n", profhz); + profhz = 0; + + /* Report the clock frequency. */ + + oldirqstate = disable_interrupts(I32_bit); + + irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, IXP425_INT_TMR0, + IXP425_INT_TMR0, 1, RF_ACTIVE); + if (!irq) + panic("Unable to setup the clock irq handler.\n"); + else + bus_setup_intr(dev, irq, INTR_TYPE_CLK | INTR_FAST, + ixpclk_intr, NULL, &ihl); + + /* Set up the new clock parameters. 
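+ * counts_per_hz is COUNTS_PER_SEC / hz; with the default 66666600 Hz
+ * reference and hz = 100 that is 666666 ticks (~10 ms).  The low two
+ * bits of the reload register hold the enable/one-shot flags, so the
+ * count is masked with TIMERRELOAD_MASK before OST_TIMER_EN is or'd
+ * in.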
*/ + + /* clear interrupt */ + bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, + OST_WARM_RESET | OST_WDOG_INT | OST_TS_INT | + OST_TIM1_INT | OST_TIM0_INT); + + counts_per_hz = COUNTS_PER_SEC / hz; + + /* reload value & Timer enable */ + bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_TIM0_RELOAD, + (counts_per_hz & TIMERRELOAD_MASK) | OST_TIMER_EN); + + tc_init(&ixp425_timer_timecounter); + restore_interrupts(oldirqstate); + rid = 0; +} + + +/* + * DELAY: + * + * Delay for at least N microseconds. + */ +void +DELAY(int n) +{ + u_int32_t first, last; + int usecs; + + if (n == 0) + return; + + /* + * Clamp the timeout at a maximum value (about 32 seconds with + * a 66MHz clock). *Nobody* should be delay()ing for anywhere + * near that length of time and if they are, they should be hung + * out to dry. + */ + if (n >= (0x80000000U / COUNTS_PER_USEC)) + usecs = (0x80000000U / COUNTS_PER_USEC) - 1; + else + usecs = n * COUNTS_PER_USEC; + + /* Note: Timestamp timer counts *up*, unlike the other timers */ + first = GET_TS_VALUE(); + + while (usecs > 0) { + last = GET_TS_VALUE(); + usecs -= (int)(last - first); + first = last; + } +} + +/* + * ixpclk_intr: + * + * Handle the hardclock interrupt. + */ +void +ixpclk_intr(void *arg) +{ + struct ixpclk_softc* sc = ixpclk_sc; + struct trapframe *frame = arg; + + bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS, + OST_TIM0_INT); + + hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame)); +} + +void +cpu_startprofclock(void) +{ +} + +void +cpu_stopprofclock(void) +{ +} diff --git a/sys/arm/xscale/ixp425/ixp425_wdog.c b/sys/arm/xscale/ixp425/ixp425_wdog.c new file mode 100644 index 000000000000..a94ef0a83c92 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425_wdog.c @@ -0,0 +1,118 @@ +/*- + * Copyright (c) 2006 Sam Leffler. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +__FBSDID("$FreeBSD$"); + +/* + * IXP425 Watchdog Timer Support. 
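+ *
+ * The watchdog(9) command encodes the requested timeout as 2^u
+ * nanoseconds in its low bits.  The handler below accepts u in the
+ * range 4..35 and programs the watchdog counter, clocked at
+ * ~66.66 MHz (~15 ns per tick), with 2 << (u - 4) = 2^u / 8 ticks as
+ * a rough stand-in for the exact 2^u / 15, so the real period comes
+ * out close to twice the requested value.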
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ixpwdog_softc { + device_t sc_dev; +}; + +static __inline uint32_t +RD4(struct ixpwdog_softc *sc, bus_size_t off) +{ + return bus_space_read_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, off); +} + +static __inline void +WR4(struct ixpwdog_softc *sc, bus_size_t off, uint32_t val) +{ + bus_space_write_4(&ixp425_bs_tag, IXP425_TIMER_VBASE, off, val); +} + +static void +ixp425_watchdog(void *arg, u_int cmd, int *error) +{ + struct ixpwdog_softc *sc = arg; + u_int u = cmd & WD_INTERVAL; + + WR4(sc, IXP425_OST_WDOG_KEY, OST_WDOG_KEY_MAJICK); + if (cmd && 4 <= u && u <= 35) { + WR4(sc, IXP425_OST_WDOG_ENAB, 0); + /* approximate 66.66MHz cycles */ + WR4(sc, IXP425_OST_WDOG, 2<<(u - 4)); + /* NB: reset on timer expiration */ + WR4(sc, IXP425_OST_WDOG_ENAB, + OST_WDOG_ENAB_CNT_ENA | OST_WDOG_ENAB_RST_ENA); + *error = 0; + } else { + /* disable watchdog */ + WR4(sc, IXP425_OST_WDOG_ENAB, 0); + } + WR4(sc, IXP425_OST_WDOG_KEY, 0); +} + +static int +ixpwdog_probe(device_t dev) +{ + device_set_desc(dev, "IXP425 Watchdog Timer"); + return (0); +} + +static int +ixpwdog_attach(device_t dev) +{ + struct ixpwdog_softc *sc = device_get_softc(dev); + + sc->sc_dev = dev; + + EVENTHANDLER_REGISTER(watchdog_list, ixp425_watchdog, sc, 0); + return (0); +} + +static device_method_t ixpwdog_methods[] = { + DEVMETHOD(device_probe, ixpwdog_probe), + DEVMETHOD(device_attach, ixpwdog_attach), + {0, 0}, +}; + +static driver_t ixpwdog_driver = { + "ixpwdog", + ixpwdog_methods, + sizeof(struct ixpwdog_softc), +}; +static devclass_t ixpwdog_devclass; +DRIVER_MODULE(ixpwdog, ixp, ixpwdog_driver, ixpwdog_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/ixp425reg.h b/sys/arm/xscale/ixp425/ixp425reg.h new file mode 100644 index 000000000000..55e1fd938c94 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425reg.h @@ -0,0 +1,582 @@ +/* $NetBSD: ixp425reg.h,v 1.19 2005/12/11 12:16:51 christos Exp $ */ +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + * + */ + +#ifndef _IXP425REG_H_ +#define _IXP425REG_H_ + +/* + * Physical memory map for the Intel IXP425 + */ +/* + * CC00 00FF --------------------------- + * SDRAM Configuration Registers + * CC00 0000 --------------------------- + * + * C800 BFFF --------------------------- + * System and Peripheral Registers + * C800 0000 --------------------------- + * Expansion Bus Configuration Registers + * C400 0000 --------------------------- + * PCI Configuration and Status Registers + * C000 0000 --------------------------- + * + * 6400 0000 --------------------------- + * Queue manager + * 6000 0000 --------------------------- + * Expansion Bus Data + * 5000 0000 --------------------------- + * PCI Data + * 4800 0000 --------------------------- + * + * 4000 0000 --------------------------- + * SDRAM + * 1000 0000 --------------------------- + */ + +/* + * Virtual memory map for the Intel IXP425 integrated devices + */ +/* + * FFFF FFFF --------------------------- + * + * FC00 0000 --------------------------- + * PCI Data (memory space) + * F800 0000 --------------------------- + * + * F020 1000 --------------------------- + * SDRAM Controller + * F020 0000 --------------------------- + * + * F001 2000 --------------------------- + * PCI Configuration and Status Registers + * F001 1000 --------------------------- + * Expansion bus Configuration Registers + * F001 0000 --------------------------- + * System and Peripheral Registers + * VA F000 0000 = PA C800 0000 (SIZE 0x10000) + * F000 0000 --------------------------- + * + * 0000 0000 --------------------------- + * + */ + +/* Physical/Virtual address for I/O space */ + +#define IXP425_IO_VBASE 0xf0000000UL +#define IXP425_IO_HWBASE 0xc8000000UL +#define IXP425_IO_SIZE 0x00010000UL + +/* Offset */ + +#define IXP425_UART0_OFFSET 0x00000000UL +#define IXP425_UART1_OFFSET 0x00001000UL +#define IXP425_PMC_OFFSET 0x00002000UL +#define IXP425_INTR_OFFSET 0x00003000UL +#define IXP425_GPIO_OFFSET 0x00004000UL +#define IXP425_TIMER_OFFSET 0x00005000UL +#define IXP425_NPE_A_OFFSET 0x00006000UL /* Not User Programmable */ +#define IXP425_NPE_B_OFFSET 0x00007000UL /* Not User Programmable */ +#define IXP425_NPE_C_OFFSET 0x00008000UL /* Not User Programmable */ +#define IXP425_MAC_A_OFFSET 0x00009000UL +#define IXP425_MAC_B_OFFSET 0x0000a000UL +#define IXP425_USB_OFFSET 0x0000b000UL + +#define IXP425_REG_SIZE 0x1000 + +/* + * UART + * UART0 0xc8000000 + * UART1 0xc8001000 + * + */ +/* I/O space */ +#define IXP425_UART0_HWBASE (IXP425_IO_HWBASE + IXP425_UART0_OFFSET) +#define IXP425_UART1_HWBASE (IXP425_IO_HWBASE + IXP425_UART1_OFFSET) + +#define IXP425_UART0_VBASE (IXP425_IO_VBASE + IXP425_UART0_OFFSET) + /* 0xf0000000 */ +#define IXP425_UART1_VBASE (IXP425_IO_VBASE + IXP425_UART1_OFFSET) + /* 0xf0001000 */ + +#define IXP425_UART_FREQ 14745600 + +#define IXP425_UART_IER 0x01 /* interrupt enable register */ +#define IXP425_UART_IER_RTOIE 0x10 /* receiver timeout interrupt enable */ +#define 
IXP425_UART_IER_UUE 0x40 /* UART Unit enable */ + +/*#define IXP4XX_COM_NPORTS 8*/ + +/* + * Timers + * + */ +#define IXP425_TIMER_HWBASE (IXP425_IO_HWBASE + IXP425_TIMER_OFFSET) +#define IXP425_TIMER_VBASE (IXP425_IO_VBASE + IXP425_TIMER_OFFSET) + +#define IXP425_OST_TS 0x0000 +#define IXP425_OST_TIM0 0x0004 +#define IXP425_OST_TIM1 0x000C + +#define IXP425_OST_TIM0_RELOAD 0x0008 +#define IXP425_OST_TIM1_RELOAD 0x0010 +#define TIMERRELOAD_MASK 0xFFFFFFFC +#define OST_ONESHOT_EN (1U << 1) +#define OST_TIMER_EN (1U << 0) + +#define IXP425_OST_STATUS 0x0020 +#define OST_WARM_RESET (1U << 4) +#define OST_WDOG_INT (1U << 3) +#define OST_TS_INT (1U << 2) +#define OST_TIM1_INT (1U << 1) +#define OST_TIM0_INT (1U << 0) + +#define IXP425_OST_WDOG 0x0014 +#define IXP425_OST_WDOG_ENAB 0x0018 +#define IXP425_OST_WDOG_KEY 0x001c +#define OST_WDOG_KEY_MAJICK 0x482e +#define OST_WDOG_ENAB_RST_ENA (1u << 0) +#define OST_WDOG_ENAB_INT_ENA (1u << 1) +#define OST_WDOG_ENAB_CNT_ENA (1u << 2) + +/* + * Interrupt Controller Unit. + * PA 0xc8003000 + */ + +#define IXP425_IRQ_HWBASE IXP425_IO_HWBASE + IXP425_INTR_OFFSET +#define IXP425_IRQ_VBASE IXP425_IO_VBASE + IXP425_INTR_OFFSET + /* 0xf0003000 */ +#define IXP425_IRQ_SIZE 0x00000020UL + +#define IXP425_INT_STATUS (IXP425_IRQ_VBASE + 0x00) +#define IXP425_INT_ENABLE (IXP425_IRQ_VBASE + 0x04) +#define IXP425_INT_SELECT (IXP425_IRQ_VBASE + 0x08) +#define IXP425_IRQ_STATUS (IXP425_IRQ_VBASE + 0x0C) +#define IXP425_FIQ_STATUS (IXP425_IRQ_VBASE + 0x10) +#define IXP425_INT_PRTY (IXP425_IRQ_VBASE + 0x14) +#define IXP425_IRQ_ENC (IXP425_IRQ_VBASE + 0x18) +#define IXP425_FIQ_ENC (IXP425_IRQ_VBASE + 0x1C) + +#define IXP425_INT_SW1 31 /* SW Interrupt 1 */ +#define IXP425_INT_SW0 30 /* SW Interrupt 0 */ +#define IXP425_INT_GPIO_12 29 /* GPIO 12 */ +#define IXP425_INT_GPIO_11 28 /* GPIO 11 */ +#define IXP425_INT_GPIO_10 27 /* GPIO 11 */ +#define IXP425_INT_GPIO_9 26 /* GPIO 9 */ +#define IXP425_INT_GPIO_8 25 /* GPIO 8 */ +#define IXP425_INT_GPIO_7 24 /* GPIO 7 */ +#define IXP425_INT_GPIO_6 23 /* GPIO 6 */ +#define IXP425_INT_GPIO_5 22 /* GPIO 5 */ +#define IXP425_INT_GPIO_4 21 /* GPIO 4 */ +#define IXP425_INT_GPIO_3 20 /* GPIO 3 */ +#define IXP425_INT_GPIO_2 19 /* GPIO 2 */ +#define IXP425_INT_XSCALE_PMU 18 /* XScale PMU */ +#define IXP425_INT_AHB_PMU 17 /* AHB PMU */ +#define IXP425_INT_WDOG 16 /* Watchdog Timer */ +#define IXP425_INT_UART0 15 /* HighSpeed UART */ +#define IXP425_INT_STAMP 14 /* Timestamp Timer */ +#define IXP425_INT_UART1 13 /* Console UART */ +#define IXP425_INT_USB 12 /* USB */ +#define IXP425_INT_TMR1 11 /* General-Purpose Timer1 */ +#define IXP425_INT_PCIDMA2 10 /* PCI DMA Channel 2 */ +#define IXP425_INT_PCIDMA1 9 /* PCI DMA Channel 1 */ +#define IXP425_INT_PCIINT 8 /* PCI Interrupt */ +#define IXP425_INT_GPIO_1 7 /* GPIO 1 */ +#define IXP425_INT_GPIO_0 6 /* GPIO 0 */ +#define IXP425_INT_TMR0 5 /* General-Purpose Timer0 */ +#define IXP425_INT_QUE33_64 4 /* Queue Manager 33-64 */ +#define IXP425_INT_QUE1_32 3 /* Queue Manager 1-32 */ +#define IXP425_INT_NPE_C 2 /* NPE C */ +#define IXP425_INT_NPE_B 1 /* NPE B */ +#define IXP425_INT_NPE_A 0 /* NPE A */ + +/* + * software interrupt + */ +#define IXP425_INT_bit31 31 +#define IXP425_INT_bit30 30 +#define IXP425_INT_bit14 14 +#define IXP425_INT_bit11 11 + +#define IXP425_INT_HWMASK (0xffffffff & \ + ~((1 << IXP425_INT_bit31) | \ + (1 << IXP425_INT_bit30) | \ + (1 << IXP425_INT_bit14) | \ + (1 << IXP425_INT_bit11))) +#define IXP425_INT_GPIOMASK (0x3ff800c0u) + +/* + * GPIO + */ +#define IXP425_GPIO_HWBASE 
+ +/* + * GPIO + */ +#define IXP425_GPIO_HWBASE IXP425_IO_HWBASE + IXP425_GPIO_OFFSET +#define IXP425_GPIO_VBASE IXP425_IO_VBASE + IXP425_GPIO_OFFSET + /* 0xf0004000 */ +#define IXP425_GPIO_SIZE 0x00000020UL + +#define IXP425_GPIO_GPOUTR 0x00 +#define IXP425_GPIO_GPOER 0x04 +#define IXP425_GPIO_GPINR 0x08 +#define IXP425_GPIO_GPISR 0x0c +#define IXP425_GPIO_GPIT1R 0x10 +#define IXP425_GPIO_GPIT2R 0x14 +#define IXP425_GPIO_GPCLKR 0x18 +# define GPCLKR_MUX14 (1U << 8) +# define GPCLKR_CLK0TC_SHIFT 4 +# define GPCLKR_CLK0DC_SHIFT 0 + +/* GPIO Output */ +#define GPOUT_ON 0x1 +#define GPOUT_OFF 0x0 + +/* GPIO direction */ +#define GPOER_INPUT 0x1 +#define GPOER_OUTPUT 0x0 + +/* GPIO Type bits */ +#define GPIO_TYPE_ACT_HIGH 0x0 +#define GPIO_TYPE_ACT_LOW 0x1 +#define GPIO_TYPE_EDG_RISING 0x2 +#define GPIO_TYPE_EDG_FALLING 0x3 +#define GPIO_TYPE_TRANSITIONAL 0x4 +#define GPIO_TYPE_MASK 0x7 +#define GPIO_TYPE(b,v) ((v) << (((b) & 0x7) * 3)) +#define GPIO_TYPE_REG(b) (((b)&8)?IXP425_GPIO_GPIT2R:IXP425_GPIO_GPIT1R) + +/* + * Expansion Bus Configuration Space. + */ +#define IXP425_EXP_HWBASE 0xc4000000UL +#define IXP425_EXP_VBASE (IXP425_IO_VBASE + IXP425_IO_SIZE) + /* 0xf0010000 */ +#define IXP425_EXP_SIZE IXP425_REG_SIZE /* 0x1000 */ + +/* offset */ +#define EXP_TIMING_CS0_OFFSET 0x0000 +#define EXP_TIMING_CS1_OFFSET 0x0004 +#define EXP_TIMING_CS2_OFFSET 0x0008 +#define EXP_TIMING_CS3_OFFSET 0x000c +#define EXP_TIMING_CS4_OFFSET 0x0010 +#define EXP_TIMING_CS5_OFFSET 0x0014 +#define EXP_TIMING_CS6_OFFSET 0x0018 +#define EXP_TIMING_CS7_OFFSET 0x001c +#define EXP_CNFG0_OFFSET 0x0020 +#define EXP_CNFG1_OFFSET 0x0024 +#define EXP_FCTRL_OFFSET 0x0028 + +#define IXP425_EXP_RECOVERY_SHIFT 16 +#define IXP425_EXP_HOLD_SHIFT 20 +#define IXP425_EXP_STROBE_SHIFT 22 +#define IXP425_EXP_SETUP_SHIFT 26 +#define IXP425_EXP_ADDR_SHIFT 28 +#define IXP425_EXP_CS_EN (1U << 31) + +#define IXP425_EXP_RECOVERY_T(x) (((x) & 15) << IXP425_EXP_RECOVERY_SHIFT) +#define IXP425_EXP_HOLD_T(x) (((x) & 3) << IXP425_EXP_HOLD_SHIFT) +#define IXP425_EXP_STROBE_T(x) (((x) & 15) << IXP425_EXP_STROBE_SHIFT) +#define IXP425_EXP_SETUP_T(x) (((x) & 3) << IXP425_EXP_SETUP_SHIFT) +#define IXP425_EXP_ADDR_T(x) (((x) & 3) << IXP425_EXP_ADDR_SHIFT) + +/* EXP_CSn bits */ +#define EXP_BYTE_EN 0x00000001 /* bus uses only 8-bit data */ +#define EXP_WR_EN 0x00000002 /* ena writes to CS region */ +/* bit 2 is reserved */ +#define EXP_SPLT_EN 0x00000008 /* ena AHB split transfers */ +#define EXP_MUX_EN 0x00000010 /* multiplexed address/data */ +#define EXP_HRDY_POL 0x00000020 /* HPI|HRDY polarity */ +#define EXP_BYTE_RD16 0x00000040 /* byte rd access to word dev */ +#define EXP_CNFG 0x00003c00 /* device config size */ +#define EXP_SZ_512 (0 << 10) +#define EXP_SZ_1K (1 << 10) +#define EXP_SZ_2K (2 << 10) +#define EXP_SZ_4K (3 << 10) +#define EXP_SZ_8K (4 << 10) +#define EXP_SZ_16K (5 << 10) +#define EXP_SZ_32K (6 << 10) +#define EXP_SZ_64K (7 << 10) +#define EXP_SZ_128K (8 << 10) +#define EXP_SZ_256K (9 << 10) +#define EXP_SZ_512K (10 << 10) +#define EXP_SZ_1M (11 << 10) +#define EXP_SZ_2M (12 << 10) +#define EXP_SZ_4M (13 << 10) +#define EXP_SZ_8M (14 << 10) +#define EXP_SZ_16M (15 << 10) +#define EXP_CYC_TYPE 0x0000c000 /* bus cycle "type" */ +#define EXP_CYC_INTEL (0 << 14) +#define EXP_CYC_MOTO (1 << 14) +#define EXP_CYC_HPI (2 << 14) +#define EXP_T5 0x000f0000 /* recovery timing */ +#define EXP_T4 0x00300000 /* hold timing */ +#define EXP_T3 0x03c00000 /* strobe timing */ +#define EXP_T2 0x0c000000 /* setup/chip select timing */ +#define EXP_T1 0x30000000 /* address timing */ +/* bit 30 is
reserved */ +#define EXP_CS_EN 0x80000000 /* chip select enabled */ + +/* EXP_CNFG0 bits */ +#define EXP_CNFG0_8BIT (1 << 0) +#define EXP_CNFG0_PCI_HOST (1 << 1) +#define EXP_CNFG0_PCI_ARB (1 << 2) +#define EXP_CNFG0_PCI_66MHZ (1 << 4) +#define EXP_CNFG0_MEM_MAP (1 << 31) + +/* EXP_CNFG1 bits */ +#define EXP_CNFG1_SW_INT0 (1 << 0) +#define EXP_CNFG1_SW_INT1 (1 << 1) + +#define EXP_FCTRL_RCOMP (1<<0) +#define EXP_FCTRL_USB (1<<1) +#define EXP_FCTRL_HASH (1<<2) +#define EXP_FCTRL_AES (1<<3) +#define EXP_FCTRL_DES (1<<4) +#define EXP_FCTRL_HDLC (1<<5) +#define EXP_FCTRL_AAL (1<<6) +#define EXP_FCTRL_HSS (1<<7) +#define EXP_FCTRL_UTOPIA (1<<8) +#define EXP_FCTRL_ETH0 (1<<9) +#define EXP_FCTRL_ETH1 (1<<10) +#define EXP_FCTRL_NPEA (1<<11) +#define EXP_FCTRL_NPEB (1<<12) +#define EXP_FCTRL_NPEC (1<<13) +#define EXP_FCTRL_PCI (1<<14) +/* XXX more stuff we don't care about */ + +/* + * PCI + */ +#define IXP425_PCI_HWBASE 0xc0000000 +#define IXP425_PCI_VBASE (IXP425_EXP_VBASE + IXP425_EXP_SIZE) + /* 0xf0011000 */ +#define IXP425_PCI_SIZE IXP425_REG_SIZE /* 0x1000 */ + +/* + * Mapping registers of IXP425 PCI Configuration + */ +/* PCI_ID_REG 0x00 */ +/* PCI_COMMAND_STATUS_REG 0x04 */ +/* PCI_CLASS_REG 0x08 */ +/* PCI_BHLC_REG 0x0c */ +#define PCI_MAPREG_BAR0 0x10 /* Base Address 0 */ +#define PCI_MAPREG_BAR1 0x14 /* Base Address 1 */ +#define PCI_MAPREG_BAR2 0x18 /* Base Address 2 */ +#define PCI_MAPREG_BAR3 0x1c /* Base Address 3 */ +#define PCI_MAPREG_BAR4 0x20 /* Base Address 4 */ +#define PCI_MAPREG_BAR5 0x24 /* Base Address 5 */ +/* PCI_SUBSYS_ID_REG 0x2c */ +/* PCI_INTERRUPT_REG 0x3c */ +#define PCI_RTOTTO 0x40 + +/* PCI Controller CSR Base Address */ +#define IXP425_PCI_CSR_BASE IXP425_PCI_VBASE + +/* PCI Memory Space */ +#define IXP425_PCI_MEM_HWBASE 0x48000000UL +#define IXP425_PCI_MEM_VBASE 0xf8000000UL +#define IXP425_PCI_MEM_SIZE 0x04000000UL /* 64MB */ + +/* PCI I/O Space */ +#define IXP425_PCI_IO_HWBASE 0x00000000UL +#define IXP425_PCI_IO_SIZE 0x00100000UL /* 1Mbyte */ + +/* PCI Controller Configuration Offset */ +#define PCI_NP_AD 0x00 +#define PCI_NP_CBE 0x04 +# define NP_CBE_SHIFT 4 +#define PCI_NP_WDATA 0x08 +#define PCI_NP_RDATA 0x0c +#define PCI_CRP_AD_CBE 0x10 +#define PCI_CRP_AD_WDATA 0x14 +#define PCI_CRP_AD_RDATA 0x18 +#define PCI_CSR 0x1c +# define CSR_PRST (1U << 16) +# define CSR_IC (1U << 15) +# define CSR_ABE (1U << 4) +# define CSR_PDS (1U << 3) +# define CSR_ADS (1U << 2) +# define CSR_HOST (1U << 0) +#define PCI_ISR 0x20 +# define ISR_AHBE (1U << 3) +# define ISR_PPE (1U << 2) +# define ISR_PFE (1U << 1) +# define ISR_PSE (1U << 0) +#define PCI_INTEN 0x24 +#define PCI_DMACTRL 0x28 +#define PCI_AHBMEMBASE 0x2c +#define PCI_AHBIOBASE 0x30 +#define PCI_PCIMEMBASE 0x34 +#define PCI_AHBDOORBELL 0x38 +#define PCI_PCIDOORBELL 0x3c +#define PCI_ATPDMA0_AHBADDR 0x40 +#define PCI_ATPDMA0_PCIADDR 0x44 +#define PCI_ATPDMA0_LENGTH 0x48 +#define PCI_ATPDMA1_AHBADDR 0x4c +#define PCI_ATPDMA1_PCIADDR 0x50 +#define PCI_ATPDMA1_LENGTH 0x54 +#define PCI_PTADMA0_AHBADDR 0x58 +#define PCI_PTADMA0_PCIADDR 0x5c +#define PCI_PTADMA0_LENGTH 0x60 +#define PCI_PTADMA1_AHBADDR 0x64 +#define PCI_PTADMA1_PCIADDR 0x68 +#define PCI_PTADMA1_LENGTH 0x6c + +/* PCI target(T)/initiator(I) Interface Commands for PCI_NP_CBE register */ +#define COMMAND_NP_IA 0x0 /* Interrupt Acknowledge (I)*/ +#define COMMAND_NP_SC 0x1 /* Special Cycle (I)*/ +#define COMMAND_NP_IO_READ 0x2 /* I/O Read (T)(I) */ +#define COMMAND_NP_IO_WRITE 0x3 /* I/O Write (T)(I) */ +#define COMMAND_NP_MEM_READ 0x6 /* Memory Read (T)(I) */ 
+#define COMMAND_NP_MEM_WRITE 0x7 /* Memory Write (T)(I) */ +#define COMMAND_NP_CONF_READ 0xa /* Configuration Read (T)(I) */ +#define COMMAND_NP_CONF_WRITE 0xb /* Configuration Write (T)(I) */ + +/* PCI byte enables */ +#define BE_8BIT(a) ((0x10u << ((a) & 0x03)) ^ 0xf0) +#define BE_16BIT(a) ((0x30u << ((a) & 0x02)) ^ 0xf0) +#define BE_32BIT(a) 0x00 + +/* PCI byte selects */ +#define READ_8BIT(v,a) ((u_int8_t)((v) >> (((a) & 3) * 8))) +#define READ_16BIT(v,a) ((u_int16_t)((v) >> (((a) & 2) * 8))) +#define WRITE_8BIT(v,a) (((u_int32_t)(v)) << (((a) & 3) * 8)) +#define WRITE_16BIT(v,a) (((u_int32_t)(v)) << (((a) & 2) * 8)) + +/* PCI Controller Configuration Commands for PCI_CRP_AD_CBE */ +#define COMMAND_CRP_READ 0x00 +#define COMMAND_CRP_WRITE (1U << 16) + +/* + * SDRAM Configuration Register + */ +#define IXP425_MCU_HWBASE 0xcc000000UL +#define IXP425_MCU_VBASE 0xf0200000UL +#define IXP425_MCU_SIZE 0x1000 /* Actually only 256 bytes */ +#define MCU_SDR_CONFIG 0x00 +#define MCU_SDR_CONFIG_MCONF(x) ((x) & 0x7) +#define MCU_SDR_CONFIG_64MBIT (1u << 5) +#define MCU_SDR_REFRESH 0x04 +#define MCU_SDR_IR 0x08 + +/* + * Performance Monitoring Unit (CP14) + * + * CP14.0.1 Performance Monitor Control Register(PMNC) + * CP14.1.1 Clock Counter(CCNT) + * CP14.4.1 Interrupt Enable Register(INTEN) + * CP14.5.1 Overflow Flag Register(FLAG) + * CP14.8.1 Event Selection Register(EVTSEL) + * CP14.0.2 Performance Counter Register 0(PMN0) + * CP14.1.2 Performance Counter Register 1(PMN1) + * CP14.2.2 Performance Counter Register 2(PMN2) + * CP14.3.2 Performance Counter Register 3(PMN3) + */ + +#define PMNC_E 0x00000001 /* enable all counters */ +#define PMNC_P 0x00000002 /* reset all PMNs to 0 */ +#define PMNC_C 0x00000004 /* clock counter reset */ +#define PMNC_D 0x00000008 /* clock counter / 64 */ + +#define INTEN_CC_IE 0x00000001 /* enable clock counter interrupt */ +#define INTEN_PMN0_IE 0x00000002 /* enable PMN0 interrupt */ +#define INTEN_PMN1_IE 0x00000004 /* enable PMN1 interrupt */ +#define INTEN_PMN2_IE 0x00000008 /* enable PMN2 interrupt */ +#define INTEN_PMN3_IE 0x00000010 /* enable PMN3 interrupt */ + +#define FLAG_CC_IF 0x00000001 /* clock counter overflow */ +#define FLAG_PMN0_IF 0x00000002 /* PMN0 overflow */ +#define FLAG_PMN1_IF 0x00000004 /* PMN1 overflow */ +#define FLAG_PMN2_IF 0x00000008 /* PMN2 overflow */ +#define FLAG_PMN3_IF 0x00000010 /* PMN3 overflow */ + +#define EVTSEL_EVCNT_MASK 0x000000ff /* event to count for PMNs */ +#define PMNC_EVCNT0_SHIFT 0 +#define PMNC_EVCNT1_SHIFT 8 +#define PMNC_EVCNT2_SHIFT 16 +#define PMNC_EVCNT3_SHIFT 24 + +/* + * Queue Manager + */ +#define IXP425_QMGR_HWBASE 0x60000000UL +#define IXP425_QMGR_VBASE (IXP425_PCI_VBASE + IXP425_PCI_SIZE) +#define IXP425_QMGR_SIZE 0x4000
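/*
 * Illustrative sketch, not part of the patch: how the PCI controller
 * register offsets and the byte-enable/byte-select macros above are meant
 * to combine.  This reads one byte of the controller's own configuration
 * space through the CRP port: the dword-aligned register number (plus the
 * read command, which is 0) goes into PCI_CRP_AD_CBE, the controller
 * latches the result in PCI_CRP_AD_RDATA, and READ_8BIT() picks the
 * addressed byte out of the 32-bit word.  PCI_CSR_READ_4/PCI_CSR_WRITE_4
 * are hypothetical stand-ins for bus_space accesses at IXP425_PCI_CSR_BASE.
 */
static uint8_t
crp_read_1(uint32_t reg)
{
        uint32_t data;

        PCI_CSR_WRITE_4(PCI_CRP_AD_CBE, (reg & ~3) | COMMAND_CRP_READ);
        data = PCI_CSR_READ_4(PCI_CRP_AD_RDATA);
        return (READ_8BIT(data, reg));
}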
+ +/* + * Network Processing Engines (NPE's) and associated Ethernet MAC's. + */ +#define IXP425_NPE_A_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_A_OFFSET) +#define IXP425_NPE_A_VBASE (IXP425_IO_VBASE + IXP425_NPE_A_OFFSET) +#define IXP425_NPE_A_SIZE 0x1000 /* Actually only 256 bytes */ + +#define IXP425_NPE_B_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_B_OFFSET) +#define IXP425_NPE_B_VBASE (IXP425_IO_VBASE + IXP425_NPE_B_OFFSET) +#define IXP425_NPE_B_SIZE 0x1000 /* Actually only 256 bytes */ + +#define IXP425_NPE_C_HWBASE (IXP425_IO_HWBASE + IXP425_NPE_C_OFFSET) +#define IXP425_NPE_C_VBASE (IXP425_IO_VBASE + IXP425_NPE_C_OFFSET) +#define IXP425_NPE_C_SIZE 0x1000 /* Actually only 256 bytes */ + +#define IXP425_MAC_A_HWBASE (IXP425_IO_HWBASE + IXP425_MAC_A_OFFSET) +#define IXP425_MAC_A_VBASE (IXP425_IO_VBASE + IXP425_MAC_A_OFFSET) +#define IXP425_MAC_A_SIZE 0x1000 /* Actually only 256 bytes */ + +#define IXP425_MAC_B_HWBASE (IXP425_IO_HWBASE + IXP425_MAC_B_OFFSET) +#define IXP425_MAC_B_VBASE (IXP425_IO_VBASE + IXP425_MAC_B_OFFSET) +#define IXP425_MAC_B_SIZE 0x1000 /* Actually only 256 bytes */ + +/* + * Expansion Bus Data Space. + */ +#define IXP425_EXP_BUS_HWBASE 0x50000000UL +#define IXP425_EXP_BUS_SIZE 0x01000000 /* max, typically smaller */ + +#define IXP425_EXP_BUS_CSx_HWBASE(i) \ + (IXP425_EXP_BUS_HWBASE + (i)*IXP425_EXP_BUS_SIZE) + +#define IXP425_EXP_BUS_CS1_HWBASE IXP425_EXP_BUS_CSx_HWBASE(1) +#define IXP425_EXP_BUS_CS1_VBASE (IXP425_MAC_B_VBASE + IXP425_MAC_B_SIZE) +#define IXP425_EXP_BUS_CS1_SIZE 0x1000 + +/* NB: not mapped (yet) */ +#define IXP425_EXP_BUS_CS0_HWBASE IXP425_EXP_BUS_CSx_HWBASE(0) +#define IXP425_EXP_BUS_CS2_HWBASE IXP425_EXP_BUS_CSx_HWBASE(2) +#define IXP425_EXP_BUS_CS3_HWBASE IXP425_EXP_BUS_CSx_HWBASE(3) +#define IXP425_EXP_BUS_CS4_HWBASE IXP425_EXP_BUS_CSx_HWBASE(4) +#define IXP425_EXP_BUS_CS5_HWBASE IXP425_EXP_BUS_CSx_HWBASE(5) +#define IXP425_EXP_BUS_CS6_HWBASE IXP425_EXP_BUS_CSx_HWBASE(6) +#define IXP425_EXP_BUS_CS7_HWBASE IXP425_EXP_BUS_CSx_HWBASE(7) + +#endif /* _IXP425REG_H_ */ diff --git a/sys/arm/xscale/ixp425/ixp425var.h b/sys/arm/xscale/ixp425/ixp425var.h new file mode 100644 index 000000000000..91a163cd72f1 --- /dev/null +++ b/sys/arm/xscale/ixp425/ixp425var.h @@ -0,0 +1,99 @@ +/* $NetBSD: ixp425var.h,v 1.10 2006/04/10 03:36:03 simonb Exp $ */ + +/* + * Copyright (c) 2003 + * Ichiro FUKUHARA . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Ichiro FUKUHARA. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY ICHIRO FUKUHARA ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ICHIRO FUKUHARA OR THE VOICES IN HIS HEAD BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + * + */ + +#ifndef _IXP425VAR_H_ +#define _IXP425VAR_H_ + +#include +#include + +#include + +#include +#include + +struct ixp425_softc { + device_t sc_dev; + bus_space_tag_t sc_iot; + bus_space_handle_t sc_gpio_ioh; + bus_space_handle_t sc_exp_ioh; + + u_int32_t sc_intrmask; + + struct rman sc_irq_rman; + struct rman sc_mem_rman; +}; + +struct ixppcib_softc { + device_t sc_dev; + + u_int sc_bus; + + struct resource *sc_csr; + struct resource *sc_mem; + + struct rman sc_io_rman; + struct rman sc_mem_rman; + struct rman sc_irq_rman; + + struct bus_space sc_pci_memt; + struct bus_space sc_pci_iot; +}; + +#define EXP_BUS_WRITE_4(sc, reg, data) \ + bus_space_write_4(sc->sc_iot, sc->sc_exp_ioh, reg, data) +#define EXP_BUS_READ_4(sc, reg) \ + bus_space_read_4(sc->sc_iot, sc->sc_exp_ioh, reg) + +#define GPIO_CONF_WRITE_4(sc, reg, data) \ + bus_space_write_4(sc->sc_iot, sc->sc_gpio_ioh, reg, data) +#define GPIO_CONF_READ_4(sc, reg) \ + bus_space_read_4(sc->sc_iot, sc->sc_gpio_ioh, reg) + +extern struct bus_space ixp425_bs_tag; +extern struct bus_space ixp425_a4x_bs_tag; + +void ixp425_io_bs_init(bus_space_tag_t, void *); +void ixp425_mem_bs_init(bus_space_tag_t, void *); + +uint32_t ixp425_sdram_size(void); + +int ixp425_md_route_interrupt(device_t, device_t, int); +void ixp425_md_attach(device_t); + +#endif /* _IXP425VAR_H_ */ diff --git a/sys/arm/xscale/ixp425/std.avila b/sys/arm/xscale/ixp425/std.avila new file mode 100644 index 000000000000..b69a65ebfa49 --- /dev/null +++ b/sys/arm/xscale/ixp425/std.avila @@ -0,0 +1,6 @@ +#GW2348-4 board configuration +#$FreeBSD$ +include "../xscale/ixp425/std.ixp425" +files "../xscale/ixp425/files.avila" +makeoptions KERNPHYSADDR=0x10200000 +makeoptions KERNVIRTADDR=0xc0200000 diff --git a/sys/arm/xscale/ixp425/std.ixp425 b/sys/arm/xscale/ixp425/std.ixp425 new file mode 100644 index 000000000000..69cc2a148965 --- /dev/null +++ b/sys/arm/xscale/ixp425/std.ixp425 @@ -0,0 +1,6 @@ +#XScale IXP425 generic configuration +#$FreeBSD$ +files "../xscale/ixp425/files.ixp425" +include "../xscale/std.xscale" +cpu CPU_XSCALE_IXP425 +makeoption ARM_BIG_ENDIAN diff --git a/sys/arm/xscale/ixp425/uart_bus_ixp425.c b/sys/arm/xscale/ixp425/uart_bus_ixp425.c new file mode 100644 index 000000000000..a27557afbd98 --- /dev/null +++ b/sys/arm/xscale/ixp425/uart_bus_ixp425.c @@ -0,0 +1,91 @@ +/*- + * Copyright (c) 2006 Kevin Lo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include "uart_if.h" + +static int uart_ixp425_probe(device_t dev); + +static device_method_t uart_ixp425_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, uart_ixp425_probe), + DEVMETHOD(device_attach, uart_bus_attach), + DEVMETHOD(device_detach, uart_bus_detach), + { 0, 0 } +}; + +static driver_t uart_ixp425_driver = { + uart_driver_name, + uart_ixp425_methods, + sizeof(struct uart_softc), +}; + +extern SLIST_HEAD(uart_devinfo_list, uart_devinfo) uart_sysdevs; +static int +uart_ixp425_probe(device_t dev) +{ + struct uart_softc *sc; + + sc = device_get_softc(dev); + sc->sc_sysdev = SLIST_FIRST(&uart_sysdevs); + sc->sc_class = &uart_ns8250_class; + bcopy(&sc->sc_sysdev->bas, &sc->sc_bas, sizeof(sc->sc_bas)); + /* + * XXX set UART Unit Enable (0x40) AND + * receiver timeout int enable (0x10). + * The first turns on the UART. The second is necessary to get + * interrupts when the FIFO has data but is not full. Note that + * uart_ns8250 carefully avoids touching these bits so we can + * just set them here and proceed. But this is fragile... + */ + bus_space_write_4(&ixp425_a4x_bs_tag, + device_get_unit(dev) == 0 ? IXP425_UART0_VBASE : IXP425_UART1_VBASE, + IXP425_UART_IER, IXP425_UART_IER_UUE | IXP425_UART_IER_RTOIE); + return(uart_bus_probe(dev, 0, IXP425_UART_FREQ, 0, 0)); +} + + +DRIVER_MODULE(uart, ixp, uart_ixp425_driver, uart_devclass, 0, 0); diff --git a/sys/arm/xscale/ixp425/uart_cpu_ixp425.c b/sys/arm/xscale/ixp425/uart_cpu_ixp425.c new file mode 100644 index 000000000000..e691968538c6 --- /dev/null +++ b/sys/arm/xscale/ixp425/uart_cpu_ixp425.c @@ -0,0 +1,67 @@ +/*- + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +bus_space_tag_t uart_bus_space_io; +bus_space_tag_t uart_bus_space_mem; + +int +uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2) +{ + return ((b1->bsh == b2->bsh && b1->bst == b2->bst) ? 1 : 0); +} + +int +uart_cpu_getdev(int devtype, struct uart_devinfo *di) +{ + di->ops = uart_ns8250_ops; + di->bas.chan = 0; + di->bas.bst = &ixp425_a4x_bs_tag; + di->bas.regshft = 0; + di->bas.rclk = IXP425_UART_FREQ; + di->baudrate = 115200; + di->databits = 8; + di->stopbits = 1; + di->parity = UART_PARITY_NONE; + uart_bus_space_io = &ixp425_a4x_bs_tag; + uart_bus_space_mem = NULL; + di->bas.bsh = IXP425_UART0_VBASE; + return (0); +}
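/*
 * Illustrative sketch, not part of the patch: what the ixp425_a4x_bs_tag
 * used above implies.  The IXP425 presents its ns8250-compatible UART
 * registers on 32-bit word boundaries, and the "a4x" bus-space tag is
 * assumed here to apply that 4-byte stride itself, which is why regshft
 * stays 0 in uart_cpu_getdev().  Under that assumption a register index
 * maps to a raw address as follows; uart0_reg() is hypothetical and only
 * for illustration.
 */
static inline volatile uint32_t *
uart0_reg(u_int reg)
{
        return ((volatile uint32_t *)(IXP425_UART0_VBASE + (reg << 2)));
}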