From 42c1b001f75679f2193f176c8f53cce62fd41e81 Mon Sep 17 00:00:00 2001 From: Thomas Moestl Date: Wed, 27 Feb 2002 17:41:06 +0000 Subject: [PATCH] Add a driver for the Sun GEM (Gigabit) and ERI (100 Mb/s) PCI ethernet adaptors, ported from NetBSD. --- sys/dev/gem/if_gem.c | 2017 ++++++++++++++++++++++++++++++++++++++ sys/dev/gem/if_gem_pci.c | 192 ++++ sys/dev/gem/if_gemreg.h | 538 ++++++++++ sys/dev/gem/if_gemvar.h | 242 +++++ 4 files changed, 2989 insertions(+) create mode 100644 sys/dev/gem/if_gem.c create mode 100644 sys/dev/gem/if_gem_pci.c create mode 100644 sys/dev/gem/if_gemreg.h create mode 100644 sys/dev/gem/if_gemvar.h diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c new file mode 100644 index 000000000000..f19d649734eb --- /dev/null +++ b/sys/dev/gem/if_gem.c @@ -0,0 +1,2017 @@ +/* + * Copyright (C) 2001 Eduardo Horvath. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: NetBSD: gem.c,v 1.9 2001/10/21 20:45:15 thorpej Exp + * + * $FreeBSD$ + */ + +/* + * Driver for Sun GEM ethernet controllers. 
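+ * The same core code handles both the GEM (gigabit) and the ERI
+ * (100 Mb/s) adaptors named above; the two variants share one register
+ * layout and descriptor format, so only the bus attachment differs.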
+ */ + +#define GEM_DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#define TRIES 10000 + +static void gem_start __P((struct ifnet *)); +static void gem_stop __P((struct ifnet *, int)); +static int gem_ioctl __P((struct ifnet *, u_long, caddr_t)); +static void gem_cddma_callback __P((void *, bus_dma_segment_t *, int, int)); +static void gem_rxdma_callback __P((void *, bus_dma_segment_t *, int, int)); +static void gem_txdma_callback __P((void *, bus_dma_segment_t *, int, int)); +static void gem_tick __P((void *)); +static void gem_watchdog __P((struct ifnet *)); +static void gem_init __P((void *)); +static void gem_init_regs __P((struct gem_softc *sc)); +static int gem_ringsize __P((int sz)); +static int gem_meminit __P((struct gem_softc *)); +static int gem_dmamap_load_mbuf __P((struct gem_softc *, struct mbuf *, + bus_dmamap_callback_t *, struct gem_txjob *, int)); +static void gem_dmamap_unload_mbuf __P((struct gem_softc *, + struct gem_txjob *)); +static void gem_dmamap_commit_mbuf __P((struct gem_softc *, + struct gem_txjob *)); +static void gem_mifinit __P((struct gem_softc *)); +static int gem_bitwait __P((struct gem_softc *sc, bus_addr_t r, + u_int32_t clr, u_int32_t set)); +static int gem_reset_rx __P((struct gem_softc *)); +static int gem_reset_tx __P((struct gem_softc *)); +static int gem_disable_rx __P((struct gem_softc *)); +static int gem_disable_tx __P((struct gem_softc *)); +static void gem_rxdrain __P((struct gem_softc *)); +static int gem_add_rxbuf __P((struct gem_softc *, int)); +static void gem_setladrf __P((struct gem_softc *)); + +struct mbuf *gem_get __P((struct gem_softc *, int, int)); +static void gem_eint __P((struct gem_softc *, u_int)); +static void gem_rint __P((struct gem_softc *)); +static void gem_tint __P((struct gem_softc *)); +#ifdef notyet +static void gem_power __P((int, void *)); +#endif + +devclass_t gem_devclass; +DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); +MODULE_DEPEND(gem, miibus, 1, 1, 1); + +#ifdef GEM_DEBUG +#define DPRINTF(sc, x) if ((sc)->sc_arpcom.ac_if.if_flags & IFF_DEBUG) \ + printf x +#include +#define KTR_GEM KTR_CT2 +#else +#define DPRINTF(sc, x) /* nothing */ +#endif + +#define GEM_NSEGS GEM_NTXSEGS + +/* + * gem_attach: + * + * Attach a Gem interface to the system. + */ +int +gem_attach(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct mii_softc *child; + int i, error; + + /* Make sure the chip is stopped. */ + ifp->if_softc = sc; + gem_reset(sc); + + error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, + BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag); + if (error) + return (error); + + error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, + GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, + &sc->sc_dmatag); + if (error) + goto fail_0; + + error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + sizeof(struct gem_control_data), 1, + sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, + &sc->sc_cdmatag); + if (error) + goto fail_1; + + /* + * Allocate the control data structures, and create and load the + * DMA map for it. 
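+	 * The control data is one contiguous block holding both descriptor
+	 * rings; a single load below yields its bus address (sc_cddma),
+	 * from which GEM_CDTXADDR()/GEM_CDRXADDR() later derive the ring
+	 * base addresses handed to the chip in gem_init().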
+ */ + if ((error = bus_dmamem_alloc(sc->sc_cdmatag, + (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { + device_printf(sc->sc_dev, "unable to allocate control data," + " error = %d\n", error); + goto fail_2; + } + + sc->sc_cddma = 0; + if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, + sc->sc_control_data, sizeof(struct gem_control_data), + gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { + device_printf(sc->sc_dev, "unable to load control data DMA " + "map, error = %d\n", error); + goto fail_3; + } + + /* + * Initialize the transmit job descriptors. + */ + STAILQ_INIT(&sc->sc_txfreeq); + STAILQ_INIT(&sc->sc_txdirtyq); + + /* + * Create the transmit buffer DMA maps. + */ + error = ENOMEM; + for (i = 0; i < GEM_TXQUEUELEN; i++) { + struct gem_txsoft *txs; + + txs = &sc->sc_txsoft[i]; + txs->txs_mbuf = NULL; + txs->txs_ndescs = 0; + if ((error = bus_dmamap_create(sc->sc_dmatag, 0, + &txs->txs_dmamap)) != 0) { + device_printf(sc->sc_dev, "unable to create tx DMA map " + "%d, error = %d\n", i, error); + goto fail_4; + } + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + } + + /* + * Create the receive buffer DMA maps. + */ + for (i = 0; i < GEM_NRXDESC; i++) { + if ((error = bus_dmamap_create(sc->sc_dmatag, 0, + &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { + device_printf(sc->sc_dev, "unable to create rx DMA map " + "%d, error = %d\n", i, error); + goto fail_5; + } + sc->sc_rxsoft[i].rxs_mbuf = NULL; + } + + + gem_mifinit(sc); + + if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, + gem_mediastatus)) != 0) { + device_printf(sc->sc_dev, "phy probe failed: %d\n", error); + goto fail_5; + } + sc->sc_mii = device_get_softc(sc->sc_miibus); + + /* + * From this point forward, the attachment cannot fail. A failure + * before this point releases all resources that may have been + * allocated. + */ + + /* Announce ourselves. */ + device_printf(sc->sc_dev, "Ethernet address:"); + for (i = 0; i < 6; i++) + printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]); + printf("\n"); + + /* Initialize ifnet structure. */ + ifp->if_softc = sc; + ifp->if_unit = device_get_unit(sc->sc_dev); + ifp->if_name = "gem"; + ifp->if_mtu = ETHERMTU; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_start = gem_start; + ifp->if_ioctl = gem_ioctl; + ifp->if_watchdog = gem_watchdog; + ifp->if_init = gem_init; + ifp->if_output = ether_output; + ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; + /* + * Walk along the list of attached MII devices and + * establish an `MII instance' to `phy number' + * mapping. We'll use this mapping in media change + * requests to determine which phy to use to program + * the MIF configuration register. + */ + for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; + child = LIST_NEXT(child, mii_list)) { + /* + * Note: we support just two PHYs: the built-in + * internal device and an external on the MII + * connector. + */ + if (child->mii_phy > 1 || child->mii_inst > 1) { + device_printf(sc->sc_dev, "cannot accomodate " + "MII device %s at phy %d, instance %d\n", + device_get_name(child->mii_dev), + child->mii_phy, child->mii_inst); + continue; + } + + sc->sc_phys[child->mii_inst] = child->mii_phy; + } + + /* + * Now select and activate the PHY we will use. + * + * The order of preference is External (MDI1), + * Internal (MDI0), Serial Link (no MII). 
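+	 * GEM_MIF_CONFIG_PHY_SEL steers the MIF at the external PHY when
+	 * set and at the internal PHY when clear; that is the only
+	 * selection done below.  Serial link media is not supported yet
+	 * (see gem_mediachange()).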
+ */ + if (sc->sc_phys[1]) { +#ifdef GEM_DEBUG + printf("using external phy\n"); +#endif + sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; + } else { +#ifdef GEM_DEBUG + printf("using internal phy\n"); +#endif + sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; + } + bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, + sc->sc_mif_config); + /* Attach the interface. */ + ether_ifattach(ifp, ETHER_BPF_SUPPORTED); + +#if notyet + /* + * Add a suspend hook to make sure we come back up after a + * resume. + */ + sc->sc_powerhook = powerhook_establish(gem_power, sc); + if (sc->sc_powerhook == NULL) + device_printf(sc->sc_dev, "WARNING: unable to establish power " + "hook\n"); +#endif + + callout_init(&sc->sc_tick_ch, 0); + return (0); + + /* + * Free any resources we've allocated during the failed attach + * attempt. Do this in reverse order and fall through. + */ +fail_5: + for (i = 0; i < GEM_NRXDESC; i++) { + if (sc->sc_rxsoft[i].rxs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_dmatag, + sc->sc_rxsoft[i].rxs_dmamap); + } +fail_4: + for (i = 0; i < GEM_TXQUEUELEN; i++) { + if (sc->sc_txsoft[i].txs_dmamap != NULL) + bus_dmamap_destroy(sc->sc_dmatag, + sc->sc_txsoft[i].txs_dmamap); + } + bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); +fail_3: + bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, + sc->sc_cddmamap); +fail_2: + bus_dma_tag_destroy(sc->sc_cdmatag); +fail_1: + bus_dma_tag_destroy(sc->sc_dmatag); +fail_0: + bus_dma_tag_destroy(sc->sc_pdmatag); + return (error); +} + +static void +gem_cddma_callback(xsc, segs, nsegs, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + int error; +{ + struct gem_softc *sc = (struct gem_softc *)xsc; + + if (error != 0) + return; + if (nsegs != 1) { + /* can't happen... */ + panic("gem_cddma_callback: bad control buffer segment count"); + } + sc->sc_cddma = segs[0].ds_addr; +} + +static void +gem_rxdma_callback(xsc, segs, nsegs, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + int error; +{ + struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc; + + if (error != 0) + return; + if (nsegs != 1) { + /* can't happen... */ + panic("gem_rxdma_callback: bad control buffer segment count"); + } + rxs->rxs_paddr = segs[0].ds_addr; +} + +/* + * This is called multiple times in our version of dmamap_load_mbuf, but should + * be fit for a generic version that only calls it once. + */ +static void +gem_txdma_callback(xsc, segs, nsegs, error) + void *xsc; + bus_dma_segment_t *segs; + int nsegs; + int error; +{ + struct gem_txdma *tx = (struct gem_txdma *)xsc; + int seg; + + tx->txd_error = error; + if (error != 0) + return; + tx->txd_nsegs = nsegs; + + /* + * Initialize the transmit descriptors. + */ + for (seg = 0; seg < nsegs; + seg++, tx->txd_nexttx = GEM_NEXTTX(tx->txd_nexttx)) { + uint64_t flags; + + DPRINTF(tx->txd_sc, ("txdma_cb: mapping seg %d (txd %d), len " + "%lx, addr %#lx (%#lx)\n", seg, tx->txd_nexttx, + segs[seg].ds_len, segs[seg].ds_addr, + GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr))); + CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " + "%lx, addr %#lx (%#lx)", seg, tx->txd_nexttx, + segs[seg].ds_len, segs[seg].ds_addr, + GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr)); + /* + * If this is the first descriptor we're + * enqueueing, set the start of packet flag, + * and the checksum stuff if we want the hardware + * to do it. 
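+		 * (Only the start-of-packet flag is set below; hardware
+		 * checksumming is not used by this driver so far.)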
+ */
+		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_addr =
+		    GEM_DMA_WRITE(tx->txd_sc, segs[seg].ds_addr);
+		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
+		if ((tx->txd_flags & GTXD_FIRST) != 0 && seg == 0) {
+			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
+			    "tx %d", seg, tx->txd_nexttx);
+			flags |= GEM_TD_START_OF_PACKET;
+		}
+		if ((tx->txd_flags & GTXD_LAST) != 0 && seg == nsegs - 1) {
+			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
+			    "tx %d", seg, tx->txd_nexttx);
+			flags |= GEM_TD_END_OF_PACKET;
+		}
+		tx->txd_sc->sc_txdescs[tx->txd_nexttx].gd_flags =
+		    GEM_DMA_WRITE(tx->txd_sc, flags);
+		tx->txd_lasttx = tx->txd_nexttx;
+	}
+}
+
+static void
+gem_tick(arg)
+	void *arg;
+{
+	struct gem_softc *sc = arg;
+	int s;
+
+	s = splnet();
+	mii_tick(sc->sc_mii);
+	splx(s);
+
+	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
+}
+
+static int
+gem_bitwait(sc, r, clr, set)
+	struct gem_softc *sc;
+	bus_addr_t r;
+	u_int32_t clr;
+	u_int32_t set;
+{
+	int i;
+	u_int32_t reg;
+
+	for (i = TRIES; i--; DELAY(100)) {
+		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
+		/* Test the value that was read, not the register offset. */
+		if ((reg & clr) == 0 && (reg & set) == set)
+			return (1);
+	}
+	return (0);
+}
+
+void
+gem_reset(sc)
+	struct gem_softc *sc;
+{
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t h = sc->sc_h;
+	int s;
+
+	s = splnet();
+	DPRINTF(sc, ("%s: gem_reset\n", device_get_name(sc->sc_dev)));
+	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
+	gem_reset_rx(sc);
+	gem_reset_tx(sc);
+
+	/* Do a full reset */
+	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
+	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
+		device_printf(sc->sc_dev, "cannot reset device\n");
+	splx(s);
+}
+
+
+/*
+ * gem_rxdrain:
+ *
+ * Drain the receive queue.
+ */
+static void
+gem_rxdrain(sc)
+	struct gem_softc *sc;
+{
+	struct gem_rxsoft *rxs;
+	int i;
+
+	for (i = 0; i < GEM_NRXDESC; i++) {
+		rxs = &sc->sc_rxsoft[i];
+		if (rxs->rxs_mbuf != NULL) {
+			bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap);
+			m_freem(rxs->rxs_mbuf);
+			rxs->rxs_mbuf = NULL;
+		}
+	}
+}
+
+/*
+ * Stop the whole thing.
+ */
+static void
+gem_stop(ifp, disable)
+	struct ifnet *ifp;
+	int disable;
+{
+	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
+	struct gem_txsoft *txs;
+
+	DPRINTF(sc, ("%s: gem_stop\n", device_get_name(sc->sc_dev)));
+	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
+
+	callout_stop(&sc->sc_tick_ch);
+
+	/* XXX - Should we reset these instead? */
+	gem_disable_tx(sc);
+	gem_disable_rx(sc);
+
+	/*
+	 * Release any queued transmit buffers.
+	 */
+	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
+		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
+		if (txs->txs_ndescs != 0) {
+			bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap);
+			if (txs->txs_mbuf != NULL) {
+				m_freem(txs->txs_mbuf);
+				txs->txs_mbuf = NULL;
+			}
+		}
+		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
+	}
+
+	if (disable)
+		gem_rxdrain(sc);
+
+	/*
+	 * Mark the interface down and cancel the watchdog timer.
+	 */
+	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+	ifp->if_timer = 0;
+}
+
+/*
+ * Reset the receiver
+ */
+int
+gem_reset_rx(sc)
+	struct gem_softc *sc;
+{
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t h = sc->sc_h;
+
+	/*
+	 * Resetting while DMA is in progress can cause a bus hang, so we
+	 * disable DMA first.
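+	 * The sequence is the same for both channels: disable the MAC
+	 * half, clear the DMA enable bit in the config register and wait
+	 * for it to clear, then set the self-clearing reset bit in
+	 * GEM_RESET.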
+ */
+	gem_disable_rx(sc);
+	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
+	/* Wait till it finishes */
+	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
+		device_printf(sc->sc_dev, "cannot disable rx dma\n");
+
+	/* Wait 5ms extra. */
+	DELAY(5000);
+
+	/* Finally, reset the ERX */
+	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
+	/* Wait till it finishes */
+	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
+		device_printf(sc->sc_dev, "cannot reset receiver\n");
+		return (1);
+	}
+	return (0);
+}
+
+
+/*
+ * Reset the transmitter
+ */
+static int
+gem_reset_tx(sc)
+	struct gem_softc *sc;
+{
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t h = sc->sc_h;
+	int i;
+
+	/*
+	 * Resetting while DMA is in progress can cause a bus hang, so we
+	 * disable DMA first.
+	 */
+	gem_disable_tx(sc);
+	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
+	/* Wait till it finishes */
+	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
+		device_printf(sc->sc_dev, "cannot disable tx dma\n");
+
+	/* Wait 5ms extra. */
+	DELAY(5000);
+
+	/* Finally, reset the ETX */
+	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
+	/* Wait till it finishes */
+	for (i = TRIES; i--; DELAY(100))
+		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
+			break;
+	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
+		device_printf(sc->sc_dev, "cannot reset transmitter\n");
+		return (1);
+	}
+	return (0);
+}
+
+/*
+ * Disable receiver.
+ */
+static int
+gem_disable_rx(sc)
+	struct gem_softc *sc;
+{
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t h = sc->sc_h;
+	u_int32_t cfg;
+
+	/* Flip the enable bit */
+	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
+	cfg &= ~GEM_MAC_RX_ENABLE;
+	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
+
+	/* Wait for it to finish */
+	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
+}
+
+/*
+ * Disable transmitter.
+ */
+static int
+gem_disable_tx(sc)
+	struct gem_softc *sc;
+{
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t h = sc->sc_h;
+	u_int32_t cfg;
+
+	/* Flip the enable bit */
+	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
+	cfg &= ~GEM_MAC_TX_ENABLE;
+	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
+
+	/* Wait for it to finish */
+	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
+}
+
+/*
+ * Initialize the transmit and receive descriptor rings.
+ */
+static int
+gem_meminit(sc)
+	struct gem_softc *sc;
+{
+	struct gem_rxsoft *rxs;
+	int i, error;
+
+	/*
+	 * Initialize the transmit descriptor ring.
+	 */
+	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
+	for (i = 0; i < GEM_NTXDESC; i++) {
+		sc->sc_txdescs[i].gd_flags = 0;
+		sc->sc_txdescs[i].gd_addr = 0;
+	}
+	GEM_CDTXSYNC(sc, 0, GEM_NTXDESC,
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	sc->sc_txfree = GEM_NTXDESC;
+	sc->sc_txnext = 0;
+
+	/*
+	 * Initialize the receive descriptor and receive job
+	 * descriptor rings.
+	 */
+	for (i = 0; i < GEM_NRXDESC; i++) {
+		rxs = &sc->sc_rxsoft[i];
+		if (rxs->rxs_mbuf == NULL) {
+			if ((error = gem_add_rxbuf(sc, i)) != 0) {
+				device_printf(sc->sc_dev, "unable to "
+				    "allocate or map rx buffer %d, error = "
+				    "%d\n", i, error);
+				/*
+				 * XXX Should attempt to run with fewer receive
+				 * XXX buffers instead of just failing.
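+				 * XXX As it stands, one failed allocation
+				 * XXX aborts initialization completely and
+				 * XXX drains the whole receive queue.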
+ */ + gem_rxdrain(sc); + return (1); + } + } else + GEM_INIT_RXDESC(sc, i); + } + sc->sc_rxptr = 0; + + return (0); +} + +static int +gem_ringsize(sz) + int sz; +{ + int v = 0; + + switch (sz) { + case 32: + v = GEM_RING_SZ_32; + break; + case 64: + v = GEM_RING_SZ_64; + break; + case 128: + v = GEM_RING_SZ_128; + break; + case 256: + v = GEM_RING_SZ_256; + break; + case 512: + v = GEM_RING_SZ_512; + break; + case 1024: + v = GEM_RING_SZ_1024; + break; + case 2048: + v = GEM_RING_SZ_2048; + break; + case 4096: + v = GEM_RING_SZ_4096; + break; + case 8192: + v = GEM_RING_SZ_8192; + break; + default: + printf("gem: invalid Receive Descriptor ring size\n"); + break; + } + return (v); +} + +/* + * Initialization of interface; set up initialization block + * and transmit/receive descriptor rings. + */ +static void +gem_init(xsc) + void *xsc; +{ + struct gem_softc *sc = (struct gem_softc *)xsc; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + int s; + u_int32_t v; + + s = splnet(); + + DPRINTF(sc, ("%s: gem_init: calling stop\n", device_get_name(sc->sc_dev))); + CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); + /* + * Initialization sequence. The numbered steps below correspond + * to the sequence outlined in section 6.3.5.1 in the Ethernet + * Channel Engine manual (part of the PCIO manual). + * See also the STP2002-STQ document from Sun Microsystems. + */ + + /* step 1 & 2. Reset the Ethernet Channel */ + gem_stop(&sc->sc_arpcom.ac_if, 0); + gem_reset(sc); + DPRINTF(sc, ("%s: gem_init: restarting\n", device_get_name(sc->sc_dev))); + CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); + + /* Re-initialize the MIF */ + gem_mifinit(sc); + + /* Call MI reset function if any */ + if (sc->sc_hwreset) + (*sc->sc_hwreset)(sc); + + /* step 3. Setup data structures in host memory */ + gem_meminit(sc); + + /* step 4. TX MAC registers & counters */ + gem_init_regs(sc); + /* XXX: VLAN code from NetBSD temporarily removed. */ + bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, + (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); + + /* step 5. RX MAC registers & counters */ + gem_setladrf(sc); + + /* step 6 & 7. Program Descriptor Ring Base Addresses */ + /* NOTE: we use only 32-bit DMA addresses here. */ + bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); + bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); + + bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); + bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); + DPRINTF(sc, ("loading rx ring %lx, tx ring %lx, cddma %lx\n", + GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma)); + CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", + GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); + + /* step 8. Global Configuration & Interrupt Mask */ + bus_space_write_4(t, h, GEM_INTMASK, + ~(GEM_INTR_TX_INTME| + GEM_INTR_TX_EMPTY| + GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| + GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| + GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| + GEM_INTR_BERR)); + bus_space_write_4(t, h, GEM_MAC_RX_MASK, 0); /* XXXX */ + bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ + bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ + + /* step 9. ETX Configuration: use mostly default values */ + + /* Enable DMA */ + v = gem_ringsize(GEM_NTXDESC /*XXX*/); + bus_space_write_4(t, h, GEM_TX_CONFIG, + v|GEM_TX_CONFIG_TXDMA_EN| + ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); + + /* step 10. 
ERX Configuration */ + + /* Encode Receive Descriptor ring size: four possible values */ + v = gem_ringsize(GEM_NRXDESC /*XXX*/); + + /* Enable DMA */ + bus_space_write_4(t, h, GEM_RX_CONFIG, + v|(GEM_THRSH_1024<sc_dev); + + /* step 12. RX_MAC Configuration Register */ + v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); + v |= GEM_MAC_RX_ENABLE; + bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); + + /* step 14. Issue Transmit Pending command */ + + /* Call MI initialization function if any */ + if (sc->sc_hwinit) + (*sc->sc_hwinit)(sc); + + /* step 15. Give the reciever a swift kick */ + bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); + + /* Start the one second timer. */ + callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); + + ifp->if_flags |= IFF_RUNNING; + ifp->if_flags &= ~IFF_OACTIVE; + ifp->if_timer = 0; + sc->sc_flags = ifp->if_flags; + splx(s); +} + +/* + * XXX: This is really a substitute for bus_dmamap_load_mbuf(), which FreeBSD + * does not yet have, with some adaptions for this driver. + * Some changes are mandated by the fact that multiple maps may needed to map + * a single mbuf. + * It should be removed once generic support is available. + * + * This is derived from NetBSD (syssrc/sys/arch/sparc64/sparc64/machdep.c), for + * a copyright notice see sparc64/sparc64/bus_machdep.c. + * + * Not every error condition is passed to the callback in this version, and the + * callback may be called more than once. + * It also gropes in the entails of the callback arg... + */ +static int +gem_dmamap_load_mbuf(sc, m0, cb, txj, flags) + struct gem_softc *sc; + struct mbuf *m0; + bus_dmamap_callback_t *cb; + struct gem_txjob *txj; + int flags; +{ + struct gem_txdma txd; + struct gem_txsoft *txs; + struct mbuf *m; + void *vaddr; + int error, first = 1, len, totlen; + + if ((m0->m_flags & M_PKTHDR) == 0) + panic("gem_dmamap_load_mbuf: no packet header"); + totlen = m0->m_pkthdr.len; + len = 0; + txd.txd_sc = sc; + txd.txd_nexttx = txj->txj_nexttx; + txj->txj_nsegs = 0; + STAILQ_INIT(&txj->txj_txsq); + m = m0; + while (m != NULL && len < totlen) { + if (m->m_len == 0) + continue; + /* Get a work queue entry. */ + if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { + /* + * Ran out of descriptors, return a value that + * cannot be returned by bus_dmamap_load to notify + * the caller. + */ + error = -1; + goto fail; + } + len += m->m_len; + txd.txd_flags = first ? GTXD_FIRST : 0; + if (m->m_next == NULL || len >= totlen) + txd.txd_flags |= GTXD_LAST; + vaddr = mtod(m, void *); + error = bus_dmamap_load(sc->sc_dmatag, txs->txs_dmamap, vaddr, + m->m_len, cb, &txd, flags); + if (error != 0 || txd.txd_error != 0) + goto fail; + /* Sync the DMA map. */ + bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, + BUS_DMASYNC_PREWRITE); + m = m->m_next; + /* + * Store a pointer to the packet so we can free it later, + * and remember what txdirty will be once the packet is + * done. + */ + txs->txs_mbuf = first ? 
m0 : NULL; + txs->txs_firstdesc = txj->txj_nexttx; + txs->txs_lastdesc = txd.txd_lasttx; + txs->txs_ndescs = txd.txd_nsegs; + CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " + "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, + txs->txs_ndescs); + STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); + STAILQ_INSERT_TAIL(&txj->txj_txsq, txs, txs_q); + txj->txj_nexttx = txd.txd_nexttx; + txj->txj_nsegs += txd.txd_nsegs; + first = 0; + } + txj->txj_lasttx = txd.txd_lasttx; + return (0); + +fail: + CTR1(KTR_GEM, "gem_dmamap_load_mbuf failed (%d)", error); + gem_dmamap_unload_mbuf(sc, txj); + return (error); +} + +/* + * Unload an mbuf using the txd the information was placed in. + * The tx interrupt code frees the tx segments one by one, because the txd is + * not available any more. + */ +static void +gem_dmamap_unload_mbuf(sc, txj) + struct gem_softc *sc; + struct gem_txjob *txj; +{ + struct gem_txsoft *txs; + + /* Readd the removed descriptors and unload the segments. */ + while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) { + bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap); + STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q); + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + } +} + +static void +gem_dmamap_commit_mbuf(sc, txj) + struct gem_softc *sc; + struct gem_txjob *txj; +{ + struct gem_txsoft *txs; + + /* Commit the txjob by transfering the txsoft's to the txdirtyq. */ + while ((txs = STAILQ_FIRST(&txj->txj_txsq)) != NULL) { + STAILQ_REMOVE_HEAD(&txj->txj_txsq, txs_q); + STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); + } +} + +static void +gem_init_regs(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + + /* These regs are not cleared on reset */ + sc->sc_inited = 0; + if (!sc->sc_inited) { + + /* Wooo. Magic values. */ + bus_space_write_4(t, h, GEM_MAC_IPG0, 0); + bus_space_write_4(t, h, GEM_MAC_IPG1, 8); + bus_space_write_4(t, h, GEM_MAC_IPG2, 4); + + bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); + /* Max frame and max burst size */ + bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, + (ifp->if_mtu+18) | (0x2000<<16)/* Burst size */); + bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); + bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); + bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); + /* Dunno.... 
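+		 * GEM_MAC_CONTROL_TYPE presumably takes the Ethernet type
+		 * field used for MAC control (pause) frames.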
*/ + bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); + bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, + ((sc->sc_arpcom.ac_enaddr[5]<<8)| + sc->sc_arpcom.ac_enaddr[4])&0x3ff); + /* Secondary MAC addr set to 0:0:0:0:0:0 */ + bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); + /* MAC control addr set to 0:1:c2:0:1:80 */ + bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); + bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); + bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); + + /* MAC filter addr set to 0:0:0:0:0:0 */ + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); + bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); + + bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); + bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); + + sc->sc_inited = 1; + } + + /* Counters need to be zeroed */ + bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); + bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); + bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); + bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); + + /* Un-pause stuff */ +#if 0 + bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); +#else + bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); +#endif + + /* + * Set the station address. + */ + bus_space_write_4(t, h, GEM_MAC_ADDR0, + (sc->sc_arpcom.ac_enaddr[4]<<8) | sc->sc_arpcom.ac_enaddr[5]); + bus_space_write_4(t, h, GEM_MAC_ADDR1, + (sc->sc_arpcom.ac_enaddr[2]<<8) | sc->sc_arpcom.ac_enaddr[3]); + bus_space_write_4(t, h, GEM_MAC_ADDR2, + (sc->sc_arpcom.ac_enaddr[0]<<8) | sc->sc_arpcom.ac_enaddr[1]); +} + +static void +gem_start(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; + struct mbuf *m0 = NULL, *m; + struct gem_txjob txj; + int firsttx, ofree, seg, ntx, txmfail; + + if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) + return; + + /* + * Remember the previous number of free descriptors and + * the first descriptor we'll use. + */ + ofree = sc->sc_txfree; + firsttx = sc->sc_txnext; + + DPRINTF(sc, ("%s: gem_start: txfree %d, txnext %d\n", + device_get_name(sc->sc_dev), ofree, firsttx)); + CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", + device_get_name(sc->sc_dev), ofree, firsttx); + + txj.txj_nexttx = firsttx; + txj.txj_lasttx = 0; + /* + * Loop through the send queue, setting up transmit descriptors + * until we drain the queue, or use up all available transmit + * descriptors. + */ + txmfail = 0; + for (ntx = 0;; ntx++) { + /* + * Grab a packet off the queue. + */ + IF_DEQUEUE(&ifp->if_snd, m0); + if (m0 == NULL) + break; + m = NULL; + + /* + * Load the DMA map. If this fails, the packet either + * didn't fit in the alloted number of segments, or we were + * short on resources. In this case, we'll copy and try + * again. 
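+		 * Copying collapses the whole chain into one cluster mbuf,
+		 * so the retried load needs only a single DMA map.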
+ */ + txmfail = gem_dmamap_load_mbuf(sc, m0, + gem_txdma_callback, &txj, BUS_DMA_NOWAIT); + if (txmfail == -1) { + IF_PREPEND(&ifp->if_snd, m0); + break; + } + if (txmfail > 0) { + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) { + device_printf(sc->sc_dev, "unable to " + "allocate Tx mbuf\n"); + /* Failed; requeue. */ + IF_PREPEND(&ifp->if_snd, m0); + break; + } + if (m0->m_pkthdr.len > MHLEN) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + device_printf(sc->sc_dev, "unable to " + "allocate Tx cluster\n"); + IF_PREPEND(&ifp->if_snd, m0); + m_freem(m); + break; + } + } + m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t)); + m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; + txmfail = gem_dmamap_load_mbuf(sc, m, + gem_txdma_callback, &txj, BUS_DMA_NOWAIT); + if (txmfail != 0) { + if (txmfail > 0) { + device_printf(sc->sc_dev, "unable to " + "load Tx buffer, error = %d\n", + txmfail); + } + m_freem(m); + IF_PREPEND(&ifp->if_snd, m0); + break; + } + } + + /* + * Ensure we have enough descriptors free to describe + * the packet. Note, we always reserve one descriptor + * at the end of the ring as a termination point, to + * prevent wrap-around. + */ + if (txj.txj_nsegs > (sc->sc_txfree - 1)) { + /* + * Not enough free descriptors to transmit this + * packet. We haven't committed to anything yet, + * so just unload the DMA map, put the packet + * back on the queue, and punt. Notify the upper + * layer that there are no more slots left. + * + * XXX We could allocate an mbuf and copy, but + * XXX it is worth it? + */ + ifp->if_flags |= IFF_OACTIVE; + gem_dmamap_unload_mbuf(sc, &txj); + if (m != NULL) + m_freem(m); + IF_PREPEND(&ifp->if_snd, m0); + break; + } + + if (m != NULL) + m_freem(m0); + + /* + * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. + */ + +#ifdef GEM_DEBUG + if (ifp->if_flags & IFF_DEBUG) { + printf(" gem_start %p transmit chain:\n", + STAILQ_FIRST(&txj.txj_txsq)); + for (seg = sc->sc_txnext;; seg = GEM_NEXTTX(seg)) { + printf("descriptor %d:\t", seg); + printf("gd_flags: 0x%016llx\t", (long long) + GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_flags)); + printf("gd_addr: 0x%016llx\n", (long long) + GEM_DMA_READ(sc, sc->sc_txdescs[seg].gd_addr)); + if (seg == txj.txj_lasttx) + break; + } + } +#endif + + /* Sync the descriptors we're using. */ + GEM_CDTXSYNC(sc, sc->sc_txnext, txj.txj_nsegs, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + + /* Advance the tx pointer. */ + sc->sc_txfree -= txj.txj_nsegs; + sc->sc_txnext = txj.txj_nexttx; + + gem_dmamap_commit_mbuf(sc, &txj); + } + + if (txmfail == -1 || sc->sc_txfree == 0) { + ifp->if_flags |= IFF_OACTIVE; + /* No more slots left; notify upper layer. */ + } + + if (ntx > 0) { + DPRINTF(sc, ("%s: packets enqueued, IC on %d, OWN on %d\n", + device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx)); + CTR3(KTR_GEM, "%s: packets enqueued, IC on %d, OWN on %d", + device_get_name(sc->sc_dev), txj.txj_lasttx, firsttx); + /* + * The entire packet chain is set up. + * Kick the transmitter. + */ + DPRINTF(sc, ("%s: gem_start: kicking tx %d\n", + device_get_name(sc->sc_dev), txj.txj_nexttx)); + CTR3(KTR_GEM, "%s: gem_start: kicking tx %d=%d", + device_get_name(sc->sc_dev), txj.txj_nexttx, + sc->sc_txnext); + bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, + sc->sc_txnext); + + /* Set a watchdog timer in case the chip flakes out. 
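+		 * gem_watchdog() runs if gem_tint() does not clear the
+		 * timer again within 5 seconds.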
+		 */
+		ifp->if_timer = 5;
+		DPRINTF(sc, ("%s: gem_start: watchdog %d\n",
+		    device_get_name(sc->sc_dev), ifp->if_timer));
+		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
+		    device_get_name(sc->sc_dev), ifp->if_timer);
+	}
+}
+
+/*
+ * Transmit interrupt.
+ */
+static void
+gem_tint(sc)
+	struct gem_softc *sc;
+{
+	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+	bus_space_tag_t t = sc->sc_bustag;
+	bus_space_handle_t mac = sc->sc_h;
+	struct gem_txsoft *txs;
+	int txlast;
+
+
+	DPRINTF(sc, ("%s: gem_tint\n", device_get_name(sc->sc_dev)));
+	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
+
+	/*
+	 * Unload collision counters
+	 */
+	ifp->if_collisions +=
+	    bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
+	    bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
+	    bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
+	    bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
+
+	/*
+	 * then clear the hardware counters.
+	 */
+	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
+	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
+	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
+	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
+
+	/*
+	 * Go through our Tx list and free mbufs for those
+	 * frames that have been transmitted.
+	 */
+	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
+		GEM_CDTXSYNC(sc, txs->txs_lastdesc,
+		    txs->txs_ndescs,
+		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+
+#ifdef GEM_DEBUG
+		if (ifp->if_flags & IFF_DEBUG) {
+			int i;
+			printf(" txsoft %p transmit chain:\n", txs);
+			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
+				printf("descriptor %d: ", i);
+				printf("gd_flags: 0x%016llx\t", (long long)
+				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
+				printf("gd_addr: 0x%016llx\n", (long long)
+				    GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
+				if (i == txs->txs_lastdesc)
+					break;
+			}
+		}
+#endif
+
+		/*
+		 * In theory, we could harvest some descriptors before
+		 * the ring is empty, but that's a bit complicated.
+		 *
+		 * GEM_TX_COMPLETION points to the last descriptor
+		 * processed +1.
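+		 * Example: a job with firstdesc = 1020 and lastdesc = 3 on
+		 * a ring of, say, 1024 descriptors has wrapped, so it is
+		 * still in flight while txlast lies in [1020, 1023] or
+		 * [0, 3]; the two cases below test exactly that.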
+ */ + txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); + DPRINTF(sc, + ("gem_tint: txs->txs_lastdesc = %d, txlast = %d\n", + txs->txs_lastdesc, txlast)); + CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " + "txs->txs_lastdesc = %d, txlast = %d", + txs->txs_firstdesc, txs->txs_lastdesc, txlast); + if (txs->txs_firstdesc <= txs->txs_lastdesc) { + if ((txlast >= txs->txs_firstdesc) && + (txlast <= txs->txs_lastdesc)) + break; + } else { + /* Ick -- this command wraps */ + if ((txlast >= txs->txs_firstdesc) || + (txlast <= txs->txs_lastdesc)) + break; + } + + DPRINTF(sc, ("gem_tint: releasing a desc\n")); + CTR0(KTR_GEM, "gem_tint: releasing a desc"); + STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); + + sc->sc_txfree += txs->txs_ndescs; + + bus_dmamap_sync(sc->sc_dmatag, txs->txs_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_dmatag, txs->txs_dmamap); + if (txs->txs_mbuf != NULL) { + m_freem(txs->txs_mbuf); + txs->txs_mbuf = NULL; + } + + STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); + + ifp->if_opackets++; + } + + DPRINTF(sc, ("gem_tint: GEM_TX_STATE_MACHINE %x " + "GEM_TX_DATA_PTR %llx " + "GEM_TX_COMPLETION %x\n", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), + ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_HI) << 32) | + bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_LO), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION))); + CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " + "GEM_TX_DATA_PTR %llx " + "GEM_TX_COMPLETION %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), + ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_HI) << 32) | + bus_space_read_4(sc->sc_bustag, sc->sc_h, + GEM_TX_DATA_PTR_LO), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); + + if (STAILQ_FIRST(&sc->sc_txdirtyq) == NULL) + ifp->if_timer = 0; + + + DPRINTF(sc, ("%s: gem_tint: watchdog %d\n", + device_get_name(sc->sc_dev), ifp->if_timer)); + CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", + device_get_name(sc->sc_dev), ifp->if_timer); + + /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ + ifp->if_flags &= ~IFF_OACTIVE; + gem_start(ifp); +} + +/* + * Receive interrupt. + */ +static void +gem_rint(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + struct ether_header *eh; + struct gem_rxsoft *rxs; + struct mbuf *m; + u_int64_t rxstat; + int i, len; + + DPRINTF(sc, ("%s: gem_rint\n", device_get_name(sc->sc_dev))); + CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); + /* + * XXXX Read the lastrx only once at the top for speed. + */ + DPRINTF(sc, ("gem_rint: sc->rxptr %d, complete %d\n", + sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION))); + CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", + sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); + for (i = sc->sc_rxptr; i != bus_space_read_4(t, h, GEM_RX_COMPLETION); + i = GEM_NEXTRX(i)) { + rxs = &sc->sc_rxsoft[i]; + + GEM_CDRXSYNC(sc, i, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + + rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); + + if (rxstat & GEM_RD_OWN) { + printf("gem_rint: completed descriptor " + "still owned %d\n", i); + /* + * We have processed all of the receive buffers. 
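+			 * (The OWN bit is still set, i.e. the chip has not
+			 * handed this descriptor back to us yet.)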
+ */ + break; + } + + if (rxstat & GEM_RD_BAD_CRC) { + device_printf(sc->sc_dev, "receive error: CRC error\n"); + GEM_INIT_RXDESC(sc, i); + continue; + } + + bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, + BUS_DMASYNC_POSTREAD); +#ifdef GEM_DEBUG + if (ifp->if_flags & IFF_DEBUG) { + printf(" rxsoft %p descriptor %d: ", rxs, i); + printf("gd_flags: 0x%016llx\t", (long long) + GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); + printf("gd_addr: 0x%016llx\n", (long long) + GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); + } +#endif + + /* + * No errors; receive the packet. Note the Gem + * includes the CRC with every packet. + */ + len = GEM_RD_BUFLEN(rxstat); + + /* + * Allocate a new mbuf cluster. If that fails, we are + * out of memory, and must drop the packet and recycle + * the buffer that's already attached to this descriptor. + */ + m = rxs->rxs_mbuf; + if (gem_add_rxbuf(sc, i) != 0) { + ifp->if_ierrors++; + GEM_INIT_RXDESC(sc, i); + bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, + BUS_DMASYNC_PREREAD); + continue; + } + m->m_data += 2; /* We're already off by two */ + + ifp->if_ipackets++; + eh = mtod(m, struct ether_header *); + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; + m_adj(m, sizeof(struct ether_header)); + + /* Pass it on. */ + ether_input(ifp, eh, m); + } + + /* Update the receive pointer. */ + sc->sc_rxptr = i; + bus_space_write_4(t, h, GEM_RX_KICK, i); + + DPRINTF(sc, ("gem_rint: done sc->rxptr %d, complete %d\n", + sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION))); + CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", + sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); + +} + + +/* + * gem_add_rxbuf: + * + * Add a receive buffer to the indicated descriptor. + */ +static int +gem_add_rxbuf(sc, idx) + struct gem_softc *sc; + int idx; +{ + struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; + struct mbuf *m; + int error; + + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) + return (ENOBUFS); + + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_freem(m); + return (ENOBUFS); + } + +#ifdef GEM_DEBUG + /* bzero the packet to check dma */ + memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); +#endif + + if (rxs->rxs_mbuf != NULL) + bus_dmamap_unload(sc->sc_dmatag, rxs->rxs_dmamap); + + rxs->rxs_mbuf = m; + + error = bus_dmamap_load(sc->sc_dmatag, rxs->rxs_dmamap, + m->m_ext.ext_buf, m->m_ext.ext_size, gem_rxdma_callback, rxs, + BUS_DMA_NOWAIT); + if (error != 0 || rxs->rxs_paddr == 0) { + device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " + "%d\n", idx, error); + panic("gem_add_rxbuf"); /* XXX */ + } + + bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); + + GEM_INIT_RXDESC(sc, idx); + + return (0); +} + + +static void +gem_eint(sc, status) + struct gem_softc *sc; + u_int status; +{ + + if ((status & GEM_INTR_MIF) != 0) { + device_printf(sc->sc_dev, "XXXlink status changed\n"); + return; + } + + device_printf(sc->sc_dev, "status=%x\n", status); +} + + +void +gem_intr(v) + void *v; +{ + struct gem_softc *sc = (struct gem_softc *)v; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t seb = sc->sc_h; + u_int32_t status; + + status = bus_space_read_4(t, seb, GEM_STATUS); + DPRINTF(sc, ("%s: gem_intr: cplt %x, status %x\n", + device_get_name(sc->sc_dev), (status>>19), + (u_int)status)); + CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", + device_get_name(sc->sc_dev), (status>>19), + (u_int)status); + + if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) + gem_eint(sc, status); + + 
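+	/*
+	 * Note that the GEM_STATUS read above has already acked most of
+	 * these interrupts: bits 0-6 of the status register clear on
+	 * read (see if_gemreg.h).
+	 */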
if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) + gem_tint(sc); + + if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) + gem_rint(sc); + + /* We should eventually do more than just print out error stats. */ + if (status & GEM_INTR_TX_MAC) { + int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); + if (txstat & ~GEM_MAC_TX_XMIT_DONE) + printf("MAC tx fault, status %x\n", txstat); + } + if (status & GEM_INTR_RX_MAC) { + int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); + if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) + printf("MAC rx fault, status %x\n", rxstat); + } +} + + +static void +gem_watchdog(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = ifp->if_softc; + + DPRINTF(sc, ("gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " + "GEM_MAC_RX_CONFIG %x\n", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG))); + CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " + "GEM_MAC_RX_CONFIG %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); + CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " + "GEM_MAC_TX_CONFIG %x", + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), + bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); + + device_printf(sc->sc_dev, "device timeout\n"); + ++ifp->if_oerrors; + + /* Try to get more packets going. */ + gem_start(ifp); +} + +/* + * Initialize the MII Management Interface + */ +static void +gem_mifinit(sc) + struct gem_softc *sc; +{ + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + + /* Configure the MIF in frame mode */ + sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); +} + +/* + * MII interface + * + * The GEM MII interface supports at least three different operating modes: + * + * Bitbang mode is implemented using data, clock and output enable registers. + * + * Frame mode is implemented by loading a complete frame into the frame + * register and polling the valid bit for completion. + * + * Polling mode uses the frame register but completion is indicated by + * an interrupt. 
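+ * This driver uses frame mode.  gem_mii_readreg() below builds a read
+ * frame as
+ *
+ *	GEM_MIF_FRAME_READ | (phy << GEM_MIF_PHY_SHIFT) |
+ *	    (reg << GEM_MIF_REG_SHIFT)
+ *
+ * writes it to GEM_MIF_FRAME, and then polls the GEM_MIF_FRAME_TA0 bit
+ * to detect completion.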
+ * + */ +int +gem_mii_readreg(dev, phy, reg) + device_t dev; + int phy, reg; +{ + struct gem_softc *sc = device_get_softc(dev); + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + int n; + u_int32_t v; + +#ifdef GEM_DEBUG_PHY + printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); +#endif + +#if 0 + /* Select the desired PHY in the MIF configuration register */ + v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + /* Clear PHY select bit */ + v &= ~GEM_MIF_CONFIG_PHY_SEL; + if (phy == GEM_PHYAD_EXTERNAL) + /* Set PHY select bit to get at external device */ + v |= GEM_MIF_CONFIG_PHY_SEL; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); +#endif + + /* Construct the frame command */ + v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | + GEM_MIF_FRAME_READ; + + bus_space_write_4(t, mif, GEM_MIF_FRAME, v); + for (n = 0; n < 100; n++) { + DELAY(1); + v = bus_space_read_4(t, mif, GEM_MIF_FRAME); + if (v & GEM_MIF_FRAME_TA0) + return (v & GEM_MIF_FRAME_DATA); + } + + device_printf(sc->sc_dev, "mii_read timeout\n"); + return (0); +} + +int +gem_mii_writereg(dev, phy, reg, val) + device_t dev; + int phy, reg, val; +{ + struct gem_softc *sc = device_get_softc(dev); + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mif = sc->sc_h; + int n; + u_int32_t v; + +#ifdef GEM_DEBUG_PHY + printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); +#endif + +#if 0 + /* Select the desired PHY in the MIF configuration register */ + v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); + /* Clear PHY select bit */ + v &= ~GEM_MIF_CONFIG_PHY_SEL; + if (phy == GEM_PHYAD_EXTERNAL) + /* Set PHY select bit to get at external device */ + v |= GEM_MIF_CONFIG_PHY_SEL; + bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); +#endif + /* Construct the frame command */ + v = GEM_MIF_FRAME_WRITE | + (phy << GEM_MIF_PHY_SHIFT) | + (reg << GEM_MIF_REG_SHIFT) | + (val & GEM_MIF_FRAME_DATA); + + bus_space_write_4(t, mif, GEM_MIF_FRAME, v); + for (n = 0; n < 100; n++) { + DELAY(1); + v = bus_space_read_4(t, mif, GEM_MIF_FRAME); + if (v & GEM_MIF_FRAME_TA0) + return (1); + } + + device_printf(sc->sc_dev, "mii_write timeout\n"); + return (0); +} + +void +gem_mii_statchg(dev) + device_t dev; +{ + struct gem_softc *sc = device_get_softc(dev); +#ifdef GEM_DEBUG + int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); +#endif + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t mac = sc->sc_h; + u_int32_t v; + +#ifdef GEM_DEBUG + if (sc->sc_debug) + printf("gem_mii_statchg: status change: phy = %d\n", + sc->sc_phys[instance]); +#endif + + /* Set tx full duplex options */ + bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); + DELAY(10000); /* reg must be cleared and delay before changing. */ + v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| + GEM_MAC_TX_ENABLE; + if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { + v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; + } + bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); + + /* XIF Configuration */ + /* We should really calculate all this rather than rely on defaults */ + v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); + v = GEM_MAC_XIF_LINK_LED; + v |= GEM_MAC_XIF_TX_MII_ENA; + /* If an external transceiver is connected, enable its MII drivers */ + sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); + if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { + /* External MII needs echo disable if half duplex. 
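+		 * disable, presumably so that frames we transmit are not
+		 * looped back to us as receive traffic.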
*/ + if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) + /* turn on full duplex LED */ + v |= GEM_MAC_XIF_FDPLX_LED; + else + /* half duplex -- disable echo */ + v |= GEM_MAC_XIF_ECHO_DISABL; + } else { + /* Internal MII needs buf enable */ + v |= GEM_MAC_XIF_MII_BUF_ENA; + } + bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); +} + +int +gem_mediachange(ifp) + struct ifnet *ifp; +{ + struct gem_softc *sc = ifp->if_softc; + + /* XXX Add support for serial media. */ + + return (mii_mediachg(sc->sc_mii)); +} + +void +gem_mediastatus(ifp, ifmr) + struct ifnet *ifp; + struct ifmediareq *ifmr; +{ + struct gem_softc *sc = ifp->if_softc; + + if ((ifp->if_flags & IFF_UP) == 0) + return; + + mii_pollstat(sc->sc_mii); + ifmr->ifm_active = sc->sc_mii->mii_media_active; + ifmr->ifm_status = sc->sc_mii->mii_media_status; +} + +/* + * Process an ioctl request. + */ +static int +gem_ioctl(ifp, cmd, data) + struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + struct gem_softc *sc = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *)data; + int s, error = 0; + + switch (cmd) { + case SIOCSIFADDR: + case SIOCGIFADDR: + case SIOCSIFMTU: + error = ether_ioctl(ifp, cmd, data); + break; + case SIOCSIFFLAGS: + if (ifp->if_flags & IFF_UP) { + if ((sc->sc_flags ^ ifp->if_flags) == IFF_PROMISC) + gem_setladrf(sc); + else + gem_init(sc); + } else { + if (ifp->if_flags & IFF_RUNNING) + gem_stop(ifp, 0); + } + sc->sc_flags = ifp->if_flags; + error = 0; + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + gem_setladrf(sc); + error = 0; + break; + case SIOCGIFMEDIA: + case SIOCSIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); + break; + default: + error = ENOTTY; + break; + } + + /* Try to get things going again */ + if (ifp->if_flags & IFF_UP) + gem_start(ifp); + splx(s); + return (error); +} + +/* + * Set up the logical address filter. + */ +static void +gem_setladrf(sc) + struct gem_softc *sc; +{ + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + struct ifmultiaddr *inm; + struct sockaddr_dl *sdl; + bus_space_tag_t t = sc->sc_bustag; + bus_space_handle_t h = sc->sc_h; + u_char *cp; + u_int32_t crc; + u_int32_t hash[16]; + u_int32_t v; + int len; + + /* Clear hash table */ + memset(hash, 0, sizeof(hash)); + + /* Get current RX configuration */ + v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); + + if ((ifp->if_flags & IFF_PROMISC) != 0) { + /* Turn on promiscuous mode; turn off the hash filter */ + v |= GEM_MAC_RX_PROMISCUOUS; + v &= ~GEM_MAC_RX_HASH_FILTER; + ; + goto chipit; + } + if ((ifp->if_flags & IFF_ALLMULTI) != 0) { + hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; + ifp->if_flags |= IFF_ALLMULTI; + goto chipit; + } + + /* Turn off promiscuous mode; turn on the hash filter */ + v &= ~GEM_MAC_RX_PROMISCUOUS; + v |= GEM_MAC_RX_HASH_FILTER; + + /* + * Set up multicast address filter by passing all multicast addresses + * through a crc generator, and then using the high order 6 bits as an + * index into the 256 bit logical address filter. The high order bit + * selects the word, while the rest of the bits select the bit within + * the word. 
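+	 * (The code below actually keeps the top eight bits of the CRC:
+	 * the upper four select one of the sixteen 16-bit hash words, the
+	 * lower four the bit within that word.  Example: crc >> 24 ==
+	 * 0x5a sets bit 10 of hash[5], since 0x5a >> 4 == 5 and
+	 * 0x5a & 0xf == 10.)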
+ */ + + TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) { + if (inm->ifma_addr->sa_family != AF_LINK) + continue; + sdl = (struct sockaddr_dl *)inm->ifma_addr; + cp = LLADDR(sdl); + crc = 0xffffffff; + for (len = sdl->sdl_alen; --len >= 0;) { + int octet = *cp++; + int i; + +#define MC_POLY_LE 0xedb88320UL /* mcast crc, little endian */ + for (i = 0; i < 8; i++) { + if ((crc & 1) ^ (octet & 1)) { + crc >>= 1; + crc ^= MC_POLY_LE; + } else { + crc >>= 1; + } + octet >>= 1; + } + } + /* Just want the 8 most significant bits. */ + crc >>= 24; + + /* Set the corresponding bit in the filter. */ + hash[crc >> 4] |= 1 << (crc & 0xf); + } + +chipit: + /* Now load the hash table into the chip */ + bus_space_write_4(t, h, GEM_MAC_HASH0, hash[0]); + bus_space_write_4(t, h, GEM_MAC_HASH1, hash[1]); + bus_space_write_4(t, h, GEM_MAC_HASH2, hash[2]); + bus_space_write_4(t, h, GEM_MAC_HASH3, hash[3]); + bus_space_write_4(t, h, GEM_MAC_HASH4, hash[4]); + bus_space_write_4(t, h, GEM_MAC_HASH5, hash[5]); + bus_space_write_4(t, h, GEM_MAC_HASH6, hash[6]); + bus_space_write_4(t, h, GEM_MAC_HASH7, hash[7]); + bus_space_write_4(t, h, GEM_MAC_HASH8, hash[8]); + bus_space_write_4(t, h, GEM_MAC_HASH9, hash[9]); + bus_space_write_4(t, h, GEM_MAC_HASH10, hash[10]); + bus_space_write_4(t, h, GEM_MAC_HASH11, hash[11]); + bus_space_write_4(t, h, GEM_MAC_HASH12, hash[12]); + bus_space_write_4(t, h, GEM_MAC_HASH13, hash[13]); + bus_space_write_4(t, h, GEM_MAC_HASH14, hash[14]); + bus_space_write_4(t, h, GEM_MAC_HASH15, hash[15]); + + bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); +} + +#if notyet + +/* + * gem_power: + * + * Power management (suspend/resume) hook. + */ +void +static gem_power(why, arg) + int why; + void *arg; +{ + struct gem_softc *sc = arg; + struct ifnet *ifp = &sc->sc_arpcom.ac_if; + int s; + + s = splnet(); + switch (why) { + case PWR_SUSPEND: + case PWR_STANDBY: + gem_stop(ifp, 1); + if (sc->sc_power != NULL) + (*sc->sc_power)(sc, why); + break; + case PWR_RESUME: + if (ifp->if_flags & IFF_UP) { + if (sc->sc_power != NULL) + (*sc->sc_power)(sc, why); + gem_init(ifp); + } + break; + case PWR_SOFTSUSPEND: + case PWR_SOFTSTANDBY: + case PWR_SOFTRESUME: + break; + } + splx(s); +} +#endif diff --git a/sys/dev/gem/if_gem_pci.c b/sys/dev/gem/if_gem_pci.c new file mode 100644 index 000000000000..9be2fac74571 --- /dev/null +++ b/sys/dev/gem/if_gem_pci.c @@ -0,0 +1,192 @@ +/* + * Copyright (C) 2001 Eduardo Horvath. + * All rights reserved. + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: NetBSD: if_gem_pci.c,v 1.7 2001/10/18 15:09:15 thorpej Exp + * + * $FreeBSD$ + */ + +/* + * PCI bindings for Sun GEM ethernet controllers. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#include +#include + +#include "miibus_if.h" + +struct gem_pci_softc { + struct gem_softc gsc_gem; /* GEM device */ + struct resource *gsc_sres; + int gsc_srid; + struct resource *gsc_ires; + int gsc_irid; + void *gsc_ih; +}; + +static int gem_pci_probe __P((device_t)); +static int gem_pci_attach __P((device_t)); + + +static device_method_t gem_pci_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, gem_pci_probe), + DEVMETHOD(device_attach, gem_pci_attach), + + /* bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_driver_added, bus_generic_driver_added), + + /* MII interface */ + DEVMETHOD(miibus_readreg, gem_mii_readreg), + DEVMETHOD(miibus_writereg, gem_mii_writereg), + DEVMETHOD(miibus_statchg, gem_mii_statchg), + + { 0, 0 } +}; + +static driver_t gem_pci_driver = { + "gem", + gem_pci_methods, + sizeof(struct gem_pci_softc) +}; + + +DRIVER_MODULE(if_gem, pci, gem_pci_driver, gem_devclass, 0, 0); + +struct gem_pci_dev { + u_int32_t gpd_devid; + char *gpd_desc; +} gem_pci_devlist[] = { + { 0x1101108e, "Sun ERI 10/100 Ethernet Adaptor" }, + { 0x2bad108e, "Sun GEM Gigabit Ethernet Adaptor" }, + { 0x0021106b, "Apple GMAC Ethernet Adaptor" }, + { 0x0024106b, "Apple GMAC2 Ethernet Adaptor" }, + { 0, NULL } +}; + +/* + * Attach routines need to be split out to different bus-specific files. + */ +static int +gem_pci_probe(dev) + device_t dev; +{ + int i; + u_int32_t devid; + + devid = pci_get_devid(dev); + for (i = 0; gem_pci_devlist[i].gpd_desc != NULL; i++) { + if (devid == gem_pci_devlist[i].gpd_devid) { + device_set_desc(dev, gem_pci_devlist[i].gpd_desc); + return (0); + } + } + + return (ENXIO); +} + +static int +gem_pci_attach(dev) + device_t dev; +{ + struct gem_pci_softc *gsc = device_get_softc(dev); + struct gem_softc *sc = &gsc->gsc_gem; + + sc->sc_dev = dev; + sc->sc_pci = 1; /* XXX */ + + gsc->gsc_srid = PCI_GEM_BASEADDR; + gsc->gsc_sres = bus_alloc_resource(dev, SYS_RES_MEMORY, &gsc->gsc_srid, + 0, ~0, 1, RF_ACTIVE); + if (gsc->gsc_sres == NULL) { + device_printf(dev, "failed to allocate bus space resource\n"); + return (ENXIO); + } + + gsc->gsc_irid = 0; + gsc->gsc_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &gsc->gsc_irid, 0, + ~0, 1, RF_SHAREABLE | RF_ACTIVE); + if (gsc->gsc_ires == NULL) { + device_printf(dev, "failed to allocate interrupt resource\n"); + goto fail_sres; + } + + sc->sc_bustag = rman_get_bustag(gsc->gsc_sres); + sc->sc_h = rman_get_bushandle(gsc->gsc_sres); + + /* All platform that this driver is used on must provide this. 
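+	 * OF_getetheraddr() fetches the MAC address from the Open
+	 * Firmware device tree, which is where both the sparc64 machines
+	 * and the Apple hardware listed above publish it.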
+	OF_getetheraddr(dev, sc->sc_arpcom.ac_enaddr);
+
+	/*
+	 * Call the main attach code to do the rest of the configuration.
+	 */
+	if (gem_attach(sc) != 0) {
+		device_printf(dev, "could not be configured\n");
+		goto fail_ires;
+	}
+
+	if (bus_setup_intr(dev, gsc->gsc_ires, INTR_TYPE_NET, gem_intr, sc,
+	    &gsc->gsc_ih) != 0) {
+		device_printf(dev, "failed to set up interrupt\n");
+		goto fail_ires;
+	}
+	return (0);
+
+fail_ires:
+	bus_release_resource(dev, SYS_RES_IRQ, gsc->gsc_irid, gsc->gsc_ires);
+fail_sres:
+	bus_release_resource(dev, SYS_RES_MEMORY, gsc->gsc_srid, gsc->gsc_sres);
+	return (ENXIO);
+}
diff --git a/sys/dev/gem/if_gemreg.h b/sys/dev/gem/if_gemreg.h
new file mode 100644
index 000000000000..c0f84efdccb1
--- /dev/null
+++ b/sys/dev/gem/if_gemreg.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2001 Eduardo Horvath.
+ * All rights reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: gemreg.h,v 1.2 2001/10/18 03:33:33 thorpej Exp
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_GEMREG_H
+#define _IF_GEMREG_H
+
+/* Register definitions for Sun GEM gigabit ethernet */
+
+#define GEM_SEB_STATE 0x0000 /* SEB state reg, R/O */
+#define GEM_CONFIG 0x0004 /* config reg */
+#define GEM_STATUS 0x000c /* status reg */
+/* Note: Reading the status reg clears bits 0-6 */
+#define GEM_INTMASK 0x0010
+#define GEM_INTACK 0x0014 /* Interrupt acknowledge, W/O */
+#define GEM_STATUS_ALIAS 0x001c
+/* This is the same as the GEM_STATUS reg but reading it does not clear bits. */
+#define GEM_ERROR_STATUS 0x1000 /* PCI error status R/C */
+#define GEM_ERROR_MASK 0x1004
+#define GEM_BIF_CONFIG 0x1008 /* BIF config reg */
+#define GEM_BIF_DIAG 0x100c
+#define GEM_RESET 0x1010 /* Software reset register */
+
+
+/* Bits in GEM_SEB register */
+#define GEM_SEB_ARB 0x000000002 /* Arbitration status */
+#define GEM_SEB_RXWON 0x000000004
+
+
+/* Bits in GEM_CONFIG register */
+#define GEM_CONFIG_BURST_64 0x000000000 /* 0->infinite, 1->64KB */
+#define GEM_CONFIG_BURST_INF 0x000000001 /* 0->infinite, 1->64KB */
+#define GEM_CONFIG_TXDMA_LIMIT 0x00000003e
+#define GEM_CONFIG_RXDMA_LIMIT 0x0000007c0
+
+#define GEM_CONFIG_TXDMA_LIMIT_SHIFT 1
+#define GEM_CONFIG_RXDMA_LIMIT_SHIFT 6
+
+
+/* Top part of GEM_STATUS has TX completion information */
+#define GEM_STATUS_TX_COMPL 0xfff800000 /* TX completion reg. */
+
+
+/* Interrupt bits, for both the GEM_STATUS and GEM_INTMASK regs. */
+#define GEM_INTR_TX_INTME 0x000000001 /* Frame w/INTME bit set sent */
+#define GEM_INTR_TX_EMPTY 0x000000002 /* TX ring empty */
+#define GEM_INTR_TX_DONE 0x000000004 /* TX complete */
+#define GEM_INTR_RX_DONE 0x000000010 /* Got a packet */
+#define GEM_INTR_RX_NOBUF 0x000000020
+#define GEM_INTR_RX_TAG_ERR 0x000000040
+#define GEM_INTR_PCS 0x000002000
+#define GEM_INTR_TX_MAC 0x000004000
+#define GEM_INTR_RX_MAC 0x000008000
+#define GEM_INTR_MAC_CONTROL 0x000010000 /* MAC control interrupt */
+#define GEM_INTR_MIF 0x000020000
+#define GEM_INTR_BERR 0x000040000 /* Bus error interrupt */
+#define GEM_INTR_BITS "\177\020" \
+	"b\0INTME\0b\1TXEMPTY\0b\2TXDONE\0" \
+	"b\4RXDONE\0b\5RXNOBUF\0b\6RX_TAG_ERR\0" \
+	"b\15PCS\0b\16TXMAC\0b\17RXMAC\0" \
+	"b\20MAC_CONTROL\0b\21MIF\0b\22BERR\0\0"
+
+
+
+/* GEM_ERROR_STATUS and GEM_ERROR_MASK PCI error bits */
+#define GEM_ERROR_STAT_BADACK 0x000000001 /* No ACK64# */
+#define GEM_ERROR_STAT_DTRTO 0x000000002 /* Delayed xaction timeout */
+#define GEM_ERROR_STAT_OTHERS 0x000000004
+
+
+/* GEM_BIF_CONFIG register bits */
+#define GEM_BIF_CONFIG_SLOWCLK 0x000000001 /* Parity error timing */
+#define GEM_BIF_CONFIG_HOST_64 0x000000002 /* 64-bit host */
+#define GEM_BIF_CONFIG_B64D_DIS 0x000000004 /* no 64-bit data cycle */
+#define GEM_BIF_CONFIG_M66EN 0x000000008
+
+
+/* GEM_RESET register bits -- TX and RX self clear when complete. */
+#define GEM_RESET_TX 0x000000001 /* Reset TX half */
+#define GEM_RESET_RX 0x000000002 /* Reset RX half */
+#define GEM_RESET_RSTOUT 0x000000004 /* Force PCI RSTOUT# */
+
+
+/* GEM TX DMA registers */
+#define GEM_TX_KICK 0x2000 /* Write last valid desc + 1 */
+#define GEM_TX_CONFIG 0x2004
+#define GEM_TX_RING_PTR_LO 0x2008
+#define GEM_TX_RING_PTR_HI 0x200c
+
+#define GEM_TX_FIFO_WR_PTR 0x2014 /* FIFO write pointer */
+#define GEM_TX_FIFO_SDWR_PTR 0x2018 /* FIFO shadow write pointer */
+#define GEM_TX_FIFO_RD_PTR 0x201c /* FIFO read pointer */
+#define GEM_TX_FIFO_SDRD_PTR 0x2020 /* FIFO shadow read pointer */
+#define GEM_TX_FIFO_PKT_CNT 0x2024 /* FIFO packet counter */
+
+#define GEM_TX_STATE_MACHINE 0x2028 /* ETX state machine reg */
+#define GEM_TX_DATA_PTR_LO 0x2030
+#define GEM_TX_DATA_PTR_HI 0x2034
+
+#define GEM_TX_COMPLETION 0x2100
+#define GEM_TX_FIFO_ADDRESS 0x2104
+#define GEM_TX_FIFO_TAG 0x2108
+#define GEM_TX_FIFO_DATA_LO 0x210c
+#define GEM_TX_FIFO_DATA_HI_T1 0x2110
+#define GEM_TX_FIFO_DATA_HI_T0 0x2114
+#define GEM_TX_FIFO_SIZE 0x2118
+#define GEM_TX_DEBUG 0x3028
+
+
+/* GEM_TX_CONFIG register bits. */
+#define GEM_TX_CONFIG_TXDMA_EN 0x00000001 /* TX DMA enable */
+#define GEM_TX_CONFIG_TXRING_SZ 0x0000001e /* TX ring size */
+#define GEM_TX_CONFIG_TXFIFO_TH 0x001ffc00 /* TX fifo threshold */
+#define GEM_TX_CONFIG_PACED 0x00200000 /* TX_all_int modifier */
+
+#define GEM_RING_SZ_32 (0<<1) /* 32 descriptors */
+#define GEM_RING_SZ_64 (1<<1)
+#define GEM_RING_SZ_128 (2<<1)
+#define GEM_RING_SZ_256 (3<<1)
+#define GEM_RING_SZ_512 (4<<1)
+#define GEM_RING_SZ_1024 (5<<1)
+#define GEM_RING_SZ_2048 (6<<1)
+#define GEM_RING_SZ_4096 (7<<1)
+#define GEM_RING_SZ_8192 (8<<1)
+
+
+/* GEM_TX_COMPLETION register bits */
+#define GEM_TX_COMPLETION_MASK 0x00001fff /* # of last descriptor */
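The ring-size field shared by GEM_TX_CONFIG and GEM_RX_CONFIG (mask 0x1e) encodes the descriptor count as a power of two from 32 up to 8192, which is what the GEM_RING_SZ_* constants above spell out. The driver's gem_ringsize() helper (declared earlier in the patch) presumably performs a lookup of roughly this shape; the sketch below is illustrative only, not the committed implementation:

/*
 * Map a descriptor count to the GEM_TX_CONFIG/GEM_RX_CONFIG ring-size
 * field: field = log2(ndesc / 32) << 1, valid for 32..8192.
 */
static int
ring_size_field(int ndesc)
{
	int sz, log2;

	for (sz = 32, log2 = 0; sz <= 8192; sz <<= 1, log2++) {
		if (sz == ndesc)
			return (log2 << 1);
	}
	return (-1);		/* unsupported ring size */
}

With GEM_NTXDESC at 1024 (see if_gemvar.h below), this yields 5 << 1, i.e. GEM_RING_SZ_1024.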
+
+
+/* GEM RX DMA registers */
+#define GEM_RX_CONFIG 0x4000
+#define GEM_RX_RING_PTR_LO 0x4004 /* 64-bits unaligned GAK! */
+#define GEM_RX_RING_PTR_HI 0x4008 /* 64-bits unaligned GAK! */
+
+#define GEM_RX_FIFO_WR_PTR 0x400c /* FIFO write pointer */
+#define GEM_RX_FIFO_SDWR_PTR 0x4010 /* FIFO shadow write pointer */
+#define GEM_RX_FIFO_RD_PTR 0x4014 /* FIFO read pointer */
+#define GEM_RX_FIFO_PKT_CNT 0x4018 /* FIFO packet counter */
+
+#define GEM_RX_STATE_MACHINE 0x401c /* ERX state machine reg */
+#define GEM_RX_PAUSE_THRESH 0x4020
+
+#define GEM_RX_DATA_PTR_LO 0x4024 /* RX data pointer, low word */
+#define GEM_RX_DATA_PTR_HI 0x4028 /* Damn thing is unaligned */
+
+#define GEM_RX_KICK 0x4100 /* Write last valid desc + 1 */
+#define GEM_RX_COMPLETION 0x4104 /* First pending desc */
+#define GEM_RX_BLANKING 0x4108 /* Interrupt blanking reg */
+
+#define GEM_RX_FIFO_ADDRESS 0x410c
+#define GEM_RX_FIFO_TAG 0x4110
+#define GEM_RX_FIFO_DATA_LO 0x4114
+#define GEM_RX_FIFO_DATA_HI_T1 0x4118
+#define GEM_RX_FIFO_DATA_HI_T0 0x411c
+#define GEM_RX_FIFO_SIZE 0x4120
+
+
+/* GEM_RX_CONFIG register bits. */
+#define GEM_RX_CONFIG_RXDMA_EN 0x00000001 /* RX DMA enable */
+#define GEM_RX_CONFIG_RXRING_SZ 0x0000001e /* RX ring size */
+#define GEM_RX_CONFIG_BATCH_DIS 0x00000020 /* desc batching disable */
+#define GEM_RX_CONFIG_FBOFF 0x00001c00 /* first byte offset */
+#define GEM_RX_CONFIG_CXM_START 0x000fe000 /* checksum start offset */
+#define GEM_RX_CONFIG_FIFO_THRS 0x07000000 /* fifo threshold size */
+
+#define GEM_THRSH_64 0
+#define GEM_THRSH_128 1
+#define GEM_THRSH_256 2
+#define GEM_THRSH_512 3
+#define GEM_THRSH_1024 4
+#define GEM_THRSH_2048 5
+
+#define GEM_RX_CONFIG_FIFO_THRS_SHIFT 24
+#define GEM_RX_CONFIG_FBOFF_SHFT 10
+#define GEM_RX_CONFIG_CXM_START_SHFT 13
+
+
+/* GEM_RX_PAUSE_THRESH register bits -- sizes in multiples of 64 bytes */
+#define GEM_RX_PTH_XOFF_THRESH 0x000001ff
+#define GEM_RX_PTH_XON_THRESH 0x07fc0000
+
+
+/* GEM_RX_BLANKING register bits */
+#define GEM_RX_BLANKING_PACKETS 0x000001ff /* Delay intr for x packets */
+#define GEM_RX_BLANKING_TIME 0x03fc0000 /* Delay intr for x ticks */
+/* One tick is 1048 PCI clocks, or 16us at 66MHz */
+
+
+/* GEM_MAC registers */
+#define GEM_MAC_TXRESET 0x6000 /* Store 1, cleared when done */
+#define GEM_MAC_RXRESET 0x6004 /* ditto */
+#define GEM_MAC_SEND_PAUSE_CMD 0x6008
+#define GEM_MAC_TX_STATUS 0x6010
+#define GEM_MAC_RX_STATUS 0x6014
+#define GEM_MAC_CONTROL_STATUS 0x6018 /* MAC control status reg */
+#define GEM_MAC_TX_MASK 0x6020 /* TX MAC mask register */
+#define GEM_MAC_RX_MASK 0x6024
+#define GEM_MAC_CONTROL_MASK 0x6028
+#define GEM_MAC_TX_CONFIG 0x6030
+#define GEM_MAC_RX_CONFIG 0x6034
+#define GEM_MAC_CONTROL_CONFIG 0x6038
+#define GEM_MAC_XIF_CONFIG 0x603c
+#define GEM_MAC_IPG0 0x6040 /* inter packet gap 0 */
+#define GEM_MAC_IPG1 0x6044 /* inter packet gap 1 */
+#define GEM_MAC_IPG2 0x6048 /* inter packet gap 2 */
+#define GEM_MAC_SLOT_TIME 0x604c
+#define GEM_MAC_MAC_MIN_FRAME 0x6050
+#define GEM_MAC_MAC_MAX_FRAME 0x6054
+#define GEM_MAC_PREAMBLE_LEN 0x6058
+#define GEM_MAC_JAM_SIZE 0x605c
+#define GEM_MAC_ATTEMPT_LIMIT 0x6060
+#define GEM_MAC_CONTROL_TYPE 0x6064
+
+#define GEM_MAC_ADDR0 0x6080 /* Normal MAC address 0 */
+#define GEM_MAC_ADDR1 0x6084
+#define GEM_MAC_ADDR2 0x6088
+#define GEM_MAC_ADDR3 0x608c /* Alternate MAC address 0 */
+#define GEM_MAC_ADDR4 0x6090
+#define GEM_MAC_ADDR5 0x6094
+#define GEM_MAC_ADDR6 0x6098 /* Control MAC address 0 */
+#define GEM_MAC_ADDR7 0x609c
+#define GEM_MAC_ADDR8 0x60a0
+
+#define GEM_MAC_ADDR_FILTER0 0x60a4
+#define GEM_MAC_ADDR_FILTER1 0x60a8
+#define GEM_MAC_ADDR_FILTER2 0x60ac
+#define GEM_MAC_ADR_FLT_MASK1_2 0x60b0 /* Address filter mask 1,2 */
+#define GEM_MAC_ADR_FLT_MASK0 0x60b4 /* Address filter mask 0 reg */
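The sixteen GEM_MAC_HASH registers that follow hold the 256-bit multicast hash filter that gem_setladrf() fills in earlier in this patch. As a self-contained illustration of the indexing (not part of the patch; the group address is an arbitrary example), this program reproduces the little-endian CRC computation and reports which register and bit a given address selects:

#include <stdio.h>
#include <stdint.h>

#define MC_POLY_LE 0xedb88320UL	/* same polynomial gem_setladrf() uses */

int
main(void)
{
	/* Hypothetical multicast address, for illustration only. */
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < 6; i++) {
		int octet = addr[i];

		for (j = 0; j < 8; j++) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ MC_POLY_LE;
			else
				crc >>= 1;
			octet >>= 1;
		}
	}
	crc >>= 24;	/* keep the 8 most significant bits */

	/* 256-bit filter: 16 registers of 16 bits each. */
	printf("GEM_MAC_HASH%u, bit %u\n",
	    (unsigned)(crc >> 4), (unsigned)(crc & 0xf));
	return (0);
}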
+
+#define GEM_MAC_HASH0 0x60c0 /* Hash table 0 */
+#define GEM_MAC_HASH1 0x60c4
+#define GEM_MAC_HASH2 0x60c8
+#define GEM_MAC_HASH3 0x60cc
+#define GEM_MAC_HASH4 0x60d0
+#define GEM_MAC_HASH5 0x60d4
+#define GEM_MAC_HASH6 0x60d8
+#define GEM_MAC_HASH7 0x60dc
+#define GEM_MAC_HASH8 0x60e0
+#define GEM_MAC_HASH9 0x60e4
+#define GEM_MAC_HASH10 0x60e8
+#define GEM_MAC_HASH11 0x60ec
+#define GEM_MAC_HASH12 0x60f0
+#define GEM_MAC_HASH13 0x60f4
+#define GEM_MAC_HASH14 0x60f8
+#define GEM_MAC_HASH15 0x60fc
+
+#define GEM_MAC_NORM_COLL_CNT 0x6100 /* Normal collision counter */
+#define GEM_MAC_FIRST_COLL_CNT 0x6104 /* 1st successful collision cntr */
+#define GEM_MAC_EXCESS_COLL_CNT 0x6108 /* Excess collision counter */
+#define GEM_MAC_LATE_COLL_CNT 0x610c /* Late collision counter */
+#define GEM_MAC_DEFER_TMR_CNT 0x6110 /* defer timer counter */
+#define GEM_MAC_PEAK_ATTEMPTS 0x6114
+#define GEM_MAC_RX_FRAME_COUNT 0x6118
+#define GEM_MAC_RX_LEN_ERR_CNT 0x611c
+#define GEM_MAC_RX_ALIGN_ERR 0x6120
+#define GEM_MAC_RX_CRC_ERR_CNT 0x6124
+#define GEM_MAC_RX_CODE_VIOL 0x6128
+#define GEM_MAC_RANDOM_SEED 0x6130
+#define GEM_MAC_MAC_STATE 0x6134 /* MAC state machine reg */
+
+
+/* GEM_MAC_SEND_PAUSE_CMD register bits */
+#define GEM_MAC_PAUSE_CMD_TIME 0x0000ffff
+#define GEM_MAC_PAUSE_CMD_SEND 0x00010000
+
+
+/* GEM_MAC_TX_STATUS and _MASK register bits */
+#define GEM_MAC_TX_XMIT_DONE 0x00000001
+#define GEM_MAC_TX_UNDERRUN 0x00000002
+#define GEM_MAC_TX_PKT_TOO_LONG 0x00000004
+#define GEM_MAC_TX_NCC_EXP 0x00000008 /* Normal collision cnt exp */
+#define GEM_MAC_TX_ECC_EXP 0x00000010
+#define GEM_MAC_TX_LCC_EXP 0x00000020
+#define GEM_MAC_TX_FCC_EXP 0x00000040
+#define GEM_MAC_TX_DEFER_EXP 0x00000080
+#define GEM_MAC_TX_PEAK_EXP 0x00000100
+
+
+/* GEM_MAC_RX_STATUS and _MASK register bits */
+#define GEM_MAC_RX_DONE 0x00000001
+#define GEM_MAC_RX_OVERFLOW 0x00000002
+#define GEM_MAC_RX_FRAME_CNT 0x00000004
+#define GEM_MAC_RX_ALIGN_EXP 0x00000008
+#define GEM_MAC_RX_CRC_EXP 0x00000010
+#define GEM_MAC_RX_LEN_EXP 0x00000020
+#define GEM_MAC_RX_CVI_EXP 0x00000040 /* Code violation */
+
+
+/* GEM_MAC_CONTROL_STATUS and GEM_MAC_CONTROL_MASK register bits */
+#define GEM_MAC_PAUSED 0x00000001 /* Pause received */
+#define GEM_MAC_PAUSE 0x00000002 /* enter pause state */
+#define GEM_MAC_RESUME 0x00000004 /* exit pause state */
+#define GEM_MAC_PAUSE_TIME 0xffff0000
+
+/* GEM_MAC_XIF_CONFIG register bits */
+#define GEM_MAC_XIF_TX_MII_ENA 0x00000001 /* Enable XIF output drivers */
+#define GEM_MAC_XIF_MII_LOOPBK 0x00000002 /* Enable MII loopback mode */
+#define GEM_MAC_XIF_ECHO_DISABL 0x00000004 /* Disable echo */
+#define GEM_MAC_XIF_MII_MODE 0x00000008 /* Select GMII/MII mode */
+#define GEM_MAC_XIF_MII_BUF_ENA 0x00000010 /* Enable MII recv buffers */
+#define GEM_MAC_XIF_LINK_LED 0x00000020 /* force link LED active */
+#define GEM_MAC_XIF_FDPLX_LED 0x00000040 /* force FDPLX LED active */
+
+/* GEM_MAC_TX_CONFIG register bits */
+#define GEM_MAC_TX_ENABLE 0x00000001 /* TX enable */
+#define GEM_MAC_TX_IGN_CARRIER 0x00000002 /* Ignore carrier sense */
+#define GEM_MAC_TX_IGN_COLLIS 0x00000004 /* ignore collisions */
+#define GEM_MAC_TX_ENA_IPG0 0x00000008 /* extend Rx-to-TX IPG */
+#define GEM_MAC_TX_NGU 0x00000010 /* Never give up */
+#define GEM_MAC_TX_NGU_LIMIT 0x00000020 /* Never give up limit */
+#define GEM_MAC_TX_NO_BACKOFF 0x00000040
+#define GEM_MAC_TX_SLOWDOWN 0x00000080
+#define GEM_MAC_TX_NO_FCS 0x00000100 /* no FCS will be generated */
+#define GEM_MAC_TX_CARR_EXTEND 0x00000200 /* Ena TX Carrier Extension */
+/* Carrier Extension is required for half duplex Gbps operation */
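GEM_MAC_SEND_PAUSE_CMD (0x6008, listed above) combines a pause interval in its low 16 bits with a one-shot trigger bit. A minimal sketch of composing such a command, assuming the interval is counted in slot times as is usual for 802.3x pause frames (the helper name is hypothetical, not from the patch):

#include <stdint.h>

/* Bit layout from the GEM_MAC_SEND_PAUSE_CMD definitions above. */
#define GEM_MAC_PAUSE_CMD_TIME	0x0000ffff
#define GEM_MAC_PAUSE_CMD_SEND	0x00010000

/* Compose a value to be written to GEM_MAC_SEND_PAUSE_CMD. */
static uint32_t
pause_cmd(uint16_t slots)
{
	return (GEM_MAC_PAUSE_CMD_SEND |
	    ((uint32_t)slots & GEM_MAC_PAUSE_CMD_TIME));
}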
+
+
+/* GEM_MAC_RX_CONFIG register bits */
+#define GEM_MAC_RX_ENABLE 0x00000001 /* RX enable */
+#define GEM_MAC_RX_STRIP_PAD 0x00000002 /* strip pad bytes */
+#define GEM_MAC_RX_STRIP_CRC 0x00000004
+#define GEM_MAC_RX_PROMISCUOUS 0x00000008 /* promiscuous mode */
+#define GEM_MAC_RX_PROMISC_GRP 0x00000010 /* promiscuous group mode */
+#define GEM_MAC_RX_HASH_FILTER 0x00000020 /* enable hash filter */
+#define GEM_MAC_RX_ADDR_FILTER 0x00000040 /* enable address filter */
+#define GEM_MAC_RX_ERRCHK_DIS 0x00000080 /* disable error checking */
+#define GEM_MAC_RX_CARR_EXTEND 0x00000100 /* Ena RX Carrier Extension */
+/*
+ * Carrier Extension enables reception of packet bursts generated by
+ * senders with carrier extension enabled.
+ */
+
+
+/* GEM_MAC_CONTROL_CONFIG bits */
+#define GEM_MAC_CC_TX_PAUSE 0x00000001 /* send pause enabled */
+#define GEM_MAC_CC_RX_PAUSE 0x00000002 /* receive pause enabled */
+#define GEM_MAC_CC_PASS_PAUSE 0x00000004 /* pass pause up */
+
+
+/* GEM MIF registers */
+/* Bit bang registers use low bit only */
+#define GEM_MIF_BB_CLOCK 0x6200 /* bit bang clock */
+#define GEM_MIF_BB_DATA 0x6204 /* bit bang data */
+#define GEM_MIF_BB_OUTPUT_ENAB 0x6208
+#define GEM_MIF_FRAME 0x620c /* MIF frame - ctl and data */
+#define GEM_MIF_CONFIG 0x6210
+#define GEM_MIF_INTERRUPT_MASK 0x6214
+#define GEM_MIF_BASIC_STATUS 0x6218
+#define GEM_MIF_STATE_MACHINE 0x621c
+
+
+/* GEM_MIF_FRAME bits */
+#define GEM_MIF_FRAME_DATA 0x0000ffff
+#define GEM_MIF_FRAME_TA0 0x00010000 /* TA bit, 1 for completion */
+#define GEM_MIF_FRAME_TA1 0x00020000 /* TA bits */
+#define GEM_MIF_FRAME_REG_ADDR 0x007c0000
+#define GEM_MIF_FRAME_PHY_ADDR 0x0f800000 /* phy address, should be 0 */
+#define GEM_MIF_FRAME_OP 0x30000000 /* operation - write/read */
+#define GEM_MIF_FRAME_START 0xc0000000 /* START bits */
+
+#define GEM_MIF_FRAME_READ 0x60020000
+#define GEM_MIF_FRAME_WRITE 0x50020000
+
+#define GEM_MIF_REG_SHIFT 18
+#define GEM_MIF_PHY_SHIFT 23
+
+
+/* GEM_MIF_CONFIG register bits */
+#define GEM_MIF_CONFIG_PHY_SEL 0x00000001 /* PHY select */
+#define GEM_MIF_CONFIG_POLL_ENA 0x00000002 /* poll enable */
+#define GEM_MIF_CONFIG_BB_ENA 0x00000004 /* bit bang enable */
+#define GEM_MIF_CONFIG_REG_ADR 0x000000f8 /* poll register address */
+#define GEM_MIF_CONFIG_MDI0 0x00000100 /* MDIO_0 Data/MDIO_0 attached */
+#define GEM_MIF_CONFIG_MDI1 0x00000200 /* MDIO_1 Data/MDIO_1 attached */
+#define GEM_MIF_CONFIG_PHY_ADR 0x00007c00 /* poll PHY address */
+/* MDI0 is the onboard transceiver, MDI1 is external; PHYAD for both is 0 */
+
+
+/* GEM_MIF_BASIC_STATUS and GEM_MIF_INTERRUPT_MASK bits */
+#define GEM_MIF_STATUS 0x0000ffff
+#define GEM_MIF_BASIC 0xffff0000
+/*
+ * The Basic part is the last value read in the POLL field of the config
+ * register.
+ *
+ * The status part indicates the bits that have changed.
+ */
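A MIF transaction is driven by writing a complete frame to GEM_MIF_FRAME and polling for completion: GEM_MIF_FRAME_READ above already carries the START and opcode bits plus TA1, the chip sets GEM_MIF_FRAME_TA0 once the transfer finishes, and the result sits in the low 16 data bits. A sketch of composing the read frame under those assumptions (the helper name is hypothetical; the driver's gem_mii_readreg() presumably does the equivalent and then masks with GEM_MIF_FRAME_DATA):

#include <stdint.h>

/* From the GEM_MIF_FRAME definitions above. */
#define GEM_MIF_FRAME_READ	0x60020000
#define GEM_MIF_REG_SHIFT	18
#define GEM_MIF_PHY_SHIFT	23

/* Build the value a MII register read writes to GEM_MIF_FRAME. */
static uint32_t
mif_read_frame(int phy, int reg)
{
	return (GEM_MIF_FRAME_READ |
	    ((uint32_t)phy << GEM_MIF_PHY_SHIFT) |
	    ((uint32_t)reg << GEM_MIF_REG_SHIFT));
}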
+
+
+/* The GEM PCS/Serial link registers. */
+#define GEM_MII_CONTROL 0x9000
+#define GEM_MII_STATUS 0x9004
+#define GEM_MII_ANAR 0x9008 /* MII advertisement reg */
+#define GEM_MII_ANLPAR 0x900c /* LP ability reg */
+#define GEM_MII_CONFIG 0x9010
+#define GEM_MII_STATE_MACHINE 0x9014
+#define GEM_MII_INTERRUP_STATUS 0x9018
+#define GEM_MII_DATAPATH_MODE 0x9050
+#define GEM_MII_SLINK_CONTROL 0x9054 /* Serial link control */
+#define GEM_MII_OUTPUT_SELECT 0x9058
+#define GEM_MII_SLINK_STATUS 0x905c /* serial link status */
+
+
+/* GEM_MII_CONTROL bits */
+/*
+ * DO NOT TOUCH THIS REGISTER ON ERI -- IT HARD HANGS.
+ */
+#define GEM_MII_CONTROL_RESET 0x00008000
+#define GEM_MII_CONTROL_LOOPBK 0x00004000 /* 10-bit i/f loopback */
+#define GEM_MII_CONTROL_1000M 0x00002000 /* speed select, always 0 */
+#define GEM_MII_CONTROL_AUTONEG 0x00001000 /* auto negotiation enabled */
+#define GEM_MII_CONTROL_POWERDN 0x00000800
+#define GEM_MII_CONTROL_ISOLATE 0x00000400 /* isolate phy from mii */
+#define GEM_MII_CONTROL_RAN 0x00000200 /* restart auto negotiation */
+#define GEM_MII_CONTROL_FDUPLEX 0x00000100 /* full duplex, always 0 */
+#define GEM_MII_CONTROL_COL_TST 0x00000080 /* collision test */
+
+
+/* GEM_MII_STATUS reg */
+#define GEM_MII_STATUS_GB_FDX 0x00000400 /* can perform GBit FDX */
+#define GEM_MII_STATUS_GB_HDX 0x00000200 /* can perform GBit HDX */
+#define GEM_MII_STATUS_ANEG_CPT 0x00000020 /* auto negotiate complete */
+#define GEM_MII_STATUS_REM_FLT 0x00000010 /* remote fault detected */
+#define GEM_MII_STATUS_ACFG 0x00000008 /* can auto negotiate */
+#define GEM_MII_STATUS_LINK_STS 0x00000004 /* link status */
+#define GEM_MII_STATUS_JABBER 0x00000002 /* jabber condition detected */
+#define GEM_MII_STATUS_EXTCAP 0x00000001 /* extended register capability */
+
+
+/* GEM_MII_ANAR and GEM_MII_ANLPAR reg bits */
+#define GEM_MII_ANEG_NP 0x00008000 /* next page bit */
+#define GEM_MII_ANEG_ACK 0x00004000 /* ack reception of */
+ /* Link Partner Capability */
+#define GEM_MII_ANEG_RF 0x00003000 /* advertise remote fault cap */
+#define GEM_MII_ANEG_ASYM_PAUSE 0x00000100 /* asymmetric pause */
+#define GEM_MII_ANEG_SYM_PAUSE 0x00000080 /* symmetric pause */
+#define GEM_MII_ANEG_HLF_DUPLX 0x00000040
+#define GEM_MII_ANEG_FUL_DUPLX 0x00000020
+
+
+/* GEM_MII_CONFIG reg */
+#define GEM_MII_CONFIG_TIMER 0x0000001c /* link monitor timer values */
+#define GEM_MII_CONFIG_ENABLE 0x00000001 /* Enable PCS */
+
+
+/* GEM_MII_DATAPATH_MODE reg */
+#define GEM_MII_DATAPATH_SERIAL 0x00000001 /* Serial link */
+#define GEM_MII_DATAPATH_SERDES 0x00000002 /* Use PCS via 10-bit interface */
+#define GEM_MII_DATAPATH_MII 0x00000004 /* Use MII, not PCS */
+#define GEM_MII_DATAPATH_MIIOUT 0x00000008 /* enable serial output on GMII */
+
+
+/* GEM_MII_SLINK_CONTROL reg */
+#define GEM_MII_SLINK_LOOPBACK 0x00000001 /* enable loopback at sl */
+#define GEM_MII_SLINK_EN_SYNC_D 0x00000002 /* enable sync detection */
+#define GEM_MII_SLINK_LOCK_REF 0x00000004 /* lock reference clock */
+#define GEM_MII_SLINK_EMPHASIS 0x00000008 /* enable emphasis */
+#define GEM_MII_SLINK_SELFTEST 0x000001c0
+#define GEM_MII_SLINK_POWER_OFF 0x00000200 /* Power down serial link */
+
+
+/* GEM_MII_SLINK_STATUS reg */
+#define GEM_MII_SLINK_TEST 0x00000000 /* undergoing test */
+#define GEM_MII_SLINK_LOCKED 0x00000001 /* waiting 500us lockrefn */
+#define GEM_MII_SLINK_COMMA 0x00000002 /* waiting for comma detect */
+#define GEM_MII_SLINK_SYNC 0x00000003 /* recv data synchronized */
+
+
+/* Wired GEM PHY addresses */
+#define GEM_PHYAD_INTERNAL 1
+#define GEM_PHYAD_EXTERNAL 0
+
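GEM_MIF_CONFIG_MDI0/MDI1 above report which MDIO pins have a transceiver attached, and GEM_MIF_CONFIG_PHY_SEL steers the MIF between them. A plausible reading is that the driver prefers an external transceiver on MDIO_1 when one is present and otherwise falls back to the onboard PHY; how the committed code actually arbitrates is up to gem_mifinit() and the PHY probe, so the helper below is only a sketch under that assumption:

#include <stdint.h>

/* From the MIF and wired-PHY definitions above. */
#define GEM_MIF_CONFIG_PHY_SEL	0x00000001
#define GEM_MIF_CONFIG_MDI1	0x00000200
#define GEM_PHYAD_INTERNAL	1
#define GEM_PHYAD_EXTERNAL	0

/*
 * Pick a PHY from a GEM_MIF_CONFIG readback: prefer the external
 * transceiver on MDIO_1 when attached, else the onboard one, and
 * return the wired PHY address alongside the updated config word.
 */
static int
pick_phy(uint32_t mif_config, uint32_t *newcfg)
{
	if (mif_config & GEM_MIF_CONFIG_MDI1) {
		*newcfg = mif_config | GEM_MIF_CONFIG_PHY_SEL;
		return (GEM_PHYAD_EXTERNAL);
	}
	*newcfg = mif_config & ~GEM_MIF_CONFIG_PHY_SEL;
	return (GEM_PHYAD_INTERNAL);
}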
+
+/*
+ * GEM descriptor table structures.
+ */
+struct gem_desc {
+	uint64_t	gd_flags;
+	uint64_t	gd_addr;
+};
+
+/* Transmit flags */
+#define GEM_TD_BUFSIZE 0x0000000000007fffLL
+#define GEM_TD_CXSUM_START 0x00000000001f8000LL /* Cxsum start offset */
+#define GEM_TD_CXSUM_STUFF 0x000000001fe00000LL /* Cxsum stuff offset */
+#define GEM_TD_CXSUM_ENABLE 0x0000000020000000LL /* Cxsum generation enable */
+#define GEM_TD_END_OF_PACKET 0x0000000040000000LL
+#define GEM_TD_START_OF_PACKET 0x0000000080000000LL
+#define GEM_TD_INTERRUPT_ME 0x0000000100000000LL /* Interrupt me now */
+#define GEM_TD_NO_CRC 0x0000000200000000LL /* do not insert crc */
+/*
+ * Only need to set GEM_TD_CXSUM_ENABLE, GEM_TD_CXSUM_STUFF,
+ * GEM_TD_CXSUM_START, and GEM_TD_INTERRUPT_ME in 1st descriptor of a group.
+ */
+
+/* Receive flags */
+#define GEM_RD_CHECKSUM 0x000000000000ffffLL
+#define GEM_RD_BUFSIZE 0x000000007fff0000LL
+#define GEM_RD_OWN 0x0000000080000000LL /* 1 - owned by h/w */
+#define GEM_RD_HASHVAL 0x0ffff00000000000LL
+#define GEM_RD_HASH_PASS 0x1000000000000000LL /* passed hash filter */
+#define GEM_RD_ALTERNATE_MAC 0x2000000000000000LL /* Alternate MAC adrs */
+#define GEM_RD_BAD_CRC 0x4000000000000000LL
+
+#define GEM_RD_BUFSHIFT 16
+#define GEM_RD_BUFLEN(x) (((x)&GEM_RD_BUFSIZE)>>GEM_RD_BUFSHIFT)
+
+/* PCI support */
+#define PCI_GEM_BASEADDR 0x10
+
+#endif
diff --git a/sys/dev/gem/if_gemvar.h b/sys/dev/gem/if_gemvar.h
new file mode 100644
index 000000000000..40093503c0ed
--- /dev/null
+++ b/sys/dev/gem/if_gemvar.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2001 Eduardo Horvath.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: NetBSD: gemvar.h,v 1.5 2001/10/18 15:19:22 thorpej Exp
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_GEMVAR_H
+#define _IF_GEMVAR_H
+
+
+#include
+#include
+
+/*
+ * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
+ */
+
+/*
+ * Transmit descriptor list size.  This is arbitrary, but allocate
+ * enough descriptors for 64 pending transmissions and 16 segments
+ * per packet.
+ */
+#define GEM_NTXSEGS 16
+
+#define GEM_TXQUEUELEN 64
+#define GEM_NTXDESC (GEM_TXQUEUELEN * GEM_NTXSEGS)
+#define GEM_NTXDESC_MASK (GEM_NTXDESC - 1)
+#define GEM_NEXTTX(x) ((x + 1) & GEM_NTXDESC_MASK)
+
+/*
+ * Receive descriptor list size.  We have one Rx buffer per incoming
+ * packet, so this logic is a little simpler.
+ */
+#define GEM_NRXDESC 128
+#define GEM_NRXDESC_MASK (GEM_NRXDESC - 1)
+#define GEM_NEXTRX(x) ((x + 1) & GEM_NRXDESC_MASK)
+
+/*
+ * Control structures are DMA'd to the GEM chip.  We allocate them in
+ * a single clump that maps to a single DMA segment to make several things
+ * easier.
+ */
+struct gem_control_data {
+	/*
+	 * The transmit descriptors.
+	 */
+	struct gem_desc gcd_txdescs[GEM_NTXDESC];
+
+	/*
+	 * The receive descriptors.
+	 */
+	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
+};
+
+#define GEM_CDOFF(x) offsetof(struct gem_control_data, x)
+#define GEM_CDTXOFF(x) GEM_CDOFF(gcd_txdescs[(x)])
+#define GEM_CDRXOFF(x) GEM_CDOFF(gcd_rxdescs[(x)])
+
+/*
+ * Software state for transmit job mbufs (may be elements of mbuf chains).
+ */
+struct gem_txsoft {
+	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
+	bus_dmamap_t txs_dmamap;	/* our DMA map */
+	int txs_firstdesc;		/* first descriptor in packet */
+	int txs_lastdesc;		/* last descriptor in packet */
+	int txs_ndescs;			/* number of descriptors */
+	STAILQ_ENTRY(gem_txsoft) txs_q;
+};
+
+STAILQ_HEAD(gem_txsq, gem_txsoft);
+
+/* Argument structure for busdma callback */
+struct gem_txdma {
+	struct gem_softc *txd_sc;
+	int txd_nexttx;
+	int txd_lasttx;
+	int txd_nsegs;
+	int txd_flags;
+#define GTXD_FIRST 1
+#define GTXD_LAST 2
+	int txd_error;
+};
+
+/* Transmit job descriptor */
+struct gem_txjob {
+	int txj_nexttx;
+	int txj_lasttx;
+	int txj_nsegs;
+	STAILQ_HEAD(, gem_txsoft) txj_txsq;
+};
+
+/*
+ * Software state for receive jobs.
+ */
+struct gem_rxsoft {
+	struct mbuf *rxs_mbuf;	/* head of our mbuf chain */
+	bus_dmamap_t rxs_dmamap;	/* our DMA map */
+	bus_addr_t rxs_paddr;	/* physical address of the segment */
+};
+
+/*
+ * Software state per device.
+ */
+struct gem_softc {
+	struct arpcom sc_arpcom;	/* arp common data */
+	device_t sc_miibus;
+	struct mii_data *sc_mii;	/* MII media control */
+	device_t sc_dev;		/* generic device information */
+	struct callout sc_tick_ch;	/* tick callout */
+
+	/* The following bus handles are to be provided by the bus front-end */
+	bus_space_tag_t sc_bustag;	/* bus tag */
+	bus_dma_tag_t sc_pdmatag;	/* parent bus dma tag */
+	bus_dma_tag_t sc_dmatag;	/* bus dma tag */
+	bus_dma_tag_t sc_cdmatag;	/* control data bus dma tag */
+	bus_dmamap_t sc_dmamap;		/* bus dma handle */
+	bus_space_handle_t sc_h;	/* bus space handle for all regs */
+
+	int sc_phys[2];			/* MII instance -> PHY map */
+
+	int sc_mif_config;		/* Selected MII reg setting */
+
+	int sc_pci;			/* XXXXX -- PCI buses are LE. */
+
+	/*
+	 * Ring buffer DMA stuff.
+	 */
+	bus_dma_segment_t sc_cdseg;	/* control data memory */
+	int sc_cdnseg;			/* number of segments */
+	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
+	bus_addr_t sc_cddma;
+
+	/*
+	 * Software state for transmit and receive descriptors.
+	 */
+	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
+	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];
+
+	/*
+	 * Control data structures.
+	 */
+	struct gem_control_data *sc_control_data;
+#define sc_txdescs sc_control_data->gcd_txdescs
+#define sc_rxdescs sc_control_data->gcd_rxdescs
+
+	int sc_txfree;		/* number of free Tx descriptors */
+	int sc_txnext;		/* next ready Tx descriptor */
+
+	struct gem_txsq sc_txfreeq;	/* free Tx descsofts */
+	struct gem_txsq sc_txdirtyq;	/* dirty Tx descsofts */
+
+	int sc_rxptr;		/* next ready RX descriptor/descsoft */
+
+	/* ========== */
+	int sc_inited;
+	int sc_debug;
+	int sc_flags;
+
+	/* Special hardware hooks */
+	void (*sc_hwreset) __P((struct gem_softc *));
+	void (*sc_hwinit) __P((struct gem_softc *));
+};
+
+#define GEM_DMA_READ(sc, v) (((sc)->sc_pci) ? le64toh(v) : be64toh(v))
+#define GEM_DMA_WRITE(sc, v) (((sc)->sc_pci) ? htole64(v) : htobe64(v))
+
+#define GEM_CDTXADDR(sc, x) ((sc)->sc_cddma + GEM_CDTXOFF((x)))
+#define GEM_CDRXADDR(sc, x) ((sc)->sc_cddma + GEM_CDRXOFF((x)))
+
+#define GEM_CDSPADDR(sc) ((sc)->sc_cddma + GEM_CDSPOFF)
+
+#define GEM_CDTXSYNC(sc, x, n, ops) \
+	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))
+
+#define GEM_CDRXSYNC(sc, x, ops) \
+	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))
+
+#define GEM_CDSPSYNC(sc, ops) \
+	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))
+
+#define GEM_INIT_RXDESC(sc, x) \
+do { \
+	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)]; \
+	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)]; \
+	struct mbuf *__m = __rxs->rxs_mbuf; \
+ \
+	__m->m_data = __m->m_ext.ext_buf; \
+	__rxd->gd_addr = \
+	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr); \
+	__rxd->gd_flags = \
+	    GEM_DMA_WRITE((sc), \
+	    (((__m->m_ext.ext_size)<