2008-06-20 19:28:33 +00:00
|
|
|
/*-
|
2010-07-30 17:51:22 +00:00
|
|
|
* Copyright (c) 2007 Sepherosa Ziehau. All rights reserved.
|
2008-06-20 19:28:33 +00:00
|
|
|
*
|
|
|
|
* This code is derived from software contributed to The DragonFly Project
|
|
|
|
* by Sepherosa Ziehau <sepherosa@gmail.com>
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in
|
|
|
|
* the documentation and/or other materials provided with the
|
|
|
|
* distribution.
|
|
|
|
* 3. Neither the name of The DragonFly Project nor the names of its
|
|
|
|
* contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific, prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
|
|
|
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
|
|
|
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
|
|
|
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
|
|
|
|
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
|
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
|
|
|
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
|
|
|
|
*/
|
|
|
|
|
2009-11-20 20:40:34 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2008-06-20 19:28:33 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/endian.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/bus.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/rman.h>
|
|
|
|
#include <sys/module.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/sockio.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
|
|
|
|
#include <net/ethernet.h>
|
|
|
|
#include <net/if.h>
|
|
|
|
#include <net/if_dl.h>
|
|
|
|
#include <net/if_types.h>
|
|
|
|
#include <net/bpf.h>
|
|
|
|
#include <net/if_arp.h>
|
|
|
|
#include <net/if_media.h>
|
|
|
|
#include <net/if_vlan_var.h>
|
|
|
|
|
|
|
|
#include <machine/bus.h>
|
|
|
|
|
2010-10-15 15:00:30 +00:00
|
|
|
#include <dev/mii/mii.h>
|
2008-06-20 19:28:33 +00:00
|
|
|
#include <dev/mii/miivar.h>
|
|
|
|
|
|
|
|
#include <dev/pci/pcireg.h>
|
|
|
|
#include <dev/pci/pcivar.h>
|
|
|
|
|
|
|
|
#include <dev/et/if_etreg.h>
|
|
|
|
#include <dev/et/if_etvar.h>
|
|
|
|
|
|
|
|
#include "miibus_if.h"
|
|
|
|
|
|
|
|
MODULE_DEPEND(et, pci, 1, 1, 1);
|
|
|
|
MODULE_DEPEND(et, ether, 1, 1, 1);
|
|
|
|
MODULE_DEPEND(et, miibus, 1, 1, 1);
|
|
|
|
|
2009-11-19 21:45:06 +00:00
|
|
|
/* Tunables. */
|
|
|
|
static int msi_disable = 0;
|
2009-11-19 22:59:52 +00:00
|
|
|
TUNABLE_INT("hw.et.msi_disable", &msi_disable);
|
2009-11-19 21:45:06 +00:00
|
|
|
|
2009-11-20 20:33:59 +00:00
|
|
|
#define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
|
|
|
|
|
2008-06-20 19:28:33 +00:00
|
|
|
static int et_probe(device_t);
|
|
|
|
static int et_attach(device_t);
|
|
|
|
static int et_detach(device_t);
|
|
|
|
static int et_shutdown(device_t);
|
2011-12-05 22:22:39 +00:00
|
|
|
static int et_suspend(device_t);
|
|
|
|
static int et_resume(device_t);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
static int et_miibus_readreg(device_t, int, int);
|
|
|
|
static int et_miibus_writereg(device_t, int, int, int);
|
|
|
|
static void et_miibus_statchg(device_t);
|
|
|
|
|
|
|
|
static void et_init_locked(struct et_softc *);
|
|
|
|
static void et_init(void *);
|
|
|
|
static int et_ioctl(struct ifnet *, u_long, caddr_t);
|
|
|
|
static void et_start_locked(struct ifnet *);
|
|
|
|
static void et_start(struct ifnet *);
|
|
|
|
static void et_watchdog(struct et_softc *);
|
|
|
|
static int et_ifmedia_upd_locked(struct ifnet *);
|
|
|
|
static int et_ifmedia_upd(struct ifnet *);
|
|
|
|
static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *);
|
|
|
|
|
|
|
|
static void et_add_sysctls(struct et_softc *);
|
|
|
|
static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS);
|
|
|
|
static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS);
|
|
|
|
|
|
|
|
static void et_intr(void *);
|
|
|
|
static void et_enable_intrs(struct et_softc *, uint32_t);
|
|
|
|
static void et_disable_intrs(struct et_softc *);
|
|
|
|
static void et_rxeof(struct et_softc *);
|
|
|
|
static void et_txeof(struct et_softc *);
|
|
|
|
|
|
|
|
static int et_dma_alloc(device_t);
|
|
|
|
static void et_dma_free(device_t);
|
|
|
|
static int et_dma_mem_create(device_t, bus_size_t, bus_dma_tag_t *,
|
|
|
|
void **, bus_addr_t *, bus_dmamap_t *);
|
|
|
|
static void et_dma_mem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
|
|
|
|
static int et_dma_mbuf_create(device_t);
|
|
|
|
static void et_dma_mbuf_destroy(device_t, int, const int[]);
|
|
|
|
static void et_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
|
|
|
|
static void et_dma_buf_addr(void *, bus_dma_segment_t *, int,
|
|
|
|
bus_size_t, int);
|
|
|
|
static int et_init_tx_ring(struct et_softc *);
|
|
|
|
static int et_init_rx_ring(struct et_softc *);
|
|
|
|
static void et_free_tx_ring(struct et_softc *);
|
|
|
|
static void et_free_rx_ring(struct et_softc *);
|
|
|
|
static int et_encap(struct et_softc *, struct mbuf **);
|
|
|
|
static int et_newbuf(struct et_rxbuf_data *, int, int, int);
|
|
|
|
static int et_newbuf_cluster(struct et_rxbuf_data *, int, int);
|
|
|
|
static int et_newbuf_hdr(struct et_rxbuf_data *, int, int);
|
|
|
|
|
|
|
|
static void et_stop(struct et_softc *);
|
|
|
|
static int et_chip_init(struct et_softc *);
|
|
|
|
static void et_chip_attach(struct et_softc *);
|
|
|
|
static void et_init_mac(struct et_softc *);
|
|
|
|
static void et_init_rxmac(struct et_softc *);
|
|
|
|
static void et_init_txmac(struct et_softc *);
|
|
|
|
static int et_init_rxdma(struct et_softc *);
|
|
|
|
static int et_init_txdma(struct et_softc *);
|
|
|
|
static int et_start_rxdma(struct et_softc *);
|
|
|
|
static int et_start_txdma(struct et_softc *);
|
|
|
|
static int et_stop_rxdma(struct et_softc *);
|
|
|
|
static int et_stop_txdma(struct et_softc *);
|
|
|
|
static int et_enable_txrx(struct et_softc *, int);
|
|
|
|
static void et_reset(struct et_softc *);
|
2009-11-19 22:53:41 +00:00
|
|
|
static int et_bus_config(struct et_softc *);
|
2008-06-20 19:28:33 +00:00
|
|
|
static void et_get_eaddr(device_t, uint8_t[]);
|
|
|
|
static void et_setmulti(struct et_softc *);
|
|
|
|
static void et_tick(void *);
|
|
|
|
static void et_setmedia(struct et_softc *);
|
|
|
|
static void et_setup_rxdesc(struct et_rxbuf_data *, int, bus_addr_t);
|
|
|
|
|
|
|
|
/*
 * Table of PCI vendor/device IDs supported by this driver; terminated
 * by an all-NULL sentinel entry.  Scanned by et_probe().
 */
static const struct et_dev {
	uint16_t	vid;		/* PCI vendor ID */
	uint16_t	did;		/* PCI device ID */
	const char	*desc;		/* human-readable device description */
} et_devices[] = {
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310,
	  "Agere ET1310 Gigabit Ethernet" },
	{ PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST,
	  "Agere ET1310 Fast Ethernet" },
	{ 0, 0, NULL }	/* sentinel */
};
|
|
|
|
|
|
|
|
/*
 * newbus device method table: standard device lifecycle entry points
 * plus the miibus register-access/status-change methods used by the
 * attached PHY.
 */
static device_method_t et_methods[] = {
	DEVMETHOD(device_probe,		et_probe),
	DEVMETHOD(device_attach,	et_attach),
	DEVMETHOD(device_detach,	et_detach),
	DEVMETHOD(device_shutdown,	et_shutdown),
	DEVMETHOD(device_suspend,	et_suspend),
	DEVMETHOD(device_resume,	et_resume),

	DEVMETHOD(miibus_readreg,	et_miibus_readreg),
	DEVMETHOD(miibus_writereg,	et_miibus_writereg),
	DEVMETHOD(miibus_statchg,	et_miibus_statchg),

	DEVMETHOD_END
};
|
|
|
|
|
|
|
|
/* Driver descriptor registered with the PCI bus via DRIVER_MODULE(). */
static driver_t et_driver = {
	"et",
	et_methods,
	sizeof(struct et_softc)
};
|
|
|
|
|
|
|
|
static devclass_t et_devclass;
|
|
|
|
|
|
|
|
DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0);
|
|
|
|
DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0);
|
|
|
|
|
|
|
|
static int et_rx_intr_npkts = 32;
|
|
|
|
static int et_rx_intr_delay = 20; /* x10 usec */
|
|
|
|
static int et_tx_intr_nsegs = 126;
|
|
|
|
static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */
|
|
|
|
|
|
|
|
TUNABLE_INT("hw.et.timer", &et_timer);
|
|
|
|
TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts);
|
|
|
|
TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay);
|
|
|
|
TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs);
|
|
|
|
|
|
|
|
/*
 * Per-RX-ring buffer configuration: buffer size selector (written into
 * the RX DMA control register) paired with the function used to refill
 * descriptors on that ring.
 */
struct et_bsize {
	int		bufsize;	/* ET_RXDMA_CTRL_RINGn_* size code */
	et_newbuf_t	newbuf;		/* descriptor refill routine */
};

/*
 * Standard configuration: ring 0 holds small (128-byte) header mbufs,
 * ring 1 holds full 2KB cluster mbufs.
 */
static const struct et_bsize	et_bufsize_std[ET_RX_NRING] = {
	{ .bufsize = ET_RXDMA_CTRL_RING0_128,
	  .newbuf = et_newbuf_hdr },
	{ .bufsize = ET_RXDMA_CTRL_RING1_2048,
	  .newbuf = et_newbuf_cluster },
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_probe(device_t dev)
|
|
|
|
{
|
|
|
|
const struct et_dev *d;
|
|
|
|
uint16_t did, vid;
|
|
|
|
|
|
|
|
vid = pci_get_vendor(dev);
|
|
|
|
did = pci_get_device(dev);
|
|
|
|
|
|
|
|
for (d = et_devices; d->desc != NULL; ++d) {
|
|
|
|
if (vid == d->vid && did == d->did) {
|
|
|
|
device_set_desc(dev, d->desc);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
}
|
2009-11-19 21:53:21 +00:00
|
|
|
return (ENXIO);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_attach(device_t dev)
|
|
|
|
{
|
|
|
|
struct et_softc *sc;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
uint8_t eaddr[ETHER_ADDR_LEN];
|
2009-11-19 21:45:06 +00:00
|
|
|
int cap, error, msic;
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
|
|
|
sc->dev = dev;
|
|
|
|
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
|
|
|
|
MTX_DEF);
|
|
|
|
|
|
|
|
ifp = sc->ifp = if_alloc(IFT_ETHER);
|
|
|
|
if (ifp == NULL) {
|
|
|
|
device_printf(dev, "can not if_alloc()\n");
|
|
|
|
error = ENOSPC;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize tunables
|
|
|
|
*/
|
|
|
|
sc->sc_rx_intr_npkts = et_rx_intr_npkts;
|
|
|
|
sc->sc_rx_intr_delay = et_rx_intr_delay;
|
|
|
|
sc->sc_tx_intr_nsegs = et_tx_intr_nsegs;
|
|
|
|
sc->sc_timer = et_timer;
|
|
|
|
|
|
|
|
/* Enable bus mastering */
|
|
|
|
pci_enable_busmaster(dev);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate IO memory
|
|
|
|
*/
|
|
|
|
sc->sc_mem_rid = ET_PCIR_BAR;
|
|
|
|
sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
|
|
|
|
&sc->sc_mem_rid, RF_ACTIVE);
|
|
|
|
if (sc->sc_mem_res == NULL) {
|
|
|
|
device_printf(dev, "can't allocate IO memory\n");
|
2009-11-19 21:53:21 +00:00
|
|
|
return (ENXIO);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
2009-11-19 21:45:06 +00:00
|
|
|
msic = 0;
|
2011-03-23 13:10:15 +00:00
|
|
|
if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) {
|
2009-11-19 21:45:06 +00:00
|
|
|
sc->sc_expcap = cap;
|
|
|
|
sc->sc_flags |= ET_FLAG_PCIE;
|
|
|
|
msic = pci_msi_count(dev);
|
|
|
|
if (bootverbose)
|
2009-11-19 22:59:52 +00:00
|
|
|
device_printf(dev, "MSI count: %d\n", msic);
|
2009-11-19 21:45:06 +00:00
|
|
|
}
|
|
|
|
if (msic > 0 && msi_disable == 0) {
|
|
|
|
msic = 1;
|
|
|
|
if (pci_alloc_msi(dev, &msic) == 0) {
|
|
|
|
if (msic == 1) {
|
|
|
|
device_printf(dev, "Using %d MSI message\n",
|
|
|
|
msic);
|
|
|
|
sc->sc_flags |= ET_FLAG_MSI;
|
|
|
|
} else
|
|
|
|
pci_release_msi(dev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-20 19:28:33 +00:00
|
|
|
/*
|
|
|
|
* Allocate IRQ
|
|
|
|
*/
|
2009-11-19 21:45:06 +00:00
|
|
|
if ((sc->sc_flags & ET_FLAG_MSI) == 0) {
|
|
|
|
sc->sc_irq_rid = 0;
|
|
|
|
sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
|
|
|
|
&sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
|
|
|
|
} else {
|
|
|
|
sc->sc_irq_rid = 1;
|
|
|
|
sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
|
|
|
|
&sc->sc_irq_rid, RF_ACTIVE);
|
|
|
|
}
|
2008-06-20 19:28:33 +00:00
|
|
|
if (sc->sc_irq_res == NULL) {
|
|
|
|
device_printf(dev, "can't allocate irq\n");
|
|
|
|
error = ENXIO;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2009-11-19 22:53:41 +00:00
|
|
|
error = et_bus_config(sc);
|
2008-06-20 19:28:33 +00:00
|
|
|
if (error)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
et_get_eaddr(dev, eaddr);
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_PM,
|
|
|
|
ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE);
|
|
|
|
|
|
|
|
et_reset(sc);
|
|
|
|
|
|
|
|
et_disable_intrs(sc);
|
|
|
|
|
|
|
|
error = et_dma_alloc(dev);
|
|
|
|
if (error)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
ifp->if_softc = sc;
|
|
|
|
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
|
|
|
|
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
|
|
|
|
ifp->if_init = et_init;
|
|
|
|
ifp->if_ioctl = et_ioctl;
|
|
|
|
ifp->if_start = et_start;
|
|
|
|
ifp->if_mtu = ETHERMTU;
|
2009-11-20 20:43:16 +00:00
|
|
|
ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU;
|
2008-06-20 19:28:33 +00:00
|
|
|
ifp->if_capenable = ifp->if_capabilities;
|
2011-12-05 22:55:52 +00:00
|
|
|
ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1;
|
|
|
|
IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1);
|
2008-06-20 19:28:33 +00:00
|
|
|
IFQ_SET_READY(&ifp->if_snd);
|
|
|
|
|
|
|
|
et_chip_attach(sc);
|
|
|
|
|
2010-10-15 15:00:30 +00:00
|
|
|
error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd,
|
|
|
|
et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
|
2008-06-20 19:28:33 +00:00
|
|
|
if (error) {
|
2010-10-15 15:00:30 +00:00
|
|
|
device_printf(dev, "attaching PHYs failed\n");
|
2008-06-20 19:28:33 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
ether_ifattach(ifp, eaddr);
|
|
|
|
callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
|
|
|
|
|
|
|
|
error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
|
2009-11-19 21:45:06 +00:00
|
|
|
NULL, et_intr, sc, &sc->sc_irq_handle);
|
2008-06-20 19:28:33 +00:00
|
|
|
if (error) {
|
|
|
|
ether_ifdetach(ifp);
|
|
|
|
device_printf(dev, "can't setup intr\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
et_add_sysctls(sc);
|
|
|
|
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
fail:
|
|
|
|
et_detach(dev);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * device_detach method: stop the hardware, tear down the interrupt
 * handler and release every resource acquired by et_attach().  Also
 * serves as the unwind path for a failed attach, so each release is
 * guarded against the resource never having been allocated.
 */
static int
et_detach(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = sc->ifp;

		/* Quiesce the chip under the softc lock before unhooking. */
		ET_LOCK(sc);
		et_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
		ET_UNLOCK(sc);

		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL)
		device_delete_child(dev, sc->sc_miibus);
	bus_generic_detach(dev);

	if (sc->sc_irq_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
		    sc->sc_irq_res);
	}
	/* Release the MSI vector only if we actually allocated one. */
	if ((sc->sc_flags & ET_FLAG_MSI) != 0)
		pci_release_msi(dev);

	if (sc->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
		    sc->sc_mem_res);
	}

	if (sc->ifp != NULL)
		if_free(sc->ifp);

	et_dma_free(dev);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_shutdown(device_t dev)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = device_get_softc(dev);
|
|
|
|
|
|
|
|
ET_LOCK(sc);
|
|
|
|
et_stop(sc);
|
|
|
|
ET_UNLOCK(sc);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * miibus_readreg method: read PHY register 'reg' on PHY address 'phy'
 * through the MAC's MII management interface.  Returns the 16-bit
 * register value, or 0 if the access timed out (MII convention for
 * "no data").
 */
static int
et_miibus_readreg(device_t dev, int phy, int reg)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i, ret;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select the target PHY and register. */
	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start reading */
	CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ);

#define NRETRY	50

	/* Poll until the management interface is idle and the data valid. */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "read phy %d, reg %d timed out\n", phy, reg);
		ret = 0;
		goto back;
	}

#undef NRETRY

	val = CSR_READ_4(sc, ET_MII_STAT);
	ret = val & ET_MII_STAT_VALUE_MASK;

back:
	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (ret);
}
|
|
|
|
|
|
|
|
/*
 * miibus_writereg method: write 'val0' to PHY register 'reg' on PHY
 * address 'phy' through the MAC's MII management interface.  Always
 * returns 0 (miibus ignores the result); a timeout is logged and a
 * dummy read is issued to resynchronize the interface.
 */
static int
et_miibus_writereg(device_t dev, int phy, int reg, int val0)
{
	struct et_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Stop any pending operations */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);

	/* Select the target PHY and register. */
	val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK;
	val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK;
	CSR_WRITE_4(sc, ET_MII_ADDR, val);

	/* Start writing */
	CSR_WRITE_4(sc, ET_MII_CTRL,
	    (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK);

#define NRETRY 100

	/* Poll until the management interface finishes the write. */
	for (i = 0; i < NRETRY; ++i) {
		val = CSR_READ_4(sc, ET_MII_IND);
		if ((val & ET_MII_IND_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == NRETRY) {
		if_printf(sc->ifp,
			  "write phy %d, reg %d timed out\n", phy, reg);
		/* Dummy read to clear the stuck transaction. */
		et_miibus_readreg(dev, phy, reg);
	}

#undef NRETRY

	/* Make sure that the current operation is stopped */
	CSR_WRITE_4(sc, ET_MII_CMD, 0);
	return (0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_miibus_statchg(device_t dev)
|
|
|
|
{
|
|
|
|
et_setmedia(device_get_softc(dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Change media/duplex with the softc lock held: reset every attached
 * PHY instance first (ensures the PHY-specific reset routine runs),
 * then let mii(4) program the newly selected media.
 * Returns the result of mii_mediachg().
 */
static int
et_ifmedia_upd_locked(struct ifnet *ifp)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct mii_softc *miisc;

	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	return (mii_mediachg(mii));
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_ifmedia_upd(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = ifp->if_softc;
|
|
|
|
int res;
|
|
|
|
|
|
|
|
ET_LOCK(sc);
|
|
|
|
res = et_ifmedia_upd_locked(ifp);
|
|
|
|
ET_UNLOCK(sc);
|
|
|
|
|
2009-11-19 21:53:21 +00:00
|
|
|
return (res);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = ifp->if_softc;
|
|
|
|
struct mii_data *mii = device_get_softc(sc->sc_miibus);
|
|
|
|
|
2011-10-17 19:58:34 +00:00
|
|
|
ET_LOCK(sc);
|
2008-06-20 19:28:33 +00:00
|
|
|
mii_pollstat(mii);
|
|
|
|
ifmr->ifm_active = mii->mii_media_active;
|
|
|
|
ifmr->ifm_status = mii->mii_media_status;
|
2011-10-17 19:58:34 +00:00
|
|
|
ET_UNLOCK(sc);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Bring the interface down: stop the tick callout, halt both DMA
 * engines, mask interrupts, free the TX/RX rings, reset the chip and
 * clear the running state.  Must be called with the softc lock held.
 */
static void
et_stop(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;

	ET_LOCK_ASSERT(sc);

	callout_stop(&sc->sc_tick);

	et_stop_rxdma(sc);
	et_stop_txdma(sc);

	et_disable_intrs(sc);

	et_free_tx_ring(sc);
	et_free_rx_ring(sc);

	et_reset(sc);

	/* Reset the software TX bookkeeping. */
	sc->sc_tx = 0;
	sc->sc_tx_intr = 0;
	sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED;

	sc->watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
|
|
|
|
|
|
|
|
/*
 * Validate the EEPROM and, on PCI Express parts, tune the PCIe link
 * parameters: ACK latency and replay timer (derived from the max
 * payload size), L0s/L1 exit latencies, and the max read request size.
 * Returns 0 on success or ENXIO if the EEPROM reports an error.
 */
static int
et_bus_config(struct et_softc *sc)
{
	uint32_t val, max_plsz;
	uint16_t ack_latency, replay_timer;

	/*
	 * Test whether EEPROM is valid
	 * NOTE: Read twice to get the correct value
	 */
	pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1);
	if (val & ET_PCIM_EEPROM_STATUS_ERROR) {
		device_printf(sc->dev, "EEPROM status error 0x%02x\n", val);
		return (ENXIO);
	}

	/* TODO: LED */

	/* The remaining setup only applies to PCI Express variants. */
	if ((sc->sc_flags & ET_FLAG_PCIE) == 0)
		return (0);

	/*
	 * Configure ACK latency and replay timer according to
	 * max playload size
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4);
	max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD;

	switch (max_plsz) {
	case ET_PCIV_DEVICE_CAPS_PLSZ_128:
		ack_latency = ET_PCIV_ACK_LATENCY_128;
		replay_timer = ET_PCIV_REPLAY_TIMER_128;
		break;

	case ET_PCIV_DEVICE_CAPS_PLSZ_256:
		ack_latency = ET_PCIV_ACK_LATENCY_256;
		replay_timer = ET_PCIV_REPLAY_TIMER_256;
		break;

	default:
		/* Unknown payload size: keep whatever firmware programmed. */
		ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2);
		replay_timer = pci_read_config(sc->dev,
		    ET_PCIR_REPLAY_TIMER, 2);
		device_printf(sc->dev, "ack latency %u, replay timer %u\n",
			      ack_latency, replay_timer);
		break;
	}
	if (ack_latency != 0) {
		pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2);
		pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer,
		    2);
	}

	/*
	 * Set L0s and L1 latency timer to 2us
	 */
	val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4);
	val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT);
	/* L0s exit latency : 2us */
	val |= 0x00005000;
	/* L1 exit latency : 2us */
	val |= 0x00028000;
	pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4);

	/*
	 * Set max read request size to 2048 bytes
	 */
	val = pci_read_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
	val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
	val |= ET_PCIV_DEVICE_CTRL_RRSZ_2K;
	pci_write_config(sc->dev,
	    sc->sc_expcap + PCIR_EXPRESS_DEVICE_CTL, val, 2);

	return (0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_get_eaddr(device_t dev, uint8_t eaddr[])
|
|
|
|
{
|
|
|
|
uint32_t val;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4);
|
|
|
|
for (i = 0; i < 4; ++i)
|
|
|
|
eaddr[i] = (val >> (8 * i)) & 0xff;
|
|
|
|
|
|
|
|
val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2);
|
|
|
|
for (; i < ETHER_ADDR_LEN; ++i)
|
|
|
|
eaddr[i] = (val >> (8 * (i - 4))) & 0xff;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Hard-reset the MAC and the DMA engines.  The sequence — assert all
 * MAC resets, soft-reset the DMA/MAC-stat/MMC blocks, re-assert the
 * functional resets, then release everything — mirrors the order used
 * in et_chip_attach(); presumably required by the hardware (NOTE(review):
 * confirm against the ET1310 datasheet).
 */
static void
et_reset(struct et_softc *sc)
{
	/* Put every MAC sub-block (TX/RX function and memory ctrl) in reset. */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
	    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/* Soft-reset the DMA engines, MAC core, statistics and MMC. */
	CSR_WRITE_4(sc, ET_SWRST,
	    ET_SWRST_TXDMA | ET_SWRST_RXDMA |
	    ET_SWRST_TXMAC | ET_SWRST_RXMAC |
	    ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC);

	/* Keep the functional resets asserted ... */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
	    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
	    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC);
	/* ... then bring the MAC fully out of reset. */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
}
|
|
|
|
|
|
|
|
/*
 * Mask all interrupt sources.  ET_INTR_MASK is a disable mask: a set
 * bit blocks the corresponding interrupt (see et_enable_intrs()).
 */
static void
et_disable_intrs(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}
|
|
|
|
|
|
|
|
/*
 * Enable the interrupt sources in 'intrs'.  The mask register is
 * inverted: a cleared bit enables the interrupt, hence the ~.
 */
static void
et_enable_intrs(struct et_softc *sc, uint32_t intrs)
{
	CSR_WRITE_4(sc, ET_INTR_MASK, ~intrs);
}
|
|
|
|
|
|
|
|
/*
 * Allocate all DMA resources for the device: the parent tag, the TX
 * descriptor ring, the TX status word, both RX descriptor rings, the
 * RX stat ring, the RX status block and the per-mbuf maps.
 *
 * On failure the function returns the bus_dma error without unwinding;
 * the caller is expected to invoke et_dma_free() (NOTE(review): verify
 * the attach error path does so).
 */
static int
et_dma_alloc(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, error;

	/*
	 * Create top level DMA tag; all memory must be 32-bit addressable.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT,
				   BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MAXBSIZE,
				   BUS_SPACE_UNRESTRICTED,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, &sc->sc_dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	/*
	 * Create TX ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_TX_RING_SIZE, &tx_ring->tr_dtag,
				  (void **)&tx_ring->tr_desc,
				  &tx_ring->tr_paddr, &tx_ring->tr_dmap);
	if (error) {
		device_printf(dev, "can't create TX ring DMA stuffs\n");
		return (error);
	}

	/*
	 * Create TX status DMA stuffs (a single status word the chip
	 * writes back).
	 */
	error = et_dma_mem_create(dev, sizeof(uint32_t), &txsd->txsd_dtag,
				  (void **)&txsd->txsd_status,
				  &txsd->txsd_paddr, &txsd->txsd_dmap);
	if (error) {
		device_printf(dev, "can't create TX status DMA stuffs\n");
		return (error);
	}

	/*
	 * Create DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		/* Hardware position register for each RX ring. */
		static const uint32_t rx_ring_posreg[ET_RX_NRING] =
		{ ET_RX_RING0_POS, ET_RX_RING1_POS };

		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		error = et_dma_mem_create(dev, ET_RX_RING_SIZE,
					  &rx_ring->rr_dtag,
					  (void **)&rx_ring->rr_desc,
					  &rx_ring->rr_paddr,
					  &rx_ring->rr_dmap);
		if (error) {
			device_printf(dev, "can't create DMA stuffs for "
				      "the %d RX ring\n", i);
			return (error);
		}
		rx_ring->rr_posreg = rx_ring_posreg[i];
	}

	/*
	 * Create RX stat ring DMA stuffs
	 */
	error = et_dma_mem_create(dev, ET_RXSTAT_RING_SIZE,
				  &rxst_ring->rsr_dtag,
				  (void **)&rxst_ring->rsr_stat,
				  &rxst_ring->rsr_paddr, &rxst_ring->rsr_dmap);
	if (error) {
		device_printf(dev, "can't create RX stat ring DMA stuffs\n");
		return (error);
	}

	/*
	 * Create RX status DMA stuffs
	 */
	error = et_dma_mem_create(dev, sizeof(struct et_rxstatus),
				  &rxsd->rxsd_dtag,
				  (void **)&rxsd->rxsd_status,
				  &rxsd->rxsd_paddr, &rxsd->rxsd_dmap);
	if (error) {
		device_printf(dev, "can't create RX status DMA stuffs\n");
		return (error);
	}

	/*
	 * Create mbuf DMA stuffs
	 */
	error = et_dma_mbuf_create(dev);
	if (error)
		return (error);

	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Release every DMA resource created by et_dma_alloc().  Each
 * et_dma_mem_destroy() call tolerates a NULL tag, so this is safe to
 * call after a partially-failed allocation.
 */
static void
et_dma_free(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	int i, rx_done[ET_RX_NRING];

	/*
	 * Destroy TX ring DMA stuffs
	 */
	et_dma_mem_destroy(tx_ring->tr_dtag, tx_ring->tr_desc,
			   tx_ring->tr_dmap);

	/*
	 * Destroy TX status DMA stuffs
	 */
	et_dma_mem_destroy(txsd->txsd_dtag, txsd->txsd_status,
			   txsd->txsd_dmap);

	/*
	 * Destroy DMA stuffs for RX rings
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[i];

		et_dma_mem_destroy(rx_ring->rr_dtag, rx_ring->rr_desc,
				   rx_ring->rr_dmap);
	}

	/*
	 * Destroy RX stat ring DMA stuffs
	 */
	et_dma_mem_destroy(rxst_ring->rsr_dtag, rxst_ring->rsr_stat,
			   rxst_ring->rsr_dmap);

	/*
	 * Destroy RX status DMA stuffs
	 */
	et_dma_mem_destroy(rxsd->rxsd_dtag, rxsd->rxsd_status,
			   rxsd->rxsd_dmap);

	/*
	 * Destroy mbuf DMA stuffs.  All maps were created, so every
	 * ring gets the full ET_RX_NDESC count.
	 */
	for (i = 0; i < ET_RX_NRING; ++i)
		rx_done[i] = ET_RX_NDESC;
	et_dma_mbuf_destroy(dev, ET_TX_NDESC, rx_done);

	/*
	 * Destroy top level DMA tag
	 */
	if (sc->sc_dtag != NULL)
		bus_dma_tag_destroy(sc->sc_dtag);
}
|
|
|
|
|
|
|
|
/*
 * Create the mbuf DMA tag, one spare RX map, one map per RX descriptor
 * in each ring, and one map per TX descriptor.
 *
 * On a mid-loop failure, rx_done[] records how many maps were created
 * in each RX ring so et_dma_mbuf_destroy() can unwind exactly what
 * exists and no more.
 */
static int
et_dma_mbuf_create(device_t dev)
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i, error, rx_done[ET_RX_NRING];

	/*
	 * Create mbuf DMA tag
	 */
	error = bus_dma_tag_create(sc->sc_dtag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   ET_JUMBO_FRAMELEN, ET_NSEG_MAX,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_mbuf_dtag);
	if (error) {
		device_printf(dev, "can't create mbuf DMA tag\n");
		return (error);
	}

	/*
	 * Create spare DMA map for RX mbufs, used to load a replacement
	 * buffer before the old one is torn down.
	 */
	error = bus_dmamap_create(sc->sc_mbuf_dtag, 0, &sc->sc_mbuf_tmp_dmap);
	if (error) {
		device_printf(dev, "can't create spare mbuf DMA map\n");
		bus_dma_tag_destroy(sc->sc_mbuf_dtag);
		sc->sc_mbuf_dtag = NULL;
		return (error);
	}

	/*
	 * Create DMA maps for RX mbufs
	 */
	bzero(rx_done, sizeof(rx_done));
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < ET_RX_NDESC; ++j) {
			error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
						  &rbd->rbd_buf[j].rb_dmap);
			if (error) {
				device_printf(dev, "can't create %d RX mbuf "
					      "for %d RX ring\n", j, i);
				/* Only j maps exist in this ring; unwind. */
				rx_done[i] = j;
				et_dma_mbuf_destroy(dev, 0, rx_done);
				return (error);
			}
		}
		rx_done[i] = ET_RX_NDESC;

		rbd->rbd_softc = sc;
		rbd->rbd_ring = &sc->sc_rx_ring[i];
	}

	/*
	 * Create DMA maps for TX mbufs
	 */
	for (i = 0; i < ET_TX_NDESC; ++i) {
		error = bus_dmamap_create(sc->sc_mbuf_dtag, 0,
					  &tbd->tbd_buf[i].tb_dmap);
		if (error) {
			device_printf(dev, "can't create %d TX mbuf "
				      "DMA map\n", i);
			/* All RX maps exist; only i TX maps do. */
			et_dma_mbuf_destroy(dev, i, rx_done);
			return (error);
		}
	}

	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Destroy the mbuf DMA maps and tag.  tx_done and rx_done[] bound how
 * many maps were actually created (see et_dma_mbuf_create()), so this
 * can be used both for full teardown and for partial unwinding.
 * No-op if the mbuf tag was never created.
 */
static void
et_dma_mbuf_destroy(device_t dev, int tx_done, const int rx_done[])
{
	struct et_softc *sc = device_get_softc(dev);
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	int i;

	if (sc->sc_mbuf_dtag == NULL)
		return;

	/*
	 * Destroy DMA maps for RX mbufs
	 */
	for (i = 0; i < ET_RX_NRING; ++i) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[i];
		int j;

		for (j = 0; j < rx_done[i]; ++j) {
			struct et_rxbuf *rb = &rbd->rbd_buf[j];

			/* Rings must already have been drained. */
			KASSERT(rb->rb_mbuf == NULL,
				("RX mbuf in %d RX ring is not freed yet\n", i));
			bus_dmamap_destroy(sc->sc_mbuf_dtag, rb->rb_dmap);
		}
	}

	/*
	 * Destroy DMA maps for TX mbufs
	 */
	for (i = 0; i < tx_done; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		KASSERT(tb->tb_mbuf == NULL, ("TX mbuf is not freed yet\n"));
		bus_dmamap_destroy(sc->sc_mbuf_dtag, tb->tb_dmap);
	}

	/*
	 * Destroy spare mbuf DMA map
	 */
	bus_dmamap_destroy(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap);

	/*
	 * Destroy mbuf DMA tag
	 */
	bus_dma_tag_destroy(sc->sc_mbuf_dtag);
	sc->sc_mbuf_dtag = NULL;
}
|
|
|
|
|
|
|
|
/*
 * Allocate one contiguous, ET_ALIGN-aligned, zeroed DMA memory block:
 * create a child tag, allocate/map the memory, and load it to obtain
 * its bus address via the et_dma_ring_addr() callback.
 *
 * On success *dtag/*addr/*paddr/*dmap are all valid; on failure every
 * partially-created resource is released and *dtag is NULL, which is
 * what et_dma_mem_destroy() keys off.
 */
static int
et_dma_mem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
		  void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct et_softc *sc = device_get_softc(dev);
	int error;

	/* nsegments == 1: the block must be physically contiguous. */
	error = bus_dma_tag_create(sc->sc_dtag, ET_ALIGN, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   size, 1, BUS_SPACE_MAXSIZE_32BIT,
				   0, NULL, NULL, dtag);
	if (error) {
		device_printf(dev, "can't create DMA tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 dmap);
	if (error) {
		device_printf(dev, "can't allocate DMA mem\n");
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}

	error = bus_dmamap_load(*dtag, *dmap, *addr, size,
				et_dma_ring_addr, paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(*dtag, *addr, *dmap);
		bus_dma_tag_destroy(*dtag);
		*dtag = NULL;
		return (error);
	}
	return (0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_dma_mem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
|
|
|
|
{
|
|
|
|
if (dtag != NULL) {
|
|
|
|
bus_dmamap_unload(dtag, dmap);
|
|
|
|
bus_dmamem_free(dtag, addr, dmap);
|
|
|
|
bus_dma_tag_destroy(dtag);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
|
|
|
|
{
|
|
|
|
KASSERT(nseg == 1, ("too many segments\n"));
|
|
|
|
*((bus_addr_t *)arg) = seg->ds_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Minimal one-time chip initialization done at attach: clear loopback,
 * reset the MAC, program half-duplex parameters, reset the MII and
 * enable the memory controllers.
 */
static void
et_chip_attach(struct et_softc *sc)
{
	uint32_t val;

	/*
	 * Perform minimal initialization
	 */

	/* Disable loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Reset MAC */
	CSR_WRITE_4(sc, ET_MAC_CFG1,
		    ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
		    ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
		    ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

	/*
	 * Setup half duplex mode.  Same values as in et_init_mac();
	 * NOTE(review): constants presumably come from the ET1310
	 * datasheet defaults — confirm.
	 */
	val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
	      (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
	      (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
	      ET_MAC_HDX_EXC_DEFER;
	CSR_WRITE_4(sc, ET_MAC_HDX, val);

	/* Clear MAC control */
	CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

	/* Reset MII */
	CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

	/* Bring MAC out of reset state */
	CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}
|
|
|
|
|
|
|
|
/*
 * Interrupt handler.  Interrupts are masked for the duration of the
 * handler and unconditionally re-enabled on exit; the status register
 * read is filtered to the sources the driver cares about (ET_INTRS).
 */
static void
et_intr(void *xsc)
{
	struct et_softc *sc = xsc;
	struct ifnet *ifp;
	uint32_t intrs;

	ET_LOCK(sc);
	ifp = sc->ifp;
	/* Ignore interrupts while the interface is down. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		ET_UNLOCK(sc);
		return;
	}

	et_disable_intrs(sc);

	intrs = CSR_READ_4(sc, ET_INTR_STATUS);
	intrs &= ET_INTRS;
	if (intrs == 0)	/* Not interested */
		goto back;

	if (intrs & ET_INTR_RXEOF)
		et_rxeof(sc);
	if (intrs & (ET_INTR_TXEOF | ET_INTR_TIMER))
		et_txeof(sc);
	if (intrs & ET_INTR_TIMER)
		/* Rearm the periodic timer interrupt. */
		CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
back:
	et_enable_intrs(sc, ET_INTRS);
	ET_UNLOCK(sc);
}
|
|
|
|
|
|
|
|
/*
 * Bring the interface up: stop any current activity, initialize the
 * TX/RX rings and the chip, enable the DMA engines and interrupts,
 * and start the tick callout.  Must be called with the softc lock
 * held.  On any failure the device is stopped again.
 */
static void
et_init_locked(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	const struct et_bsize *arr;
	int error, i;

	ET_LOCK_ASSERT(sc);

	/* Already running; nothing to do. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	et_stop(sc);

	/* Install the standard RX buffer sizes / newbuf handlers. */
	arr = et_bufsize_std;
	for (i = 0; i < ET_RX_NRING; ++i) {
		sc->sc_rx_data[i].rbd_bufsize = arr[i].bufsize;
		sc->sc_rx_data[i].rbd_newbuf = arr[i].newbuf;
	}

	error = et_init_tx_ring(sc);
	if (error)
		goto back;

	error = et_init_rx_ring(sc);
	if (error)
		goto back;

	error = et_chip_init(sc);
	if (error)
		goto back;

	error = et_enable_txrx(sc, 1);
	if (error)
		goto back;

	et_enable_intrs(sc, ET_INTRS);

	callout_reset(&sc->sc_tick, hz, et_tick, sc);

	/* Arm the chip's periodic timer interrupt. */
	CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
back:
	if (error)
		et_stop(sc);
}
|
|
|
|
|
|
|
|
/*
 * if_init entry point: take the softc lock and defer to
 * et_init_locked().
 */
static void
et_init(void *xsc)
{
	struct et_softc *sc;

	sc = xsc;
	ET_LOCK(sc);
	et_init_locked(sc);
	ET_UNLOCK(sc);
}
|
|
|
|
|
|
|
|
/*
 * ioctl handler.  Handles interface flags, media, multicast, MTU and
 * capability changes; everything else is delegated to ether_ioctl().
 * Locking is taken per-case as needed.
 */
static int
et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct et_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, mask, max_framelen;

	/* XXX LOCKSUSED */
	switch (cmd) {
	case SIOCSIFFLAGS:
		ET_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				/* Only reprogram the filter when a
				 * filter-relevant flag changed. */
				if ((ifp->if_flags ^ sc->sc_if_flags) &
				(IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST))
					et_setmulti(sc);
			} else {
				et_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				et_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		ET_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ET_LOCK(sc);
			et_setmulti(sc);
			ET_UNLOCK(sc);
			error = 0;
		}
		break;

	case SIOCSIFMTU:
#if 0
		if (sc->sc_flags & ET_FLAG_JUMBO)
			max_framelen = ET_JUMBO_FRAMELEN;
		else
#endif
			max_framelen = MCLBYTES - 1;

		if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) {
			error = EOPNOTSUPP;
			break;
		}

		if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Reinitialize so buffer sizes match the new MTU. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			et_init(sc);
		}
		break;

	case SIOCSIFCAP:
		ET_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		/* Toggle TX checksum offload if supported and requested. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= ET_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~ET_CSUM_FEATURES;
		}
		ET_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Dequeue packets from the interface send queue and hand them to the
 * hardware via et_encap().  Must be called with the softc lock held.
 * Sets IFF_DRV_OACTIVE and requeues the packet when descriptors run
 * short; arms the watchdog when anything was enqueued.
 */
static void
et_start_locked(struct ifnet *ifp)
{
	struct et_softc *sc;
	struct mbuf *m_head = NULL;
	struct et_txbuf_data *tbd;
	int enq;

	sc = ifp->if_softc;
	ET_LOCK_ASSERT(sc);

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
		return;

	tbd = &sc->sc_tx_data;
	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Keep ET_NSEG_SPARE descriptors free for the next frame. */
		if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (et_encap(sc, &m_head)) {
			/* et_encap() freed the mbuf: unrecoverable error. */
			if (m_head == NULL) {
				ifp->if_oerrors++;
				break;
			}
			/* Temporary shortage: put the packet back. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (tbd->tbd_used > 0)
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0)
		sc->watchdog_timer = 5;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_start(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = ifp->if_softc;
|
|
|
|
|
|
|
|
ET_LOCK(sc);
|
|
|
|
et_start_locked(ifp);
|
|
|
|
ET_UNLOCK(sc);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Per-second TX watchdog (driven from et_tick()).  Returns early while
 * the timer is idle (0) or still counting down; when it expires, the
 * interface is reinitialized and transmission restarted.
 */
static void
et_watchdog(struct et_softc *sc)
{
	ET_LOCK_ASSERT(sc);

	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	if_printf(sc->ifp, "watchdog timed out\n");

	sc->ifp->if_oerrors++;
	/* Force et_init_locked() to run its full reinit path. */
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	et_init_locked(sc);
	et_start_locked(sc->ifp);
}
|
|
|
|
|
|
|
|
/*
 * Halt the RX DMA engine and wait briefly for it to acknowledge.
 * Returns ETIMEDOUT if the engine does not report halted after the
 * 5us delay.
 */
static int
et_stop_rxdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_RXDMA_CTRL,
		    ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE);

	DELAY(5);
	if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) {
		if_printf(sc->ifp, "can't stop RX DMA engine\n");
		return (ETIMEDOUT);
	}
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Halt the TX DMA engine.  Unlike the RX side there is no completion
 * poll; the write is assumed to take effect immediately, so this
 * always returns 0.
 */
static int
et_stop_txdma(struct et_softc *sc)
{
	CSR_WRITE_4(sc, ET_TXDMA_CTRL,
		    ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT);
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Release every mbuf still attached to the TX ring and clear the
 * descriptor memory, syncing it for the device.
 */
static void
et_free_tx_ring(struct et_softc *sc)
{
	struct et_txbuf_data *tbd = &sc->sc_tx_data;
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	int i;

	for (i = 0; i < ET_TX_NDESC; ++i) {
		struct et_txbuf *tb = &tbd->tbd_buf[i];

		if (tb->tb_mbuf != NULL) {
			/* Unload the map before freeing its mbuf. */
			bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}
	}

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);
}
|
|
|
|
|
|
|
|
/*
 * Release every mbuf still attached to each RX ring and clear the
 * descriptor memory, syncing it for the device.
 */
static void
et_free_rx_ring(struct et_softc *sc)
{
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		struct et_rxdesc_ring *rx_ring = &sc->sc_rx_ring[n];
		int i;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			struct et_rxbuf *rb = &rbd->rbd_buf[i];

			if (rb->rb_mbuf != NULL) {
				/* Unload the map before freeing its mbuf. */
				bus_dmamap_unload(sc->sc_mbuf_dtag,
			rb->rb_dmap);
				m_freem(rb->rb_mbuf);
				rb->rb_mbuf = NULL;
			}
		}

		bzero(rx_ring->rr_desc, ET_RX_RING_SIZE);
		bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
				BUS_DMASYNC_PREWRITE);
	}
}
|
|
|
|
|
|
|
|
/*
 * Program the RX packet filter: in promiscuous/allmulti mode filtering
 * is bypassed entirely; otherwise a 128-bit multicast hash table is
 * built from the top 7 bits of the big-endian CRC32 of each address.
 * Must be called with the softc lock held.
 */
static void
et_setmulti(struct et_softc *sc)
{
	struct ifnet *ifp;
	uint32_t hash[4] = { 0, 0, 0, 0 };
	uint32_t rxmac_ctrl, pktfilt;
	struct ifmultiaddr *ifma;
	int i, count;

	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	pktfilt = CSR_READ_4(sc, ET_PKTFILT);
	rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL);

	pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST);
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		/* Accept everything: bypass the packet filter. */
		rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT;
		goto back;
	}

	count = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		uint32_t *hp, h;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		/* Top 7 CRC bits select one of 128 hash-table bits. */
		h = (h & 0x3f800000) >> 23;

		/* Map bit index 0..127 to word hash[0..3], bit 0..31. */
		hp = &hash[0];
		if (h >= 32 && h < 64) {
			h -= 32;
			hp = &hash[1];
		} else if (h >= 64 && h < 96) {
			h -= 64;
			hp = &hash[2];
		} else if (h >= 96) {
			h -= 96;
			hp = &hash[3];
		}
		*hp |= (1 << h);

		++count;
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < 4; ++i)
		CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]);

	if (count > 0)
		pktfilt |= ET_PKTFILT_MCAST;
	rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT;
back:
	CSR_WRITE_4(sc, ET_PKTFILT, pktfilt);
	CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl);
}
|
|
|
|
|
|
|
|
/*
 * Full chip initialization for interface bring-up: partition internal
 * packet memory between RX and TX based on the MTU, then initialize
 * the MAC, RX/TX MACs and both DMA engines.  Returns a bus/ETIMEDOUT
 * style error if either DMA engine fails to initialize.
 */
static int
et_chip_init(struct et_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	uint32_t rxq_end;
	int error, frame_len, rxmem_size;

	/*
	 * Split 16Kbytes internal memory between TX and RX
	 * according to frame length.
	 */
	frame_len = ET_FRAMELEN(ifp->if_mtu);
	if (frame_len < 2048) {
		rxmem_size = ET_MEM_RXSIZE_DEFAULT;
	} else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) {
		rxmem_size = ET_MEM_SIZE / 2;
	} else {
		/* Large frames: give TX just enough, RX the rest. */
		rxmem_size = ET_MEM_SIZE -
		roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT);
	}
	rxq_end = ET_QUEUE_ADDR(rxmem_size);

	CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START);
	CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end);
	CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1);
	CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END);

	/* No loopback */
	CSR_WRITE_4(sc, ET_LOOPBACK, 0);

	/* Clear MSI configure; only when MSI is not in use. */
	if ((sc->sc_flags & ET_FLAG_MSI) == 0)
		CSR_WRITE_4(sc, ET_MSI_CFG, 0);

	/* Disable timer */
	CSR_WRITE_4(sc, ET_TIMER, 0);

	/* Initialize MAC */
	et_init_mac(sc);

	/* Enable memory controllers */
	CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);

	/* Initialize RX MAC */
	et_init_rxmac(sc);

	/* Initialize TX MAC */
	et_init_txmac(sc);

	/* Initialize RX DMA engine */
	error = et_init_rxdma(sc);
	if (error)
		return (error);

	/* Initialize TX DMA engine */
	error = et_init_txdma(sc);
	if (error)
		return (error);

	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Reset the TX descriptor ring, the software ring state and the TX
 * status word to their initial (empty) state.  Always returns 0; the
 * int return matches the other ring-init routines.
 */
static int
et_init_tx_ring(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	struct et_txbuf_data *tbd = &sc->sc_tx_data;

	bzero(tx_ring->tr_desc, ET_TX_RING_SIZE);
	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
			BUS_DMASYNC_PREWRITE);

	tbd->tbd_start_index = 0;
	tbd->tbd_start_wrap = 0;
	tbd->tbd_used = 0;

	bzero(txsd->txsd_status, sizeof(uint32_t));
	bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap,
			BUS_DMASYNC_PREWRITE);
	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Populate every descriptor of both RX rings with a fresh buffer via
 * the per-ring newbuf handler, then clear the RX status block and the
 * RX stat ring.  Returns the newbuf error on failure.
 */
static int
et_init_rx_ring(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	int n;

	for (n = 0; n < ET_RX_NRING; ++n) {
		struct et_rxbuf_data *rbd = &sc->sc_rx_data[n];
		int i, error;

		for (i = 0; i < ET_RX_NDESC; ++i) {
			/* Third arg 1: initial fill, allocate a new mbuf. */
			error = rbd->rbd_newbuf(rbd, i, 1);
			if (error) {
				if_printf(sc->ifp, "%d ring %d buf, "
					  "newbuf failed: %d\n", n, i, error);
				return (error);
			}
		}
	}

	bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus));
	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
			BUS_DMASYNC_PREWRITE);

	bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
			BUS_DMASYNC_PREWRITE);

	return (0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_dma_buf_addr(void *xctx, bus_dma_segment_t *segs, int nsegs,
|
|
|
|
bus_size_t mapsz __unused, int error)
|
|
|
|
{
|
|
|
|
struct et_dmamap_ctx *ctx = xctx;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (nsegs > ctx->nsegs) {
|
|
|
|
ctx->nsegs = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx->nsegs = nsegs;
|
|
|
|
for (i = 0; i < nsegs; ++i)
|
|
|
|
ctx->segs[i] = segs[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Program the RX DMA engine: stop it, install the RX status block,
 * the RX stat ring and both RX descriptor rings (ring 1 before ring
 * 0, matching the original code order), and set interrupt moderation.
 * The software ring indices are reset to match the position registers
 * just written.
 */
static int
et_init_rxdma(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd = &sc->sc_rx_status;
	struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring;
	struct et_rxdesc_ring *rx_ring;
	int error;

	error = et_stop_rxdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init RX DMA engine\n");
		return (error);
	}

	/*
	 * Install RX status
	 */
	CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr));
	CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr));

	/*
	 * Install RX stat ring
	 */
	CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr));
	CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1);
	CSR_WRITE_4(sc, ET_RXSTAT_POS, 0);
	/* 15% watermark before a minimum-count interrupt. */
	CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1);

	/* Match ET_RXSTAT_POS */
	rxst_ring->rsr_index = 0;
	rxst_ring->rsr_wrap = 0;

	/*
	 * Install the 2nd RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[1];
	CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING1_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * Install the 1st RX descriptor ring
	 */
	rx_ring = &sc->sc_rx_ring[0];
	CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr));
	CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1);
	CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP);
	CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1);

	/* Match ET_RX_RING0_POS */
	rx_ring->rr_index = 0;
	rx_ring->rr_wrap = 1;

	/*
	 * RX intr moderation
	 */
	CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts);
	CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay);

	return (0);
}
|
|
|
|
|
|
|
|
/*
 * Program the TX DMA engine: stop it, install the TX descriptor ring
 * and TX status block, and reset the ready position (hardware and
 * software copies).
 */
static int
et_init_txdma(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
	struct et_txstatus_data *txsd = &sc->sc_tx_status;
	int error;

	error = et_stop_txdma(sc);
	if (error) {
		if_printf(sc->ifp, "can't init TX DMA engine\n");
		return (error);
	}

	/*
	 * Install TX descriptor ring
	 */
	CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr));
	CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1);

	/*
	 * Install TX status
	 */
	CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr));
	CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr));

	CSR_WRITE_4(sc, ET_TX_READY_POS, 0);

	/* Match ET_TX_READY_POS */
	tx_ring->tr_ready_index = 0;
	tx_ring->tr_ready_wrap = 0;

	return (0);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_init_mac(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->ifp;
|
|
|
|
const uint8_t *eaddr = IF_LLADDR(ifp);
|
|
|
|
uint32_t val;
|
|
|
|
|
|
|
|
/* Reset MAC */
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CFG1,
|
|
|
|
ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
|
|
|
|
ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
|
|
|
|
ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup inter packet gap
|
|
|
|
*/
|
2009-11-19 20:57:35 +00:00
|
|
|
val = (56 << ET_IPG_NONB2B_1_SHIFT) |
|
|
|
|
(88 << ET_IPG_NONB2B_2_SHIFT) |
|
|
|
|
(80 << ET_IPG_MINIFG_SHIFT) |
|
|
|
|
(96 << ET_IPG_B2B_SHIFT);
|
2008-06-20 19:28:33 +00:00
|
|
|
CSR_WRITE_4(sc, ET_IPG, val);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup half duplex mode
|
|
|
|
*/
|
2009-11-19 20:57:35 +00:00
|
|
|
val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
|
|
|
|
(15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
|
|
|
|
(55 << ET_MAC_HDX_COLLWIN_SHIFT) |
|
|
|
|
ET_MAC_HDX_EXC_DEFER;
|
2008-06-20 19:28:33 +00:00
|
|
|
CSR_WRITE_4(sc, ET_MAC_HDX, val);
|
|
|
|
|
|
|
|
/* Clear MAC control */
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CTRL, 0);
|
|
|
|
|
|
|
|
/* Reset MII */
|
|
|
|
CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set MAC address
|
|
|
|
*/
|
|
|
|
val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24);
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_ADDR1, val);
|
|
|
|
val = (eaddr[0] << 16) | (eaddr[1] << 24);
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_ADDR2, val);
|
|
|
|
|
|
|
|
/* Set max frame length */
|
|
|
|
CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu));
|
|
|
|
|
|
|
|
/* Bring MAC out of reset state */
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CFG1, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_init_rxmac(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->ifp;
|
|
|
|
const uint8_t *eaddr = IF_LLADDR(ifp);
|
|
|
|
uint32_t val;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
/* Disable RX MAC and WOL */
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear all WOL related registers
|
|
|
|
*/
|
|
|
|
for (i = 0; i < 3; ++i)
|
|
|
|
CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0);
|
|
|
|
for (i = 0; i < 20; ++i)
|
|
|
|
CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set WOL source address. XXX is this necessary?
|
|
|
|
*/
|
|
|
|
val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5];
|
|
|
|
CSR_WRITE_4(sc, ET_WOL_SA_LO, val);
|
|
|
|
val = (eaddr[0] << 8) | eaddr[1];
|
|
|
|
CSR_WRITE_4(sc, ET_WOL_SA_HI, val);
|
|
|
|
|
|
|
|
/* Clear packet filters */
|
|
|
|
CSR_WRITE_4(sc, ET_PKTFILT, 0);
|
|
|
|
|
|
|
|
/* No ucast filtering */
|
|
|
|
CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0);
|
|
|
|
CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0);
|
|
|
|
CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0);
|
|
|
|
|
|
|
|
if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) {
|
|
|
|
/*
|
|
|
|
* In order to transmit jumbo packets greater than
|
|
|
|
* ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between
|
|
|
|
* RX MAC and RX DMA needs to be reduced in size to
|
|
|
|
* (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In
|
|
|
|
* order to implement this, we must use "cut through"
|
|
|
|
* mode in the RX MAC, which chops packets down into
|
|
|
|
* segments. In this case we selected 256 bytes,
|
|
|
|
* since this is the size of the PCI-Express TLP's
|
|
|
|
* that the ET1310 uses.
|
|
|
|
*/
|
2009-11-19 20:57:35 +00:00
|
|
|
val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) |
|
2008-06-20 19:28:33 +00:00
|
|
|
ET_RXMAC_MC_SEGSZ_ENABLE;
|
|
|
|
} else {
|
|
|
|
val = 0;
|
|
|
|
}
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val);
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0);
|
|
|
|
|
|
|
|
/* Initialize RX MAC management register */
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_MGT, 0);
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0);
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_MGT,
|
|
|
|
ET_RXMAC_MGT_PASS_ECRC |
|
|
|
|
ET_RXMAC_MGT_PASS_ELEN |
|
|
|
|
ET_RXMAC_MGT_PASS_ETRUNC |
|
|
|
|
ET_RXMAC_MGT_CHECK_PKT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure runt filtering (may not work on certain chip generation)
|
|
|
|
*/
|
2009-11-19 20:57:35 +00:00
|
|
|
val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) &
|
|
|
|
ET_PKTFILT_MINLEN_MASK;
|
|
|
|
val |= ET_PKTFILT_FRAG;
|
2008-06-20 19:28:33 +00:00
|
|
|
CSR_WRITE_4(sc, ET_PKTFILT, val);
|
|
|
|
|
|
|
|
/* Enable RX MAC but leave WOL disabled */
|
|
|
|
CSR_WRITE_4(sc, ET_RXMAC_CTRL,
|
|
|
|
ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup multicast hash and allmulti/promisc mode
|
|
|
|
*/
|
|
|
|
et_setmulti(sc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_init_txmac(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
/* Disable TX MAC and FC(?) */
|
|
|
|
CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE);
|
|
|
|
|
|
|
|
/* No flow control yet */
|
|
|
|
CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0);
|
|
|
|
|
|
|
|
/* Enable TX MAC but leave FC(?) diabled */
|
|
|
|
CSR_WRITE_4(sc, ET_TXMAC_CTRL,
|
|
|
|
ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_start_rxdma(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
uint32_t val = 0;
|
|
|
|
|
2009-11-19 20:57:35 +00:00
|
|
|
val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) |
|
2008-06-20 19:28:33 +00:00
|
|
|
ET_RXDMA_CTRL_RING0_ENABLE;
|
2009-11-19 20:57:35 +00:00
|
|
|
val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) |
|
2008-06-20 19:28:33 +00:00
|
|
|
ET_RXDMA_CTRL_RING1_ENABLE;
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_RXDMA_CTRL, val);
|
|
|
|
|
|
|
|
DELAY(5);
|
|
|
|
|
|
|
|
if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) {
|
|
|
|
if_printf(sc->ifp, "can't start RX DMA engine\n");
|
2009-11-19 21:53:21 +00:00
|
|
|
return (ETIMEDOUT);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_start_txdma(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_enable_txrx(struct et_softc *sc, int media_upd)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->ifp;
|
|
|
|
uint32_t val;
|
|
|
|
int i, error;
|
|
|
|
|
|
|
|
val = CSR_READ_4(sc, ET_MAC_CFG1);
|
|
|
|
val |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN;
|
|
|
|
val &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW |
|
|
|
|
ET_MAC_CFG1_LOOPBACK);
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CFG1, val);
|
|
|
|
|
|
|
|
if (media_upd)
|
|
|
|
et_ifmedia_upd_locked(ifp);
|
|
|
|
else
|
|
|
|
et_setmedia(sc);
|
|
|
|
|
|
|
|
#define NRETRY 50
|
|
|
|
|
|
|
|
for (i = 0; i < NRETRY; ++i) {
|
|
|
|
val = CSR_READ_4(sc, ET_MAC_CFG1);
|
|
|
|
if ((val & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) ==
|
|
|
|
(ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN))
|
|
|
|
break;
|
|
|
|
|
|
|
|
DELAY(100);
|
|
|
|
}
|
|
|
|
if (i == NRETRY) {
|
|
|
|
if_printf(ifp, "can't enable RX/TX\n");
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
sc->sc_flags |= ET_FLAG_TXRX_ENABLED;
|
|
|
|
|
|
|
|
#undef NRETRY
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start TX/RX DMA engine
|
|
|
|
*/
|
|
|
|
error = et_start_rxdma(sc);
|
|
|
|
if (error)
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
error = et_start_txdma(sc);
|
|
|
|
if (error)
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
2009-11-19 21:53:21 +00:00
|
|
|
return (0);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_rxeof(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct et_rxstatus_data *rxsd;
|
|
|
|
struct et_rxstat_ring *rxst_ring;
|
2009-11-20 20:18:53 +00:00
|
|
|
uint32_t rxs_stat_ring, rxst_info2;
|
2008-06-20 19:28:33 +00:00
|
|
|
int rxst_wrap, rxst_index;
|
|
|
|
|
|
|
|
ET_LOCK_ASSERT(sc);
|
|
|
|
ifp = sc->ifp;
|
|
|
|
rxsd = &sc->sc_rx_status;
|
|
|
|
rxst_ring = &sc->sc_rxstat_ring;
|
|
|
|
|
|
|
|
if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
|
|
|
bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
|
|
|
|
2009-11-20 20:18:53 +00:00
|
|
|
rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
|
2008-06-20 19:28:33 +00:00
|
|
|
rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
|
2009-11-19 20:57:35 +00:00
|
|
|
rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
|
|
|
|
ET_RXS_STATRING_INDEX_SHIFT;
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
while (rxst_index != rxst_ring->rsr_index ||
|
|
|
|
rxst_wrap != rxst_ring->rsr_wrap) {
|
|
|
|
struct et_rxbuf_data *rbd;
|
|
|
|
struct et_rxdesc_ring *rx_ring;
|
|
|
|
struct et_rxstat *st;
|
|
|
|
struct mbuf *m;
|
|
|
|
int buflen, buf_idx, ring_idx;
|
|
|
|
uint32_t rxstat_pos, rxring_pos;
|
|
|
|
|
|
|
|
MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
|
|
|
|
st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
|
2009-11-20 20:18:53 +00:00
|
|
|
rxst_info2 = le32toh(st->rxst_info2);
|
|
|
|
buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
|
2009-11-19 20:57:35 +00:00
|
|
|
ET_RXST_INFO2_LEN_SHIFT;
|
2009-11-20 20:18:53 +00:00
|
|
|
buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
|
2009-11-19 20:57:35 +00:00
|
|
|
ET_RXST_INFO2_BUFIDX_SHIFT;
|
2009-11-20 20:18:53 +00:00
|
|
|
ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
|
2009-11-19 20:57:35 +00:00
|
|
|
ET_RXST_INFO2_RINGIDX_SHIFT;
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
|
|
|
|
rxst_ring->rsr_index = 0;
|
|
|
|
rxst_ring->rsr_wrap ^= 1;
|
|
|
|
}
|
2009-11-19 20:57:35 +00:00
|
|
|
rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
|
2008-06-20 19:28:33 +00:00
|
|
|
if (rxst_ring->rsr_wrap)
|
|
|
|
rxstat_pos |= ET_RXSTAT_POS_WRAP;
|
|
|
|
CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);
|
|
|
|
|
|
|
|
if (ring_idx >= ET_RX_NRING) {
|
|
|
|
ifp->if_ierrors++;
|
|
|
|
if_printf(ifp, "invalid ring index %d\n", ring_idx);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (buf_idx >= ET_RX_NDESC) {
|
|
|
|
ifp->if_ierrors++;
|
|
|
|
if_printf(ifp, "invalid buf index %d\n", buf_idx);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
rbd = &sc->sc_rx_data[ring_idx];
|
|
|
|
m = rbd->rbd_buf[buf_idx].rb_mbuf;
|
|
|
|
|
|
|
|
if (rbd->rbd_newbuf(rbd, buf_idx, 0) == 0) {
|
|
|
|
if (buflen < ETHER_CRC_LEN) {
|
|
|
|
m_freem(m);
|
|
|
|
m = NULL;
|
|
|
|
ifp->if_ierrors++;
|
|
|
|
} else {
|
2009-11-20 20:25:21 +00:00
|
|
|
m->m_pkthdr.len = m->m_len =
|
|
|
|
buflen - ETHER_CRC_LEN;
|
2008-06-20 19:28:33 +00:00
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
ifp->if_ipackets++;
|
|
|
|
ET_UNLOCK(sc);
|
|
|
|
ifp->if_input(ifp, m);
|
|
|
|
ET_LOCK(sc);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ifp->if_ierrors++;
|
|
|
|
}
|
|
|
|
m = NULL; /* Catch invalid reference */
|
|
|
|
|
|
|
|
rx_ring = &sc->sc_rx_ring[ring_idx];
|
|
|
|
|
|
|
|
if (buf_idx != rx_ring->rr_index) {
|
|
|
|
if_printf(ifp, "WARNING!! ring %d, "
|
|
|
|
"buf_idx %d, rr_idx %d\n",
|
|
|
|
ring_idx, buf_idx, rx_ring->rr_index);
|
|
|
|
}
|
|
|
|
|
|
|
|
MPASS(rx_ring->rr_index < ET_RX_NDESC);
|
|
|
|
if (++rx_ring->rr_index == ET_RX_NDESC) {
|
|
|
|
rx_ring->rr_index = 0;
|
|
|
|
rx_ring->rr_wrap ^= 1;
|
|
|
|
}
|
2009-11-19 20:57:35 +00:00
|
|
|
rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
|
2008-06-20 19:28:33 +00:00
|
|
|
if (rx_ring->rr_wrap)
|
|
|
|
rxring_pos |= ET_RX_RING_POS_WRAP;
|
|
|
|
CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_encap(struct et_softc *sc, struct mbuf **m0)
|
|
|
|
{
|
|
|
|
struct mbuf *m = *m0;
|
|
|
|
bus_dma_segment_t segs[ET_NSEG_MAX];
|
|
|
|
struct et_dmamap_ctx ctx;
|
|
|
|
struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring;
|
|
|
|
struct et_txbuf_data *tbd = &sc->sc_tx_data;
|
|
|
|
struct et_txdesc *td;
|
|
|
|
bus_dmamap_t map;
|
|
|
|
int error, maxsegs, first_idx, last_idx, i;
|
2009-11-20 20:33:59 +00:00
|
|
|
uint32_t csum_flags, tx_ready_pos, last_td_ctrl2;
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
maxsegs = ET_TX_NDESC - tbd->tbd_used;
|
|
|
|
if (maxsegs > ET_NSEG_MAX)
|
|
|
|
maxsegs = ET_NSEG_MAX;
|
|
|
|
KASSERT(maxsegs >= ET_NSEG_SPARE,
|
|
|
|
("not enough spare TX desc (%d)\n", maxsegs));
|
|
|
|
|
|
|
|
MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
|
|
|
|
first_idx = tx_ring->tr_ready_index;
|
|
|
|
map = tbd->tbd_buf[first_idx].tb_dmap;
|
|
|
|
|
|
|
|
ctx.nsegs = maxsegs;
|
|
|
|
ctx.segs = segs;
|
|
|
|
error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
|
|
|
|
et_dma_buf_addr, &ctx, BUS_DMA_NOWAIT);
|
|
|
|
if (!error && ctx.nsegs == 0) {
|
|
|
|
bus_dmamap_unload(sc->sc_mbuf_dtag, map);
|
|
|
|
error = EFBIG;
|
|
|
|
}
|
|
|
|
if (error && error != EFBIG) {
|
|
|
|
if_printf(sc->ifp, "can't load TX mbuf, error %d\n",
|
|
|
|
error);
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
if (error) { /* error == EFBIG */
|
|
|
|
struct mbuf *m_new;
|
|
|
|
|
|
|
|
m_new = m_defrag(m, M_DONTWAIT);
|
|
|
|
if (m_new == NULL) {
|
|
|
|
if_printf(sc->ifp, "can't defrag TX mbuf\n");
|
|
|
|
error = ENOBUFS;
|
|
|
|
goto back;
|
|
|
|
} else {
|
|
|
|
*m0 = m = m_new;
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx.nsegs = maxsegs;
|
|
|
|
ctx.segs = segs;
|
|
|
|
error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, map, m,
|
|
|
|
et_dma_buf_addr, &ctx,
|
|
|
|
BUS_DMA_NOWAIT);
|
|
|
|
if (error || ctx.nsegs == 0) {
|
|
|
|
if (ctx.nsegs == 0) {
|
|
|
|
bus_dmamap_unload(sc->sc_mbuf_dtag, map);
|
|
|
|
error = EFBIG;
|
|
|
|
}
|
|
|
|
if_printf(sc->ifp,
|
|
|
|
"can't load defraged TX mbuf\n");
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bus_dmamap_sync(sc->sc_mbuf_dtag, map, BUS_DMASYNC_PREWRITE);
|
|
|
|
|
|
|
|
last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
|
|
|
|
sc->sc_tx += ctx.nsegs;
|
|
|
|
if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
|
|
|
|
sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
|
|
|
|
last_td_ctrl2 |= ET_TDCTRL2_INTR;
|
|
|
|
}
|
|
|
|
|
2009-11-20 20:33:59 +00:00
|
|
|
csum_flags = 0;
|
|
|
|
if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
|
|
|
|
if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
|
|
|
|
csum_flags |= ET_TDCTRL2_CSUM_IP;
|
|
|
|
if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
|
|
|
|
csum_flags |= ET_TDCTRL2_CSUM_UDP;
|
|
|
|
else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
|
|
|
|
csum_flags |= ET_TDCTRL2_CSUM_TCP;
|
|
|
|
}
|
2008-06-20 19:28:33 +00:00
|
|
|
last_idx = -1;
|
|
|
|
for (i = 0; i < ctx.nsegs; ++i) {
|
|
|
|
int idx;
|
|
|
|
|
|
|
|
idx = (first_idx + i) % ET_TX_NDESC;
|
|
|
|
td = &tx_ring->tr_desc[idx];
|
2009-11-20 20:18:53 +00:00
|
|
|
td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
|
|
|
|
td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
|
|
|
|
td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
|
2008-06-20 19:28:33 +00:00
|
|
|
if (i == ctx.nsegs - 1) { /* Last frag */
|
2009-11-20 20:33:59 +00:00
|
|
|
td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
|
2008-06-20 19:28:33 +00:00
|
|
|
last_idx = idx;
|
2009-11-20 20:33:59 +00:00
|
|
|
} else
|
|
|
|
td->td_ctrl2 = htole32(csum_flags);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
|
|
|
|
if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
|
|
|
|
tx_ring->tr_ready_index = 0;
|
|
|
|
tx_ring->tr_ready_wrap ^= 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
td = &tx_ring->tr_desc[first_idx];
|
2009-11-20 20:18:53 +00:00
|
|
|
td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG); /* First frag */
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
MPASS(last_idx >= 0);
|
|
|
|
tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
|
|
|
|
tbd->tbd_buf[last_idx].tb_dmap = map;
|
|
|
|
tbd->tbd_buf[last_idx].tb_mbuf = m;
|
|
|
|
|
|
|
|
tbd->tbd_used += ctx.nsegs;
|
|
|
|
MPASS(tbd->tbd_used <= ET_TX_NDESC);
|
|
|
|
|
|
|
|
bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
|
|
|
|
BUS_DMASYNC_PREWRITE);
|
|
|
|
|
2009-11-19 20:57:35 +00:00
|
|
|
tx_ready_pos = tx_ring->tr_ready_index & ET_TX_READY_POS_INDEX_MASK;
|
2008-06-20 19:28:33 +00:00
|
|
|
if (tx_ring->tr_ready_wrap)
|
|
|
|
tx_ready_pos |= ET_TX_READY_POS_WRAP;
|
|
|
|
CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos);
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
back:
|
|
|
|
if (error) {
|
|
|
|
m_freem(m);
|
|
|
|
*m0 = NULL;
|
|
|
|
}
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_txeof(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct et_txdesc_ring *tx_ring;
|
|
|
|
struct et_txbuf_data *tbd;
|
|
|
|
uint32_t tx_done;
|
|
|
|
int end, wrap;
|
|
|
|
|
|
|
|
ET_LOCK_ASSERT(sc);
|
|
|
|
ifp = sc->ifp;
|
|
|
|
tx_ring = &sc->sc_tx_ring;
|
|
|
|
tbd = &sc->sc_tx_data;
|
|
|
|
|
|
|
|
if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (tbd->tbd_used == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
|
2009-11-19 20:57:35 +00:00
|
|
|
end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
|
2008-06-20 19:28:33 +00:00
|
|
|
wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;
|
|
|
|
|
|
|
|
while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
|
|
|
|
struct et_txbuf *tb;
|
|
|
|
|
|
|
|
MPASS(tbd->tbd_start_index < ET_TX_NDESC);
|
|
|
|
tb = &tbd->tbd_buf[tbd->tbd_start_index];
|
|
|
|
|
|
|
|
bzero(&tx_ring->tr_desc[tbd->tbd_start_index],
|
|
|
|
sizeof(struct et_txdesc));
|
|
|
|
bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
|
|
|
|
BUS_DMASYNC_PREWRITE);
|
|
|
|
|
|
|
|
if (tb->tb_mbuf != NULL) {
|
|
|
|
bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap);
|
|
|
|
m_freem(tb->tb_mbuf);
|
|
|
|
tb->tb_mbuf = NULL;
|
|
|
|
ifp->if_opackets++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (++tbd->tbd_start_index == ET_TX_NDESC) {
|
|
|
|
tbd->tbd_start_index = 0;
|
|
|
|
tbd->tbd_start_wrap ^= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
MPASS(tbd->tbd_used > 0);
|
|
|
|
tbd->tbd_used--;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tbd->tbd_used == 0)
|
|
|
|
sc->watchdog_timer = 0;
|
|
|
|
if (tbd->tbd_used + ET_NSEG_SPARE <= ET_TX_NDESC)
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
|
|
|
|
|
|
|
et_start_locked(ifp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_tick(void *xsc)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = xsc;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct mii_data *mii;
|
|
|
|
|
|
|
|
ET_LOCK_ASSERT(sc);
|
|
|
|
ifp = sc->ifp;
|
|
|
|
mii = device_get_softc(sc->sc_miibus);
|
|
|
|
|
|
|
|
mii_tick(mii);
|
|
|
|
if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0 &&
|
|
|
|
(mii->mii_media_status & IFM_ACTIVE) &&
|
|
|
|
IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
|
|
|
|
if_printf(ifp, "Link up, enable TX/RX\n");
|
|
|
|
if (et_enable_txrx(sc, 0) == 0)
|
|
|
|
et_start_locked(ifp);
|
|
|
|
}
|
|
|
|
et_watchdog(sc);
|
|
|
|
callout_reset(&sc->sc_tick, hz, et_tick, sc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx, int init)
|
|
|
|
{
|
2009-11-19 21:53:21 +00:00
|
|
|
return (et_newbuf(rbd, buf_idx, init, MCLBYTES));
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx, int init)
|
|
|
|
{
|
2009-11-19 21:53:21 +00:00
|
|
|
return (et_newbuf(rbd, buf_idx, init, MHLEN));
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_newbuf(struct et_rxbuf_data *rbd, int buf_idx, int init, int len0)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = rbd->rbd_softc;
|
|
|
|
struct et_rxbuf *rb;
|
|
|
|
struct mbuf *m;
|
|
|
|
struct et_dmamap_ctx ctx;
|
|
|
|
bus_dma_segment_t seg;
|
|
|
|
bus_dmamap_t dmap;
|
|
|
|
int error, len;
|
|
|
|
|
|
|
|
MPASS(buf_idx < ET_RX_NDESC);
|
|
|
|
rb = &rbd->rbd_buf[buf_idx];
|
|
|
|
|
|
|
|
m = m_getl(len0, /* init ? M_WAIT :*/ M_DONTWAIT, MT_DATA, M_PKTHDR, &len);
|
|
|
|
if (m == NULL) {
|
|
|
|
error = ENOBUFS;
|
|
|
|
|
|
|
|
if (init) {
|
|
|
|
if_printf(sc->ifp,
|
|
|
|
"m_getl failed, size %d\n", len0);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
} else {
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m->m_len = m->m_pkthdr.len = len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try load RX mbuf into temporary DMA tag
|
|
|
|
*/
|
|
|
|
ctx.nsegs = 1;
|
|
|
|
ctx.segs = &seg;
|
|
|
|
error = bus_dmamap_load_mbuf(sc->sc_mbuf_dtag, sc->sc_mbuf_tmp_dmap, m,
|
|
|
|
et_dma_buf_addr, &ctx,
|
|
|
|
init ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
|
|
|
|
if (error || ctx.nsegs == 0) {
|
|
|
|
if (!error) {
|
|
|
|
bus_dmamap_unload(sc->sc_mbuf_dtag,
|
|
|
|
sc->sc_mbuf_tmp_dmap);
|
|
|
|
error = EFBIG;
|
|
|
|
if_printf(sc->ifp, "too many segments?!\n");
|
|
|
|
}
|
|
|
|
m_freem(m);
|
|
|
|
m = NULL;
|
|
|
|
|
|
|
|
if (init) {
|
|
|
|
if_printf(sc->ifp, "can't load RX mbuf\n");
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
} else {
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!init) {
|
|
|
|
bus_dmamap_sync(sc->sc_mbuf_dtag, rb->rb_dmap,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
|
|
|
bus_dmamap_unload(sc->sc_mbuf_dtag, rb->rb_dmap);
|
|
|
|
}
|
|
|
|
rb->rb_mbuf = m;
|
|
|
|
rb->rb_paddr = seg.ds_addr;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Swap RX buf's DMA map with the loaded temporary one
|
|
|
|
*/
|
|
|
|
dmap = rb->rb_dmap;
|
|
|
|
rb->rb_dmap = sc->sc_mbuf_tmp_dmap;
|
|
|
|
sc->sc_mbuf_tmp_dmap = dmap;
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
back:
|
|
|
|
et_setup_rxdesc(rbd, buf_idx, rb->rb_paddr);
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Create sysctl tree
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
et_add_sysctls(struct et_softc * sc)
|
|
|
|
{
|
|
|
|
struct sysctl_ctx_list *ctx;
|
|
|
|
struct sysctl_oid_list *children;
|
|
|
|
|
|
|
|
ctx = device_get_sysctl_ctx(sc->dev);
|
|
|
|
children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
|
|
|
|
|
|
|
|
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
|
|
|
|
CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
|
|
|
|
"RX IM, # packets per RX interrupt");
|
|
|
|
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
|
|
|
|
CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
|
|
|
|
"RX IM, RX interrupt delay (x10 usec)");
|
|
|
|
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
|
|
|
|
CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
|
|
|
|
"TX IM, # segments per TX interrupt");
|
|
|
|
SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
|
|
|
|
CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = arg1;
|
|
|
|
struct ifnet *ifp = sc->ifp;
|
|
|
|
int error = 0, v;
|
|
|
|
|
|
|
|
v = sc->sc_rx_intr_npkts;
|
|
|
|
error = sysctl_handle_int(oidp, &v, 0, req);
|
|
|
|
if (error || req->newptr == NULL)
|
|
|
|
goto back;
|
|
|
|
if (v <= 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sc->sc_rx_intr_npkts != v) {
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
|
|
|
|
CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
|
|
|
|
sc->sc_rx_intr_npkts = v;
|
|
|
|
}
|
|
|
|
back:
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct et_softc *sc = arg1;
|
|
|
|
struct ifnet *ifp = sc->ifp;
|
|
|
|
int error = 0, v;
|
|
|
|
|
|
|
|
v = sc->sc_rx_intr_delay;
|
|
|
|
error = sysctl_handle_int(oidp, &v, 0, req);
|
|
|
|
if (error || req->newptr == NULL)
|
|
|
|
goto back;
|
|
|
|
if (v <= 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto back;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (sc->sc_rx_intr_delay != v) {
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
|
|
|
|
CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
|
|
|
|
sc->sc_rx_intr_delay = v;
|
|
|
|
}
|
|
|
|
back:
|
2009-11-19 21:53:21 +00:00
|
|
|
return (error);
|
2008-06-20 19:28:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_setmedia(struct et_softc *sc)
|
|
|
|
{
|
|
|
|
struct mii_data *mii = device_get_softc(sc->sc_miibus);
|
|
|
|
uint32_t cfg2, ctrl;
|
|
|
|
|
|
|
|
cfg2 = CSR_READ_4(sc, ET_MAC_CFG2);
|
|
|
|
cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII |
|
|
|
|
ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM);
|
|
|
|
cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC |
|
2009-11-19 20:57:35 +00:00
|
|
|
((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) &
|
|
|
|
ET_MAC_CFG2_PREAMBLE_LEN_MASK);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
ctrl = CSR_READ_4(sc, ET_MAC_CTRL);
|
|
|
|
ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII);
|
|
|
|
|
|
|
|
if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) {
|
|
|
|
cfg2 |= ET_MAC_CFG2_MODE_GMII;
|
|
|
|
} else {
|
|
|
|
cfg2 |= ET_MAC_CFG2_MODE_MII;
|
|
|
|
ctrl |= ET_MAC_CTRL_MODE_MII;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
|
|
|
|
cfg2 |= ET_MAC_CFG2_FDX;
|
|
|
|
else
|
|
|
|
ctrl |= ET_MAC_CTRL_GHDX;
|
|
|
|
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl);
|
|
|
|
CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
et_setup_rxdesc(struct et_rxbuf_data *rbd, int buf_idx, bus_addr_t paddr)
|
|
|
|
{
|
|
|
|
struct et_rxdesc_ring *rx_ring = rbd->rbd_ring;
|
|
|
|
struct et_rxdesc *desc;
|
|
|
|
|
|
|
|
MPASS(buf_idx < ET_RX_NDESC);
|
|
|
|
desc = &rx_ring->rr_desc[buf_idx];
|
|
|
|
|
2009-11-20 20:18:53 +00:00
|
|
|
desc->rd_addr_hi = htole32(ET_ADDR_HI(paddr));
|
|
|
|
desc->rd_addr_lo = htole32(ET_ADDR_LO(paddr));
|
|
|
|
desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
|
2008-06-20 19:28:33 +00:00
|
|
|
|
|
|
|
bus_dmamap_sync(rx_ring->rr_dtag, rx_ring->rr_dmap,
|
|
|
|
BUS_DMASYNC_PREWRITE);
|
|
|
|
}
|
2011-12-05 22:22:39 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
et_suspend(device_t dev)
|
|
|
|
{
|
|
|
|
struct et_softc *sc;
|
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
|
|
|
ET_LOCK(sc);
|
|
|
|
if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
|
|
|
|
et_stop(sc);
|
|
|
|
ET_UNLOCK(sc);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
et_resume(device_t dev)
|
|
|
|
{
|
|
|
|
struct et_softc *sc;
|
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
|
|
|
ET_LOCK(sc);
|
|
|
|
if ((sc->ifp->if_flags & IFF_UP) != 0)
|
|
|
|
et_init_locked(sc);
|
|
|
|
ET_UNLOCK(sc);
|
|
|
|
return (0);
|
|
|
|
}
|