2005-01-07 02:29:27 +00:00
|
|
|
/*-
|
1999-08-21 18:34:58 +00:00
|
|
|
* Copyright (c) 1997, 1998, 1999
|
|
|
|
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by Bill Paul.
|
|
|
|
* 4. Neither the name of the author nor the names of any co-contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
|
|
|
|
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
|
|
|
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
|
|
|
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
|
|
|
* THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
2003-04-03 21:36:33 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2005-10-05 10:09:17 +00:00
|
|
|
#ifdef HAVE_KERNEL_OPTION_HEADERS
|
|
|
|
#include "opt_device_polling.h"
|
|
|
|
#endif
|
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <sys/bus.h>
|
|
|
|
#include <sys/endian.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <sys/kernel.h>
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
2004-05-30 20:00:41 +00:00
|
|
|
#include <sys/module.h>
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <sys/rman.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <sys/socket.h>
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <sys/sockio.h>
|
2004-03-31 21:10:01 +00:00
|
|
|
#include <sys/sysctl.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <net/bpf.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <net/if.h>
|
2013-10-26 17:58:36 +00:00
|
|
|
#include <net/if_var.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <net/if_arp.h>
|
|
|
|
#include <net/ethernet.h>
|
|
|
|
#include <net/if_dl.h>
|
|
|
|
#include <net/if_media.h>
|
2005-06-10 16:49:24 +00:00
|
|
|
#include <net/if_types.h>
|
2002-08-07 22:31:27 +00:00
|
|
|
#include <net/if_vlan_var.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
#include <machine/bus.h>
|
|
|
|
#include <machine/resource.h>
|
|
|
|
|
|
|
|
#include <dev/mii/mii.h>
|
2011-11-01 16:13:59 +00:00
|
|
|
#include <dev/mii/mii_bitbang.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
#include <dev/mii/miivar.h>
|
|
|
|
|
2003-08-22 07:20:27 +00:00
|
|
|
#include <dev/pci/pcireg.h>
|
|
|
|
#include <dev/pci/pcivar.h>
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
#include <dev/ste/if_stereg.h>
|
|
|
|
|
2005-10-22 05:06:55 +00:00
|
|
|
/* "device miibus" required. See GENERIC if you get errors here. */
|
1999-08-21 18:34:58 +00:00
|
|
|
#include "miibus_if.h"
|
|
|
|
|
2003-04-15 06:37:30 +00:00
|
|
|
/* Module dependencies: the PCI bus, Ethernet, and MII layers must be present. */
MODULE_DEPEND(ste, pci, 1, 1, 1);
MODULE_DEPEND(ste, ether, 1, 1, 1);
MODULE_DEPEND(ste, miibus, 1, 1, 1);

/* Define to show Tx error status. */
#define STE_SHOW_TXERRORS
|
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
/*
|
|
|
|
* Various supported device vendors/types and their names.
|
|
|
|
*/
|
2012-11-05 19:16:27 +00:00
|
|
|
static const struct ste_type ste_devs[] = {
	/* The ST201 appears under two distinct PCI device IDs. */
	{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
	{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	/* Sentinel entry terminating the probe table. */
	{ 0, 0, NULL }
};
|
|
|
|
|
2009-12-21 20:42:23 +00:00
|
|
|
static int ste_attach(device_t);
|
|
|
|
static int ste_detach(device_t);
|
|
|
|
static int ste_probe(device_t);
|
2009-12-24 18:17:53 +00:00
|
|
|
static int ste_resume(device_t);
|
2009-12-21 20:42:23 +00:00
|
|
|
static int ste_shutdown(device_t);
|
2009-12-24 18:17:53 +00:00
|
|
|
static int ste_suspend(device_t);
|
2009-12-21 20:42:23 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
static int ste_dma_alloc(struct ste_softc *);
|
|
|
|
static void ste_dma_free(struct ste_softc *);
|
|
|
|
static void ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
|
2009-12-21 20:42:23 +00:00
|
|
|
static int ste_eeprom_wait(struct ste_softc *);
|
2009-12-22 18:57:07 +00:00
|
|
|
static int ste_encap(struct ste_softc *, struct mbuf **,
|
|
|
|
struct ste_chain *);
|
2009-12-21 20:42:23 +00:00
|
|
|
static int ste_ifmedia_upd(struct ifnet *);
|
|
|
|
static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
|
|
|
|
static void ste_init(void *);
|
|
|
|
static void ste_init_locked(struct ste_softc *);
|
|
|
|
static int ste_init_rx_list(struct ste_softc *);
|
|
|
|
static void ste_init_tx_list(struct ste_softc *);
|
|
|
|
static void ste_intr(void *);
|
|
|
|
static int ste_ioctl(struct ifnet *, u_long, caddr_t);
|
2011-11-01 16:13:59 +00:00
|
|
|
static uint32_t ste_mii_bitbang_read(device_t);
|
|
|
|
static void ste_mii_bitbang_write(device_t, uint32_t);
|
2009-12-21 20:42:23 +00:00
|
|
|
static int ste_miibus_readreg(device_t, int, int);
|
|
|
|
static void ste_miibus_statchg(device_t);
|
|
|
|
static int ste_miibus_writereg(device_t, int, int, int);
|
2009-12-22 18:57:07 +00:00
|
|
|
static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
|
2010-01-08 02:39:53 +00:00
|
|
|
static int ste_read_eeprom(struct ste_softc *, uint16_t *, int, int);
|
2009-12-21 20:42:23 +00:00
|
|
|
static void ste_reset(struct ste_softc *);
|
2009-12-22 23:57:10 +00:00
|
|
|
static void ste_restart_tx(struct ste_softc *);
|
2009-12-22 18:57:07 +00:00
|
|
|
static int ste_rxeof(struct ste_softc *, int);
|
2009-12-23 18:24:22 +00:00
|
|
|
static void ste_rxfilter(struct ste_softc *);
|
2009-12-24 18:17:53 +00:00
|
|
|
static void ste_setwol(struct ste_softc *);
|
2009-12-21 20:42:23 +00:00
|
|
|
static void ste_start(struct ifnet *);
|
|
|
|
static void ste_start_locked(struct ifnet *);
|
2009-12-23 19:18:07 +00:00
|
|
|
static void ste_stats_clear(struct ste_softc *);
|
2009-12-22 20:11:56 +00:00
|
|
|
static void ste_stats_update(struct ste_softc *);
|
2009-12-21 20:42:23 +00:00
|
|
|
static void ste_stop(struct ste_softc *);
|
2009-12-23 19:18:07 +00:00
|
|
|
static void ste_sysctl_node(struct ste_softc *);
|
2009-12-22 20:11:56 +00:00
|
|
|
static void ste_tick(void *);
|
2009-12-21 20:42:23 +00:00
|
|
|
static void ste_txeoc(struct ste_softc *);
|
|
|
|
static void ste_txeof(struct ste_softc *);
|
|
|
|
static void ste_wait(struct ste_softc *);
|
|
|
|
static void ste_watchdog(struct ste_softc *);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
/*
|
|
|
|
* MII bit-bang glue
|
|
|
|
*/
|
|
|
|
/* Register accessors and PHYCTL bit assignments for the shared MII bit-bang code. */
static const struct mii_bitbang_ops ste_mii_bitbang_ops = {
	ste_mii_bitbang_read,
	ste_mii_bitbang_write,
	{
		STE_PHYCTL_MDATA,	/* MII_BIT_MDO */
		STE_PHYCTL_MDATA,	/* MII_BIT_MDI */
		STE_PHYCTL_MCLK,	/* MII_BIT_MDC */
		STE_PHYCTL_MDIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
|
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
/* newbus method table wiring this driver into the device framework. */
static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),
	DEVMETHOD(device_suspend,	ste_suspend),
	DEVMETHOD(device_resume,	ste_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	DEVMETHOD_END
};
|
|
|
|
|
|
|
|
/* Driver description handed to DRIVER_MODULE(); softc is allocated per device. */
static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

/* Attach ste to the PCI bus, and hang a miibus instance off each ste device. */
DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
/* Read-modify-write helpers for setting/clearing bits in 4-, 2-, and 1-byte
 * CSR registers. */
#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
/*
|
2011-11-01 16:13:59 +00:00
|
|
|
* Read the MII serial port for the MII bit-bang module.
|
1999-08-21 18:34:58 +00:00
|
|
|
*/
|
2011-11-01 16:13:59 +00:00
|
|
|
static uint32_t
|
|
|
|
ste_mii_bitbang_read(device_t dev)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2011-11-01 16:13:59 +00:00
|
|
|
struct ste_softc *sc;
|
|
|
|
uint32_t val;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
sc = device_get_softc(dev);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
val = CSR_READ_1(sc, STE_PHYCTL);
|
|
|
|
CSR_BARRIER(sc, STE_PHYCTL, 1,
|
|
|
|
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
return (val);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-11-01 16:13:59 +00:00
|
|
|
* Write the MII serial port for the MII bit-bang module.
|
1999-08-21 18:34:58 +00:00
|
|
|
*/
|
2011-11-01 16:13:59 +00:00
|
|
|
static void
|
|
|
|
ste_mii_bitbang_write(device_t dev, uint32_t val)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2011-11-01 16:13:59 +00:00
|
|
|
struct ste_softc *sc;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
sc = device_get_softc(dev);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
CSR_WRITE_1(sc, STE_PHYCTL, val);
|
|
|
|
CSR_BARRIER(sc, STE_PHYCTL, 1,
|
|
|
|
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_miibus_readreg(device_t dev, int phy, int reg)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
return (mii_bitbang_readreg(dev, &ste_mii_bitbang_ops, phy, reg));
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_miibus_writereg(device_t dev, int phy, int reg, int data)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
|
|
|
|
2011-11-01 16:13:59 +00:00
|
|
|
mii_bitbang_writereg(dev, &ste_mii_bitbang_ops, phy, reg, data);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_miibus_statchg(device_t dev)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
|
|
|
struct mii_data *mii;
|
2009-12-22 20:11:56 +00:00
|
|
|
struct ifnet *ifp;
|
|
|
|
uint16_t cfg;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
2005-08-30 20:35:08 +00:00
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
mii = device_get_softc(sc->ste_miibus);
|
2009-12-22 20:11:56 +00:00
|
|
|
ifp = sc->ste_ifp;
|
|
|
|
if (mii == NULL || ifp == NULL ||
|
|
|
|
(ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
|
|
|
|
return;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 20:11:56 +00:00
|
|
|
sc->ste_flags &= ~STE_FLAG_LINK;
|
|
|
|
if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
|
|
|
|
(IFM_ACTIVE | IFM_AVALID)) {
|
|
|
|
switch (IFM_SUBTYPE(mii->mii_media_active)) {
|
|
|
|
case IFM_10_T:
|
|
|
|
case IFM_100_TX:
|
|
|
|
case IFM_100_FX:
|
|
|
|
case IFM_100_T4:
|
|
|
|
sc->ste_flags |= STE_FLAG_LINK;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Program MACs with resolved speed/duplex/flow-control. */
|
|
|
|
if ((sc->ste_flags & STE_FLAG_LINK) != 0) {
|
|
|
|
cfg = CSR_READ_2(sc, STE_MACCTL0);
|
|
|
|
cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX);
|
|
|
|
if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
|
|
|
|
/*
|
|
|
|
* ST201 data sheet says driver should enable receiving
|
|
|
|
* MAC control frames bit of receive mode register to
|
|
|
|
* receive flow-control frames but the register has no
|
|
|
|
* such bits. In addition the controller has no ability
|
|
|
|
* to send pause frames so it should be handled in
|
|
|
|
* driver. Implementing pause timer handling in driver
|
|
|
|
* layer is not trivial, so don't enable flow-control
|
|
|
|
* here.
|
|
|
|
*/
|
|
|
|
cfg |= STE_MACCTL0_FULLDUPLEX;
|
|
|
|
}
|
|
|
|
CSR_WRITE_2(sc, STE_MACCTL0, cfg);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
}
|
2009-12-21 20:02:12 +00:00
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_ifmedia_upd(struct ifnet *ifp)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
2009-12-23 18:42:25 +00:00
|
|
|
struct mii_data *mii;
|
|
|
|
struct mii_softc *miisc;
|
|
|
|
int error;
|
2005-08-30 20:35:08 +00:00
|
|
|
|
|
|
|
sc = ifp->if_softc;
|
|
|
|
STE_LOCK(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
mii = device_get_softc(sc->ste_miibus);
|
- Remove attempts to implement setting of BMCR_LOOP/MIIF_NOLOOP
(reporting IFM_LOOP based on BMCR_LOOP is left in place though as
it might provide useful for debugging). For most mii(4) drivers it
was unclear whether the PHYs driven by them actually support
loopback or not. Moreover, typically loopback mode also needs to
be activated on the MAC, which none of the Ethernet drivers using
mii(4) implements. Given that loopback media has no real use (and
obviously hardly had a chance to actually work) besides for driver
development (which just loopback mode should be sufficient for
though, i.e one doesn't necessary need support for loopback media)
support for it is just dropped as both NetBSD and OpenBSD already
did quite some time ago.
- Let mii_phy_add_media() also announce the support of IFM_NONE.
- Restructure the PHY entry points to use a structure of entry points
instead of discrete function pointers, and extend this to include
a "reset" entry point. Make sure any PHY-specific reset routine is
always used, and provide one for lxtphy(4) which disables MII
interrupts (as is done for a few other PHYs we have drivers for).
This includes changing NIC drivers which previously just called the
generic mii_phy_reset() to now actually call the PHY-specific reset
routine, which might be crucial in some cases. While at it, the
redundant checks in these NIC drivers for mii->mii_instance not being
zero before calling the reset routines were removed because as soon
as one PHY driver attaches mii->mii_instance is incremented and we
hardly can end up in their media change callbacks etc if no PHY driver
has attached as mii_attach() would have failed in that case and not
attach a miibus(4) instance.
Consequently, NIC drivers now no longer should call mii_phy_reset()
directly, so it was removed from EXPORT_SYMS.
- Add a mii_phy_dev_attach() as a companion helper to mii_phy_dev_probe().
The purpose of that function is to perform the common steps to attach
a PHY driver instance and to hook it up to the miibus(4) instance and to
optionally also handle the probing, addition and initialization of the
supported media. So all a PHY driver without any special requirements
has to do in its bus attach method is to call mii_phy_dev_attach()
along with PHY-specific MIIF_* flags, a pointer to its PHY functions
and the add_media set to one. All PHY drivers were updated to take
advantage of mii_phy_dev_attach() as appropriate. Along with these
changes the capability mask was added to the mii_softc structure so
PHY drivers taking advantage of mii_phy_dev_attach() but still
handling media on their own do not need to fiddle with the MII attach
arguments anyway.
- Keep track of the PHY offset in the mii_softc structure. This is done
for compatibility with NetBSD/OpenBSD.
- Keep track of the PHY's OUI, model and revision in the mii_softc
structure. Several PHY drivers require this information also after
attaching and previously had to wrap their own softc around mii_softc.
NetBSD/OpenBSD also keep track of the model and revision on their
mii_softc structure. All PHY drivers were updated to take advantage
as appropriate.
- Convert the mebers of the MII data structure to unsigned where
appropriate. This is partly inspired by NetBSD/OpenBSD.
- According to IEEE 802.3-2002 the bits actually have to be reversed
when mapping an OUI to the MII ID registers. All PHY drivers and
miidevs where changed as necessary. Actually this now again allows to
largely share miidevs with NetBSD, which fixed this problem already
9 years ago. Consequently miidevs was synced as far as possible.
- Add MIIF_NOMANPAUSE and mii_phy_flowstatus() calls to drivers that
weren't explicitly converted to support flow control before. It's
unclear whether flow control actually works with these but typically
it should and their net behavior should be more correct with these
changes in place than without if the MAC driver sets MIIF_DOPAUSE.
Obtained from: NetBSD (partially)
Reviewed by: yongari (earlier version), silence on arch@ and net@
2011-05-03 19:51:29 +00:00
|
|
|
LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
|
|
|
|
PHY_RESET(miisc);
|
2009-12-23 18:42:25 +00:00
|
|
|
error = mii_mediachg(mii);
|
|
|
|
STE_UNLOCK(sc);
|
|
|
|
|
|
|
|
return (error);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
|
|
|
struct mii_data *mii;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = ifp->if_softc;
|
|
|
|
mii = device_get_softc(sc->ste_miibus);
|
|
|
|
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_LOCK(sc);
|
2009-12-23 18:28:47 +00:00
|
|
|
if ((ifp->if_flags & IFF_UP) == 0) {
|
|
|
|
STE_UNLOCK(sc);
|
|
|
|
return;
|
|
|
|
}
|
1999-08-21 18:34:58 +00:00
|
|
|
mii_pollstat(mii);
|
|
|
|
ifmr->ifm_active = mii->mii_media_active;
|
|
|
|
ifmr->ifm_status = mii->mii_media_status;
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_UNLOCK(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_wait(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
int i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
for (i = 0; i < STE_TIMEOUT; i++) {
|
|
|
|
if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
|
|
|
|
break;
|
2009-12-22 19:00:18 +00:00
|
|
|
DELAY(1);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (i == STE_TIMEOUT)
|
2006-09-15 10:40:54 +00:00
|
|
|
device_printf(sc->ste_dev, "command never completed!\n");
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The EEPROM is slow: give it time to come ready after issuing
|
|
|
|
* it a command.
|
|
|
|
*/
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_eeprom_wait(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
int i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
DELAY(1000);
|
|
|
|
|
|
|
|
for (i = 0; i < 100; i++) {
|
|
|
|
if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
|
|
|
|
DELAY(1000);
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == 100) {
|
2006-09-15 10:40:54 +00:00
|
|
|
device_printf(sc->ste_dev, "eeprom failed to come ready\n");
|
2009-12-21 20:18:01 +00:00
|
|
|
return (1);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read a sequence of words from the EEPROM. Note that ethernet address
|
|
|
|
* data is stored in the EEPROM in network byte order.
|
|
|
|
*/
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2010-01-08 02:39:53 +00:00
|
|
|
ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
int err = 0, i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
if (ste_eeprom_wait(sc))
|
2009-12-21 20:18:01 +00:00
|
|
|
return (1);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
for (i = 0; i < cnt; i++) {
|
|
|
|
CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
|
|
|
|
err = ste_eeprom_wait(sc);
|
|
|
|
if (err)
|
|
|
|
break;
|
2010-01-08 02:39:53 +00:00
|
|
|
*dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA));
|
|
|
|
dest++;
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (err ? 1 : 0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-23 18:24:22 +00:00
|
|
|
ste_rxfilter(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
uint32_t hashes[2] = { 0, 0 };
|
2009-12-23 18:24:22 +00:00
|
|
|
uint8_t rxcfg;
|
2009-12-21 20:18:01 +00:00
|
|
|
int h;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-23 18:24:22 +00:00
|
|
|
STE_LOCK_ASSERT(sc);
|
|
|
|
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp = sc->ste_ifp;
|
2009-12-23 18:24:22 +00:00
|
|
|
rxcfg = CSR_READ_1(sc, STE_RX_MODE);
|
|
|
|
rxcfg |= STE_RXMODE_UNICAST;
|
|
|
|
rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH |
|
|
|
|
STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC);
|
|
|
|
if (ifp->if_flags & IFF_BROADCAST)
|
|
|
|
rxcfg |= STE_RXMODE_BROADCAST;
|
|
|
|
if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
|
|
|
|
if ((ifp->if_flags & IFF_ALLMULTI) != 0)
|
|
|
|
rxcfg |= STE_RXMODE_ALLMULTI;
|
|
|
|
if ((ifp->if_flags & IFF_PROMISC) != 0)
|
|
|
|
rxcfg |= STE_RXMODE_PROMISC;
|
|
|
|
goto chipit;
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2009-12-23 18:24:22 +00:00
|
|
|
rxcfg |= STE_RXMODE_MULTIHASH;
|
|
|
|
/* Now program new ones. */
|
2009-06-26 11:45:06 +00:00
|
|
|
if_maddr_rlock(ifp);
|
2001-02-06 10:12:15 +00:00
|
|
|
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
1999-08-21 18:34:58 +00:00
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
2004-06-09 14:34:04 +00:00
|
|
|
h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
|
|
|
|
ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
|
1999-08-21 18:34:58 +00:00
|
|
|
if (h < 32)
|
|
|
|
hashes[0] |= (1 << h);
|
|
|
|
else
|
|
|
|
hashes[1] |= (1 << (h - 32));
|
|
|
|
}
|
2009-06-26 11:45:06 +00:00
|
|
|
if_maddr_runlock(ifp);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-23 18:24:22 +00:00
|
|
|
chipit:
|
2001-08-23 18:22:55 +00:00
|
|
|
CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
|
|
|
|
CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
|
|
|
|
CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
|
|
|
|
CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
|
2009-12-23 18:24:22 +00:00
|
|
|
CSR_WRITE_1(sc, STE_RX_MODE, rxcfg);
|
|
|
|
CSR_READ_1(sc, STE_RX_MODE);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2004-03-31 20:39:20 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
2005-08-30 20:35:08 +00:00
|
|
|
static poll_handler_t ste_poll, ste_poll_locked;
|
2004-03-31 20:39:20 +00:00
|
|
|
|
2009-05-30 15:14:44 +00:00
|
|
|
static int
|
2004-03-31 20:39:20 +00:00
|
|
|
ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
|
|
|
|
{
|
|
|
|
struct ste_softc *sc = ifp->if_softc;
|
2009-05-30 15:14:44 +00:00
|
|
|
int rx_npkts = 0;
|
2004-03-31 20:39:20 +00:00
|
|
|
|
|
|
|
STE_LOCK(sc);
|
Big polling(4) cleanup.
o Axe poll in trap.
o Axe IFF_POLLING flag from if_flags.
o Rework revision 1.21 (Giant removal), in such a way that
poll_mtx is not dropped during call to polling handler.
This fixes problem with idle polling.
o Make registration and deregistration from polling in a
functional way, insted of next tick/interrupt.
o Obsolete kern.polling.enable. Polling is turned on/off
with ifconfig.
Detailed kern_poll.c changes:
- Remove polling handler flags, introduced in 1.21. The are not
needed now.
- Forget and do not check if_flags, if_capenable and if_drv_flags.
- Call all registered polling handlers unconditionally.
- Do not drop poll_mtx, when entering polling handlers.
- In ether_poll() NET_LOCK_GIANT prior to locking poll_mtx.
- In netisr_poll() axe the block, where polling code asks drivers
to unregister.
- In netisr_poll() and ether_poll() do polling always, if any
handlers are present.
- In ether_poll_[de]register() remove a lot of error hiding code. Assert
that arguments are correct, instead.
- In ether_poll_[de]register() use standard return values in case of
error or success.
- Introduce poll_switch() that is a sysctl handler for kern.polling.enable.
poll_switch() goes through interface list and enabled/disables polling.
A message that kern.polling.enable is deprecated is printed.
Detailed driver changes:
- On attach driver announces IFCAP_POLLING in if_capabilities, but
not in if_capenable.
- On detach driver calls ether_poll_deregister() if polling is enabled.
- In polling handler driver obtains its lock and checks IFF_DRV_RUNNING
flag. If there is no, then unlocks and returns.
- In ioctl handler driver checks for IFCAP_POLLING flag requested to
be set or cleared. Driver first calls ether_poll_[de]register(), then
obtains driver lock and [dis/en]ables interrupts.
- In interrupt handler driver checks IFCAP_POLLING flag in if_capenable.
If present, then returns.This is important to protect from spurious
interrupts.
Reviewed by: ru, sam, jhb
2005-10-01 18:56:19 +00:00
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
|
2009-05-30 15:14:44 +00:00
|
|
|
rx_npkts = ste_poll_locked(ifp, cmd, count);
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_UNLOCK(sc);
|
2009-05-30 15:14:44 +00:00
|
|
|
return (rx_npkts);
|
2005-08-30 20:35:08 +00:00
|
|
|
}
|
|
|
|
|
2009-05-30 15:14:44 +00:00
|
|
|
static int
|
2005-08-30 20:35:08 +00:00
|
|
|
ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
|
|
|
|
{
|
|
|
|
struct ste_softc *sc = ifp->if_softc;
|
2009-05-30 15:14:44 +00:00
|
|
|
int rx_npkts;
|
2005-08-30 20:35:08 +00:00
|
|
|
|
|
|
|
STE_LOCK_ASSERT(sc);
|
2004-03-31 20:39:20 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
rx_npkts = ste_rxeof(sc, count);
|
2004-03-31 20:39:20 +00:00
|
|
|
ste_txeof(sc);
|
2009-12-22 23:57:10 +00:00
|
|
|
ste_txeoc(sc);
|
2005-07-08 13:05:59 +00:00
|
|
|
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
|
2005-08-30 20:35:08 +00:00
|
|
|
ste_start_locked(ifp);
|
2004-03-31 20:39:20 +00:00
|
|
|
|
2004-04-06 11:04:54 +00:00
|
|
|
if (cmd == POLL_AND_CHECK_STATUS) {
|
2009-12-21 20:00:27 +00:00
|
|
|
uint16_t status;
|
2004-03-31 20:39:20 +00:00
|
|
|
|
|
|
|
status = CSR_READ_2(sc, STE_ISR_ACK);
|
|
|
|
|
2009-12-22 20:11:56 +00:00
|
|
|
if (status & STE_ISR_STATS_OFLOW)
|
2004-03-31 20:39:20 +00:00
|
|
|
ste_stats_update(sc);
|
|
|
|
|
2009-12-23 17:46:11 +00:00
|
|
|
if (status & STE_ISR_HOSTERR) {
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
|
2005-08-30 20:35:08 +00:00
|
|
|
ste_init_locked(sc);
|
2009-12-23 17:46:11 +00:00
|
|
|
}
|
2004-03-31 20:39:20 +00:00
|
|
|
}
|
2009-05-30 15:14:44 +00:00
|
|
|
return (rx_npkts);
|
2004-03-31 20:39:20 +00:00
|
|
|
}
|
|
|
|
#endif /* DEVICE_POLLING */
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Interrupt handler.  Acknowledges and dispatches all pending interrupt
 * causes, and implements a software RX interrupt-moderation scheme using
 * the chip's single-shot countdown timer (the ST201 has no hardware RX
 * moderation).
 */
static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t intrs, status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	/* In polling mode all work is done from ste_poll(); ignore IRQs. */
	if (ifp->if_capenable & IFCAP_POLLING) {
		STE_UNLOCK(sc);
		return;
	}
#endif
	/* Reading STE_ISR_ACK clears STE_IMR register. */
	status = CSR_READ_2(sc, STE_ISR_ACK);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STE_UNLOCK(sc);
		return;
	}

	intrs = STE_INTRS;
	/* 0xFFFF reads back when the device is gone; otherwise spurious. */
	if (status == 0xFFFF || (status & intrs) == 0)
		goto done;

	if (sc->ste_int_rx_act > 0) {
		/*
		 * RX moderation timer armed: suppress RX_DMADONE handling
		 * here; RX completions are serviced via the countdown timer
		 * (SOFTINTR) path instead.
		 */
		status &= ~STE_ISR_RX_DMADONE;
		intrs &= ~STE_IMR_RX_DMADONE;
	}

	if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
		ste_rxeof(sc, -1);
		/*
		 * The controller has no ability to Rx interrupt
		 * moderation feature. Receiving 64 bytes frames
		 * from wire generates too many interrupts which in
		 * turn make system useless to process other useful
		 * things. Fortunately ST201 supports single shot
		 * timer so use the timer to implement Rx interrupt
		 * moderation in driver. This adds more register
		 * access but it greatly reduces number of Rx
		 * interrupts under high network load.
		 */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
		    (sc->ste_int_rx_mod != 0)) {
			if ((status & STE_ISR_RX_DMADONE) != 0) {
				/* Arm the one-shot timer; mask RX_DMADONE. */
				CSR_WRITE_2(sc, STE_COUNTDOWN,
				    STE_TIMER_USECS(sc->ste_int_rx_mod));
				intrs &= ~STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 1;
			} else {
				/* Timer fired: re-enable direct RX IRQs. */
				intrs |= STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 0;
			}
		}
	}
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & STE_ISR_TX_DMADONE) != 0)
			ste_txeof(sc);
		if ((status & STE_ISR_TX_DONE) != 0)
			ste_txeoc(sc);
		if ((status & STE_ISR_STATS_OFLOW) != 0)
			ste_stats_update(sc);
		if ((status & STE_ISR_HOSTERR) != 0) {
			/* Fatal error: reinit and return without re-enabling
			 * interrupts here (ste_init_locked reprograms IMR). */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ste_init_locked(sc);
			STE_UNLOCK(sc);
			return;
		}
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ste_start_locked(ifp);
/*
 * NOTE(review): 'done' sits inside the IFF_DRV_RUNNING block; the goto
 * above jumps into it so spurious interrupts still rewrite IMR.  This is
 * intentional-looking but subtle — preserve the label placement.
 */
done:
		/* Re-enable interrupts */
		CSR_WRITE_2(sc, STE_IMR, intrs);
	}
	STE_UNLOCK(sc);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A frame has been uploaded: pass the resulting mbuf chain up to
|
|
|
|
* the higher level protocols.
|
|
|
|
*/
|
2009-05-30 15:14:44 +00:00
|
|
|
/*
 * RX completion: walk the receive descriptor ring, hand completed frames
 * to the network stack, and re-arm each descriptor with a fresh mbuf.
 * 'count' bounds the number of frames processed under DEVICE_POLLING
 * (count < 0 means unbounded).  Returns the number of descriptors
 * examined (used as rx_npkts by the polling framework).
 */
static int
ste_rxeof(struct ste_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	uint32_t rxstat;
	int total_len, rx_npkts;

	ifp = sc->ste_ifp;

	/* Pick up descriptor updates written by the DMA engine. */
	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cur_rx = sc->ste_cdata.ste_rx_head;
	for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++,
	    cur_rx = cur_rx->ste_next) {
		rxstat = le32toh(cur_rx->ste_ptr->ste_status);
		if ((rxstat & STE_RXSTAT_DMADONE) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (count == 0)
				break;
			count--;
		}
#endif
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = STE_RX_BYTES(rxstat);

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		/* Drop the softc lock across the stack input call. */
		STE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		STE_LOCK(sc);
	}

	if (rx_npkts > 0) {
		/* Remember where to resume and push descriptor writes out. */
		sc->ste_cdata.ste_rx_head = cur_rx;
		bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
		    sc->ste_cdata.ste_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (rx_npkts);
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Drain the transmit status queue, handling TX errors.  On TX underrun
 * the start threshold is raised and the interface reinitialized; other
 * errors just restart the transmitter.
 */
static void
ste_txeoc(struct ste_softc *sc)
{
	uint16_t txstat;
	struct ifnet *ifp;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;

	/*
	 * STE_TX_STATUS register implements a queue of up to 31
	 * transmit status byte. Writing an arbitrary value to the
	 * register will advance the queue to the next transmit
	 * status byte. This means if driver does not read
	 * STE_TX_STATUS register after completing sending more
	 * than 31 frames the controller would be stalled so driver
	 * should re-wake the Tx MAC. This is the most severe
	 * limitation of ST201 based controller.
	 */
	for (;;) {
		txstat = CSR_READ_2(sc, STE_TX_STATUS);
		if ((txstat & STE_TXSTATUS_TXDONE) == 0)
			break;
		if ((txstat & (STE_TXSTATUS_UNDERRUN |
		    STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR |
		    STE_TXSTATUS_STATSOFLOW)) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#ifdef STE_SHOW_TXERRORS
			device_printf(sc->ste_dev, "TX error : 0x%b\n",
			    txstat & 0xFF, STE_ERR_BITS);
#endif
			if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				/* Bump the TX start threshold, capped at
				 * one full packet. */
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if (sc->ste_tx_thresh > STE_PACKET_SIZE)
					sc->ste_tx_thresh = STE_PACKET_SIZE;
				device_printf(sc->ste_dev,
				    "TX underrun, increasing TX"
				    " start threshold to %d bytes\n",
				    sc->ste_tx_thresh);
				/* Make sure to disable active DMA cycles. */
				STE_SETBIT4(sc, STE_DMACTL,
				    STE_DMACTL_TXDMA_STALL);
				ste_wait(sc);
				/* Full reinit picks up the new threshold. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ste_init_locked(sc);
				break;
			}
			/* Restart Tx. */
			ste_restart_tx(sc);
		}
		/*
		 * Advance to next status and ACK TxComplete
		 * interrupt. ST201 data sheet was wrong here, to
		 * get next Tx status, we have to write both
		 * STE_TX_STATUS and STE_TX_FRAMEID register.
		 * Otherwise controller returns the same status
		 * as well as not acknowledge Tx completion
		 * interrupt.
		 */
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}
}
|
|
|
|
|
2009-12-22 20:11:56 +00:00
|
|
|
/*
 * Once-per-second callout: tick the MII, re-check link state, reclaim
 * TX buffers, refresh statistics, and run the watchdog.  Reschedules
 * itself; must be called with the softc lock held.
 */
static void
ste_tick(void *arg)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = (struct ste_softc *)arg;

	STE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ste_miibus);
	mii_tick(mii);
	/*
	 * ukphy(4) does not seem to generate CB that reports
	 * resolved link state so if we know we lost a link,
	 * explicitly check the link state.
	 */
	if ((sc->ste_flags & STE_FLAG_LINK) == 0)
		ste_miibus_statchg(sc->ste_dev);
	/*
	 * Because we are not generating Tx completion
	 * interrupt for every frame, reclaim transmitted
	 * buffers here.
	 */
	ste_txeof(sc);
	ste_txeoc(sc);
	ste_stats_update(sc);
	ste_watchdog(sc);
	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * TX completion: walk the transmit ring from the consumer index,
 * unloading DMA maps and freeing mbufs for every descriptor the chip
 * has finished with.  Clears the watchdog timer when the ring drains.
 */
static void
ste_txeof(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain *cur_tx;
	uint32_t txstat;
	int idx;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	idx = sc->ste_cdata.ste_tx_cons;
	/* Nothing outstanding. */
	if (idx == sc->ste_cdata.ste_tx_prod)
		return;

	/* Pick up descriptor status written by the DMA engine. */
	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
		txstat = le32toh(cur_tx->ste_ptr->ste_ctl);
		/* Stop at the first descriptor still owned by hardware. */
		if ((txstat & STE_TXCTL_DMADONE) == 0)
			break;
		bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map);
		KASSERT(cur_tx->ste_mbuf != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	/* Only disarm the watchdog when the ring is completely empty. */
	if (sc->ste_cdata.ste_tx_cnt == 0)
		sc->ste_timer = 0;
}
|
|
|
|
|
2009-12-23 19:18:07 +00:00
|
|
|
/*
 * Clear the hardware statistics counters.  The statistics registers
 * appear to be clear-on-read (the read values are deliberately
 * discarded), so reading every counter resets it — NOTE(review):
 * clear-on-read semantics inferred from usage; confirm against the
 * ST201 data sheet.
 */
static void
ste_stats_clear(struct ste_softc *sc)
{

	STE_LOCK_ASSERT(sc);

	/* Rx stats. */
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_RX_FRAMES);
	CSR_READ_1(sc, STE_STAT_RX_BCAST);
	CSR_READ_1(sc, STE_STAT_RX_MCAST);
	CSR_READ_1(sc, STE_STAT_RX_LOST);
	/* Tx stats. */
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_TX_FRAMES);
	CSR_READ_1(sc, STE_STAT_TX_BCAST);
	CSR_READ_1(sc, STE_STAT_TX_MCAST);
	CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
	CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
	CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
	CSR_READ_1(sc, STE_STAT_LATE_COLLS);
	CSR_READ_1(sc, STE_STAT_TX_DEFER);
	CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
	CSR_READ_1(sc, STE_STAT_TX_ABORT);
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-22 20:11:56 +00:00
|
|
|
ste_stats_update(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ifnet *ifp;
|
2009-12-23 19:18:07 +00:00
|
|
|
struct ste_hw_stats *stats;
|
|
|
|
uint32_t val;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_LOCK_ASSERT(sc);
|
2000-10-13 18:35:49 +00:00
|
|
|
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp = sc->ste_ifp;
|
2009-12-23 19:18:07 +00:00
|
|
|
stats = &sc->ste_stats;
|
|
|
|
/* Rx stats. */
|
|
|
|
val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) |
|
|
|
|
((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16;
|
|
|
|
val &= 0x000FFFFF;
|
|
|
|
stats->rx_bytes += val;
|
|
|
|
stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES);
|
|
|
|
stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST);
|
|
|
|
stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST);
|
|
|
|
stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST);
|
|
|
|
/* Tx stats. */
|
|
|
|
val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) |
|
|
|
|
((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16;
|
|
|
|
val &= 0x000FFFFF;
|
|
|
|
stats->tx_bytes += val;
|
|
|
|
stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES);
|
|
|
|
stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST);
|
|
|
|
stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST);
|
|
|
|
stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
|
|
|
|
val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
|
|
|
|
stats->tx_single_colls += val;
|
2014-09-19 03:51:26 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
|
2009-12-23 19:18:07 +00:00
|
|
|
val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
|
|
|
|
stats->tx_multi_colls += val;
|
2014-09-19 03:51:26 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
|
2009-12-23 19:18:07 +00:00
|
|
|
val += CSR_READ_1(sc, STE_STAT_LATE_COLLS);
|
|
|
|
stats->tx_late_colls += val;
|
2014-09-19 03:51:26 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_COLLISIONS, val);
|
2009-12-23 19:18:07 +00:00
|
|
|
stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER);
|
|
|
|
stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
|
|
|
|
stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Probe for a Sundance ST201 chip. Check the PCI vendor and device
|
|
|
|
* IDs against our list and return a device name if we find a match.
|
|
|
|
*/
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_probe(device_t dev)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2011-11-01 16:13:59 +00:00
|
|
|
const struct ste_type *t;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
t = ste_devs;
|
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
while (t->ste_name != NULL) {
|
1999-08-21 18:34:58 +00:00
|
|
|
if ((pci_get_vendor(dev) == t->ste_vid) &&
|
|
|
|
(pci_get_device(dev) == t->ste_did)) {
|
|
|
|
device_set_desc(dev, t->ste_name);
|
2005-02-24 21:32:56 +00:00
|
|
|
return (BUS_PROBE_DEFAULT);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
t++;
|
|
|
|
}
|
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (ENXIO);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attach the interface. Allocate softc structures, do ifmedia
|
|
|
|
* setup and ethernet/BPF attach.
|
|
|
|
*/
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Attach the interface: map registers (memory-space preferred, with an
 * I/O-space fallback and a per-device quirk), allocate the IRQ, read
 * the station address from EEPROM, set up DMA and the ifnet, attach
 * the PHY, and hook the interrupt last.  All failure paths funnel
 * through 'fail', where ste_detach() releases whatever was acquired.
 */
static int
ste_attach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	int error = 0, phy, pmc, prefer_iomap, rid;

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY since this chip reports multiple
	 * Note on the DFE-550 the PHY is at 1 on the DFE-580
	 * it is at 0 & 1. It is rev 0x12.
	 */
	if (pci_get_vendor(dev) == DL_VENDORID &&
	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
	    pci_get_revid(dev) == 0x12 )
		sc->ste_flags |= STE_FLAG_ONE_PHY;

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over IO space but use
	 * IO space for a device that is known to have issues on memory
	 * mapping.
	 */
	prefer_iomap = 0;
	if (pci_get_device(dev) == ST_DEVICEID_ST201_1)
		prefer_iomap = 1;
	else
		/* Allow a hint to override the default mapping choice. */
		resource_int_value(device_get_name(sc->ste_dev),
		    device_get_unit(sc->ste_dev), "prefer_iomap",
		    &prefer_iomap);
	if (prefer_iomap == 0) {
		sc->ste_res_id = PCIR_BAR(1);
		sc->ste_res_type = SYS_RES_MEMORY;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (prefer_iomap || sc->ste_res == NULL) {
		/* Fall back to (or directly use) I/O-port mapping. */
		sc->ste_res_id = PCIR_BAR(0);
		sc->ste_res_type = SYS_RES_IOPORT;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}
	ste_sysctl_node(sc);

	if ((error = ste_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	phy = MII_PHY_ANY;
	if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0)
		phy = 0;
	error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd,
	    ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_init = ste_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (uint8_t *)eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	/* Announce polling capability, but leave it disabled by default. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	/* Success falls through with error == 0; detach only on failure. */
	if (error)
		ste_detach(dev);

	return (error);
}
|
|
|
|
|
2003-04-17 20:32:06 +00:00
|
|
|
/*
|
|
|
|
* Shutdown hardware and free up resources. This can be called any
|
|
|
|
* time after the mutex has been initialized. It is called in both
|
|
|
|
* the error case in attach and the normal detach case so it needs
|
|
|
|
* to be careful about only freeing resources that have actually been
|
|
|
|
* allocated.
|
|
|
|
*/
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_detach(device_t dev)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
|
|
|
struct ifnet *ifp;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
2003-03-31 20:22:00 +00:00
|
|
|
KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp = sc->ste_ifp;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
Big polling(4) cleanup.
o Axe poll in trap.
o Axe IFF_POLLING flag from if_flags.
o Rework revision 1.21 (Giant removal), in such a way that
poll_mtx is not dropped during call to polling handler.
This fixes problem with idle polling.
o Make registration and deregistration from polling in a
functional way, insted of next tick/interrupt.
o Obsolete kern.polling.enable. Polling is turned on/off
with ifconfig.
Detailed kern_poll.c changes:
- Remove polling handler flags, introduced in 1.21. The are not
needed now.
- Forget and do not check if_flags, if_capenable and if_drv_flags.
- Call all registered polling handlers unconditionally.
- Do not drop poll_mtx, when entering polling handlers.
- In ether_poll() NET_LOCK_GIANT prior to locking poll_mtx.
- In netisr_poll() axe the block, where polling code asks drivers
to unregister.
- In netisr_poll() and ether_poll() do polling always, if any
handlers are present.
- In ether_poll_[de]register() remove a lot of error hiding code. Assert
that arguments are correct, instead.
- In ether_poll_[de]register() use standard return values in case of
error or success.
- Introduce poll_switch() that is a sysctl handler for kern.polling.enable.
poll_switch() goes through interface list and enabled/disables polling.
A message that kern.polling.enable is deprecated is printed.
Detailed driver changes:
- On attach driver announces IFCAP_POLLING in if_capabilities, but
not in if_capenable.
- On detach driver calls ether_poll_deregister() if polling is enabled.
- In polling handler driver obtains its lock and checks IFF_DRV_RUNNING
flag. If there is no, then unlocks and returns.
- In ioctl handler driver checks for IFCAP_POLLING flag requested to
be set or cleared. Driver first calls ether_poll_[de]register(), then
obtains driver lock and [dis/en]ables interrupts.
- In interrupt handler driver checks IFCAP_POLLING flag in if_capenable.
If present, then returns.This is important to protect from spurious
interrupts.
Reviewed by: ru, sam, jhb
2005-10-01 18:56:19 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
if (ifp->if_capenable & IFCAP_POLLING)
|
|
|
|
ether_poll_deregister(ifp);
|
|
|
|
#endif
|
|
|
|
|
2003-04-17 20:32:06 +00:00
|
|
|
/* These should only be active if attach succeeded */
|
2003-04-21 18:34:04 +00:00
|
|
|
if (device_is_attached(dev)) {
|
2009-11-19 22:06:40 +00:00
|
|
|
ether_ifdetach(ifp);
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_LOCK(sc);
|
2003-04-17 20:32:06 +00:00
|
|
|
ste_stop(sc);
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_UNLOCK(sc);
|
2009-12-22 20:11:56 +00:00
|
|
|
callout_drain(&sc->ste_callout);
|
2003-03-31 17:29:43 +00:00
|
|
|
}
|
2003-04-17 20:32:06 +00:00
|
|
|
if (sc->ste_miibus)
|
|
|
|
device_delete_child(dev, sc->ste_miibus);
|
|
|
|
bus_generic_detach(dev);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2003-03-31 17:29:43 +00:00
|
|
|
if (sc->ste_intrhand)
|
|
|
|
bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
|
|
|
|
if (sc->ste_irq)
|
|
|
|
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
|
|
|
|
if (sc->ste_res)
|
2009-12-22 21:39:34 +00:00
|
|
|
bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id,
|
|
|
|
sc->ste_res);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2005-10-13 21:11:20 +00:00
|
|
|
if (ifp)
|
|
|
|
if_free(ifp);
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
ste_dma_free(sc);
|
2000-10-13 18:35:49 +00:00
|
|
|
mtx_destroy(&sc->ste_mtx);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
struct ste_dmamap_arg {
|
|
|
|
bus_addr_t ste_busaddr;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
|
|
|
|
{
|
|
|
|
struct ste_dmamap_arg *ctx;
|
|
|
|
|
|
|
|
if (error != 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
|
|
|
|
|
|
|
|
ctx = (struct ste_dmamap_arg *)arg;
|
|
|
|
ctx->ste_busaddr = segs[0].ds_addr;
|
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-22 18:57:07 +00:00
|
|
|
ste_dma_alloc(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-22 18:57:07 +00:00
|
|
|
struct ste_chain *txc;
|
|
|
|
struct ste_chain_onefrag *rxc;
|
|
|
|
struct ste_dmamap_arg ctx;
|
|
|
|
int error, i;
|
|
|
|
|
|
|
|
/* Create parent DMA tag. */
|
|
|
|
error = bus_dma_tag_create(
|
|
|
|
bus_get_dma_tag(sc->ste_dev), /* parent */
|
|
|
|
1, 0, /* alignment, boundary */
|
|
|
|
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
|
|
|
|
0, /* nsegments */
|
|
|
|
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, NULL, /* lockfunc, lockarg */
|
|
|
|
&sc->ste_cdata.ste_parent_tag);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create parent DMA tag.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create DMA tag for Tx descriptor list. */
|
|
|
|
error = bus_dma_tag_create(
|
|
|
|
sc->ste_cdata.ste_parent_tag, /* parent */
|
|
|
|
STE_DESC_ALIGN, 0, /* alignment, boundary */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
STE_TX_LIST_SZ, /* maxsize */
|
|
|
|
1, /* nsegments */
|
|
|
|
STE_TX_LIST_SZ, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, NULL, /* lockfunc, lockarg */
|
|
|
|
&sc->ste_cdata.ste_tx_list_tag);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create Tx list DMA tag.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create DMA tag for Rx descriptor list. */
|
|
|
|
error = bus_dma_tag_create(
|
|
|
|
sc->ste_cdata.ste_parent_tag, /* parent */
|
|
|
|
STE_DESC_ALIGN, 0, /* alignment, boundary */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
STE_RX_LIST_SZ, /* maxsize */
|
|
|
|
1, /* nsegments */
|
|
|
|
STE_RX_LIST_SZ, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, NULL, /* lockfunc, lockarg */
|
|
|
|
&sc->ste_cdata.ste_rx_list_tag);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create Rx list DMA tag.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create DMA tag for Tx buffers. */
|
|
|
|
error = bus_dma_tag_create(
|
|
|
|
sc->ste_cdata.ste_parent_tag, /* parent */
|
|
|
|
1, 0, /* alignment, boundary */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
MCLBYTES * STE_MAXFRAGS, /* maxsize */
|
|
|
|
STE_MAXFRAGS, /* nsegments */
|
|
|
|
MCLBYTES, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, NULL, /* lockfunc, lockarg */
|
|
|
|
&sc->ste_cdata.ste_tx_tag);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create DMA tag for Rx buffers. */
|
|
|
|
error = bus_dma_tag_create(
|
|
|
|
sc->ste_cdata.ste_parent_tag, /* parent */
|
|
|
|
1, 0, /* alignment, boundary */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
MCLBYTES, /* maxsize */
|
|
|
|
1, /* nsegments */
|
|
|
|
MCLBYTES, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, NULL, /* lockfunc, lockarg */
|
|
|
|
&sc->ste_cdata.ste_rx_tag);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate DMA'able memory and load the DMA map for Tx list. */
|
|
|
|
error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
(void **)&sc->ste_ldata.ste_tx_list,
|
|
|
|
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
|
|
|
|
&sc->ste_cdata.ste_tx_list_map);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not allocate DMA'able memory for Tx list.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
ctx.ste_busaddr = 0;
|
|
|
|
error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
|
|
|
|
STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
|
|
|
|
if (error != 0 || ctx.ste_busaddr == 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not load DMA'able memory for Tx list.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;
|
|
|
|
|
|
|
|
/* Allocate DMA'able memory and load the DMA map for Rx list. */
|
|
|
|
error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
|
|
|
|
(void **)&sc->ste_ldata.ste_rx_list,
|
|
|
|
BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
|
|
|
|
&sc->ste_cdata.ste_rx_list_map);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not allocate DMA'able memory for Rx list.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
ctx.ste_busaddr = 0;
|
|
|
|
error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
|
|
|
|
sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
|
|
|
|
STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
|
|
|
|
if (error != 0 || ctx.ste_busaddr == 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not load DMA'able memory for Rx list.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;
|
|
|
|
|
|
|
|
/* Create DMA maps for Tx buffers. */
|
|
|
|
for (i = 0; i < STE_TX_LIST_CNT; i++) {
|
|
|
|
txc = &sc->ste_cdata.ste_tx_chain[i];
|
|
|
|
txc->ste_ptr = NULL;
|
|
|
|
txc->ste_mbuf = NULL;
|
|
|
|
txc->ste_next = NULL;
|
|
|
|
txc->ste_phys = 0;
|
|
|
|
txc->ste_map = NULL;
|
|
|
|
error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
|
|
|
|
&txc->ste_map);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create Tx dmamap.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* Create DMA maps for Rx buffers. */
|
|
|
|
if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
|
|
|
|
&sc->ste_cdata.ste_rx_sparemap)) != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create spare Rx dmamap.\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
for (i = 0; i < STE_RX_LIST_CNT; i++) {
|
|
|
|
rxc = &sc->ste_cdata.ste_rx_chain[i];
|
|
|
|
rxc->ste_ptr = NULL;
|
|
|
|
rxc->ste_mbuf = NULL;
|
|
|
|
rxc->ste_next = NULL;
|
|
|
|
rxc->ste_map = NULL;
|
|
|
|
error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
|
|
|
|
&rxc->ste_map);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(sc->ste_dev,
|
|
|
|
"could not create Rx dmamap.\n");
|
|
|
|
goto fail;
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
fail:
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ste_dma_free(struct ste_softc *sc)
|
|
|
|
{
|
|
|
|
struct ste_chain *txc;
|
|
|
|
struct ste_chain_onefrag *rxc;
|
|
|
|
int i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
/* Tx buffers. */
|
|
|
|
if (sc->ste_cdata.ste_tx_tag != NULL) {
|
|
|
|
for (i = 0; i < STE_TX_LIST_CNT; i++) {
|
|
|
|
txc = &sc->ste_cdata.ste_tx_chain[i];
|
|
|
|
if (txc->ste_map != NULL) {
|
|
|
|
bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
|
|
|
|
txc->ste_map);
|
|
|
|
txc->ste_map = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
|
|
|
|
sc->ste_cdata.ste_tx_tag = NULL;
|
|
|
|
}
|
|
|
|
/* Rx buffers. */
|
|
|
|
if (sc->ste_cdata.ste_rx_tag != NULL) {
|
|
|
|
for (i = 0; i < STE_RX_LIST_CNT; i++) {
|
|
|
|
rxc = &sc->ste_cdata.ste_rx_chain[i];
|
|
|
|
if (rxc->ste_map != NULL) {
|
|
|
|
bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
|
|
|
|
rxc->ste_map);
|
|
|
|
rxc->ste_map = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (sc->ste_cdata.ste_rx_sparemap != NULL) {
|
|
|
|
bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
|
|
|
|
sc->ste_cdata.ste_rx_sparemap);
|
|
|
|
sc->ste_cdata.ste_rx_sparemap = NULL;
|
|
|
|
}
|
|
|
|
bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
|
|
|
|
sc->ste_cdata.ste_rx_tag = NULL;
|
|
|
|
}
|
|
|
|
/* Tx descriptor list. */
|
|
|
|
if (sc->ste_cdata.ste_tx_list_tag != NULL) {
|
2014-06-11 14:53:58 +00:00
|
|
|
if (sc->ste_ldata.ste_tx_list_paddr != 0)
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_cdata.ste_tx_list_map);
|
2014-06-11 14:53:58 +00:00
|
|
|
if (sc->ste_ldata.ste_tx_list != NULL)
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_ldata.ste_tx_list,
|
|
|
|
sc->ste_cdata.ste_tx_list_map);
|
|
|
|
sc->ste_ldata.ste_tx_list = NULL;
|
2014-06-11 14:53:58 +00:00
|
|
|
sc->ste_ldata.ste_tx_list_paddr = 0;
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
|
|
|
|
sc->ste_cdata.ste_tx_list_tag = NULL;
|
|
|
|
}
|
|
|
|
/* Rx descriptor list. */
|
|
|
|
if (sc->ste_cdata.ste_rx_list_tag != NULL) {
|
2014-06-11 14:53:58 +00:00
|
|
|
if (sc->ste_ldata.ste_rx_list_paddr != 0)
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
|
|
|
|
sc->ste_cdata.ste_rx_list_map);
|
2014-06-11 14:53:58 +00:00
|
|
|
if (sc->ste_ldata.ste_rx_list != NULL)
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
|
|
|
|
sc->ste_ldata.ste_rx_list,
|
|
|
|
sc->ste_cdata.ste_rx_list_map);
|
|
|
|
sc->ste_ldata.ste_rx_list = NULL;
|
2014-06-11 14:53:58 +00:00
|
|
|
sc->ste_ldata.ste_rx_list_paddr = 0;
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
|
|
|
|
sc->ste_cdata.ste_rx_list_tag = NULL;
|
|
|
|
}
|
|
|
|
if (sc->ste_cdata.ste_parent_tag != NULL) {
|
|
|
|
bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
|
|
|
|
sc->ste_cdata.ste_parent_tag = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
|
|
|
|
{
|
|
|
|
struct mbuf *m;
|
|
|
|
bus_dma_segment_t segs[1];
|
|
|
|
bus_dmamap_t map;
|
|
|
|
int error, nsegs;
|
|
|
|
|
2012-12-04 09:32:43 +00:00
|
|
|
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
|
2009-12-22 18:57:07 +00:00
|
|
|
if (m == NULL)
|
|
|
|
return (ENOBUFS);
|
|
|
|
m->m_len = m->m_pkthdr.len = MCLBYTES;
|
|
|
|
m_adj(m, ETHER_ALIGN);
|
|
|
|
|
|
|
|
if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
|
|
|
|
sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
|
|
|
|
m_freem(m);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
if (rxc->ste_mbuf != NULL) {
|
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
|
|
|
|
BUS_DMASYNC_POSTREAD);
|
|
|
|
bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
|
|
|
|
}
|
|
|
|
map = rxc->ste_map;
|
|
|
|
rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
|
|
|
|
sc->ste_cdata.ste_rx_sparemap = map;
|
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
|
|
|
|
BUS_DMASYNC_PREREAD);
|
|
|
|
rxc->ste_mbuf = m;
|
|
|
|
rxc->ste_ptr->ste_status = 0;
|
|
|
|
rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
|
|
|
|
rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
|
|
|
|
STE_FRAG_LAST);
|
2009-12-21 20:18:01 +00:00
|
|
|
return (0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_init_rx_list(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_chain_data *cd;
|
|
|
|
struct ste_list_data *ld;
|
2009-12-22 18:57:07 +00:00
|
|
|
int error, i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-24 17:22:15 +00:00
|
|
|
sc->ste_int_rx_act = 0;
|
1999-08-21 18:34:58 +00:00
|
|
|
cd = &sc->ste_cdata;
|
2009-12-22 18:57:07 +00:00
|
|
|
ld = &sc->ste_ldata;
|
|
|
|
bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
|
1999-08-21 18:34:58 +00:00
|
|
|
for (i = 0; i < STE_RX_LIST_CNT; i++) {
|
|
|
|
cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
|
2009-12-22 18:57:07 +00:00
|
|
|
error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
1999-08-21 18:34:58 +00:00
|
|
|
if (i == (STE_RX_LIST_CNT - 1)) {
|
2009-12-22 18:57:07 +00:00
|
|
|
cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
|
2010-01-08 02:43:20 +00:00
|
|
|
ld->ste_rx_list[i].ste_next =
|
|
|
|
htole32(ld->ste_rx_list_paddr +
|
|
|
|
(sizeof(struct ste_desc_onefrag) * 0));
|
1999-08-21 18:34:58 +00:00
|
|
|
} else {
|
2009-12-22 18:57:07 +00:00
|
|
|
cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
|
2010-01-08 02:43:20 +00:00
|
|
|
ld->ste_rx_list[i].ste_next =
|
|
|
|
htole32(ld->ste_rx_list_paddr +
|
|
|
|
(sizeof(struct ste_desc_onefrag) * (i + 1)));
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cd->ste_rx_head = &cd->ste_rx_chain[0];
|
2009-12-22 18:57:07 +00:00
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
|
|
|
|
sc->ste_cdata.ste_rx_list_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-21 20:18:01 +00:00
|
|
|
return (0);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_init_tx_list(struct ste_softc *sc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_chain_data *cd;
|
|
|
|
struct ste_list_data *ld;
|
|
|
|
int i;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
cd = &sc->ste_cdata;
|
2009-12-22 18:57:07 +00:00
|
|
|
ld = &sc->ste_ldata;
|
|
|
|
bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
|
1999-08-21 18:34:58 +00:00
|
|
|
for (i = 0; i < STE_TX_LIST_CNT; i++) {
|
|
|
|
cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
|
2009-12-22 18:57:07 +00:00
|
|
|
cd->ste_tx_chain[i].ste_mbuf = NULL;
|
|
|
|
if (i == (STE_TX_LIST_CNT - 1)) {
|
|
|
|
cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0];
|
|
|
|
cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
|
|
|
|
ld->ste_tx_list_paddr +
|
|
|
|
(sizeof(struct ste_desc) * 0)));
|
|
|
|
} else {
|
|
|
|
cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1];
|
|
|
|
cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
|
|
|
|
ld->ste_tx_list_paddr +
|
|
|
|
(sizeof(struct ste_desc) * (i + 1))));
|
|
|
|
}
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
cd->ste_last_tx = NULL;
|
1999-12-07 20:14:42 +00:00
|
|
|
cd->ste_tx_prod = 0;
|
|
|
|
cd->ste_tx_cons = 0;
|
2009-12-22 18:57:07 +00:00
|
|
|
cd->ste_tx_cnt = 0;
|
|
|
|
|
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_cdata.ste_tx_list_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_init(void *xsc)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = xsc;
|
2000-10-13 18:35:49 +00:00
|
|
|
STE_LOCK(sc);
|
2005-08-30 20:35:08 +00:00
|
|
|
ste_init_locked(sc);
|
|
|
|
STE_UNLOCK(sc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_init_locked(struct ste_softc *sc)
|
2005-08-30 20:35:08 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ifnet *ifp;
|
2009-12-23 18:42:25 +00:00
|
|
|
struct mii_data *mii;
|
2009-12-24 18:17:53 +00:00
|
|
|
uint8_t val;
|
2009-12-21 20:18:01 +00:00
|
|
|
int i;
|
2005-08-30 20:35:08 +00:00
|
|
|
|
|
|
|
STE_LOCK_ASSERT(sc);
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp = sc->ste_ifp;
|
2009-12-23 18:42:25 +00:00
|
|
|
mii = device_get_softc(sc->ste_miibus);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-23 17:46:11 +00:00
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
|
|
|
|
return;
|
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
ste_stop(sc);
|
2009-12-22 20:57:30 +00:00
|
|
|
/* Reset the chip to a known state. */
|
|
|
|
ste_reset(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
/* Init our MAC address */
|
2007-06-15 21:45:41 +00:00
|
|
|
for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
|
|
|
|
CSR_WRITE_2(sc, STE_PAR0 + i,
|
|
|
|
((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
|
|
|
|
IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Init RX list */
|
2009-12-22 18:57:07 +00:00
|
|
|
if (ste_init_rx_list(sc) != 0) {
|
2006-09-15 10:40:54 +00:00
|
|
|
device_printf(sc->ste_dev,
|
2005-08-17 14:37:39 +00:00
|
|
|
"initialization failed: no memory for RX buffers\n");
|
1999-08-21 18:34:58 +00:00
|
|
|
ste_stop(sc);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2002-08-07 22:31:27 +00:00
|
|
|
/* Set RX polling interval */
|
2004-03-31 21:10:01 +00:00
|
|
|
CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);
|
2002-08-07 22:31:27 +00:00
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
/* Init TX descriptors */
|
|
|
|
ste_init_tx_list(sc);
|
|
|
|
|
2009-12-24 18:17:53 +00:00
|
|
|
/* Clear and disable WOL. */
|
|
|
|
val = CSR_READ_1(sc, STE_WAKE_EVENT);
|
|
|
|
val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
|
|
|
|
STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
|
|
|
|
CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
|
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
/* Set the TX freethresh value */
|
|
|
|
CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);
|
|
|
|
|
|
|
|
/* Set the TX start threshold for best performance. */
|
|
|
|
CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
|
|
|
|
|
|
|
|
/* Set the TX reclaim threshold. */
|
|
|
|
CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));
|
|
|
|
|
2009-12-23 18:24:22 +00:00
|
|
|
/* Accept VLAN length packets */
|
|
|
|
CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-23 18:24:22 +00:00
|
|
|
/* Set up the RX filter. */
|
|
|
|
ste_rxfilter(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
/* Load the address of the RX list. */
|
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
|
|
|
|
ste_wait(sc);
|
|
|
|
CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
|
2009-12-22 18:57:07 +00:00
|
|
|
STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
|
1999-08-21 18:34:58 +00:00
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
|
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
/* Set TX polling interval(defer until we TX first packet). */
|
2002-08-07 22:31:27 +00:00
|
|
|
CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
|
1999-12-07 20:14:42 +00:00
|
|
|
|
|
|
|
/* Load address of the TX list */
|
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
|
|
|
|
ste_wait(sc);
|
2002-08-07 22:31:27 +00:00
|
|
|
CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
|
1999-12-07 20:14:42 +00:00
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
|
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
|
|
|
|
ste_wait(sc);
|
2009-12-24 17:22:15 +00:00
|
|
|
/* Select 3.2us timer. */
|
|
|
|
STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED |
|
|
|
|
STE_DMACTL_COUNTDOWN_MODE);
|
1999-12-07 20:14:42 +00:00
|
|
|
|
1999-08-21 18:34:58 +00:00
|
|
|
/* Enable receiver and transmitter */
|
|
|
|
CSR_WRITE_2(sc, STE_MACCTL0, 0);
|
2002-08-07 22:31:27 +00:00
|
|
|
CSR_WRITE_2(sc, STE_MACCTL1, 0);
|
1999-08-21 18:34:58 +00:00
|
|
|
STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
|
|
|
|
STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);
|
|
|
|
|
|
|
|
/* Enable stats counters. */
|
|
|
|
STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
|
2009-12-23 19:18:07 +00:00
|
|
|
/* Clear stats counters. */
|
|
|
|
ste_stats_clear(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-24 17:22:15 +00:00
|
|
|
CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
|
1999-08-21 18:34:58 +00:00
|
|
|
CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
|
2004-03-31 20:39:20 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
/* Disable interrupts if we are polling. */
|
Big polling(4) cleanup.
o Axe poll in trap.
o Axe IFF_POLLING flag from if_flags.
o Rework revision 1.21 (Giant removal), in such a way that
poll_mtx is not dropped during call to polling handler.
This fixes problem with idle polling.
o Make registration and deregistration from polling in a
functional way, insted of next tick/interrupt.
o Obsolete kern.polling.enable. Polling is turned on/off
with ifconfig.
Detailed kern_poll.c changes:
- Remove polling handler flags, introduced in 1.21. The are not
needed now.
- Forget and do not check if_flags, if_capenable and if_drv_flags.
- Call all registered polling handlers unconditionally.
- Do not drop poll_mtx, when entering polling handlers.
- In ether_poll() NET_LOCK_GIANT prior to locking poll_mtx.
- In netisr_poll() axe the block, where polling code asks drivers
to unregister.
- In netisr_poll() and ether_poll() do polling always, if any
handlers are present.
- In ether_poll_[de]register() remove a lot of error hiding code. Assert
that arguments are correct, instead.
- In ether_poll_[de]register() use standard return values in case of
error or success.
- Introduce poll_switch() that is a sysctl handler for kern.polling.enable.
poll_switch() goes through interface list and enabled/disables polling.
A message that kern.polling.enable is deprecated is printed.
Detailed driver changes:
- On attach driver announces IFCAP_POLLING in if_capabilities, but
not in if_capenable.
- On detach driver calls ether_poll_deregister() if polling is enabled.
- In polling handler driver obtains its lock and checks IFF_DRV_RUNNING
flag. If there is no, then unlocks and returns.
- In ioctl handler driver checks for IFCAP_POLLING flag requested to
be set or cleared. Driver first calls ether_poll_[de]register(), then
obtains driver lock and [dis/en]ables interrupts.
- In interrupt handler driver checks IFCAP_POLLING flag in if_capenable.
If present, then returns.This is important to protect from spurious
interrupts.
Reviewed by: ru, sam, jhb
2005-10-01 18:56:19 +00:00
|
|
|
if (ifp->if_capenable & IFCAP_POLLING)
|
2004-03-31 20:39:20 +00:00
|
|
|
CSR_WRITE_2(sc, STE_IMR, 0);
|
2009-12-21 20:02:12 +00:00
|
|
|
else
|
Big polling(4) cleanup.
o Axe poll in trap.
o Axe IFF_POLLING flag from if_flags.
o Rework revision 1.21 (Giant removal), in such a way that
poll_mtx is not dropped during call to polling handler.
This fixes problem with idle polling.
o Make registration and deregistration from polling in a
functional way, insted of next tick/interrupt.
o Obsolete kern.polling.enable. Polling is turned on/off
with ifconfig.
Detailed kern_poll.c changes:
- Remove polling handler flags, introduced in 1.21. The are not
needed now.
- Forget and do not check if_flags, if_capenable and if_drv_flags.
- Call all registered polling handlers unconditionally.
- Do not drop poll_mtx, when entering polling handlers.
- In ether_poll() NET_LOCK_GIANT prior to locking poll_mtx.
- In netisr_poll() axe the block, where polling code asks drivers
to unregister.
- In netisr_poll() and ether_poll() do polling always, if any
handlers are present.
- In ether_poll_[de]register() remove a lot of error hiding code. Assert
that arguments are correct, instead.
- In ether_poll_[de]register() use standard return values in case of
error or success.
- Introduce poll_switch() that is a sysctl handler for kern.polling.enable.
poll_switch() goes through interface list and enabled/disables polling.
A message that kern.polling.enable is deprecated is printed.
Detailed driver changes:
- On attach driver announces IFCAP_POLLING in if_capabilities, but
not in if_capenable.
- On detach driver calls ether_poll_deregister() if polling is enabled.
- In polling handler driver obtains its lock and checks IFF_DRV_RUNNING
flag. If there is no, then unlocks and returns.
- In ioctl handler driver checks for IFCAP_POLLING flag requested to
be set or cleared. Driver first calls ether_poll_[de]register(), then
obtains driver lock and [dis/en]ables interrupts.
- In interrupt handler driver checks IFCAP_POLLING flag in if_capenable.
If present, then returns.This is important to protect from spurious
interrupts.
Reviewed by: ru, sam, jhb
2005-10-01 18:56:19 +00:00
|
|
|
#endif
|
2004-03-31 20:39:20 +00:00
|
|
|
/* Enable interrupts. */
|
1999-08-21 18:34:58 +00:00
|
|
|
CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
|
|
|
|
|
2009-12-23 18:42:25 +00:00
|
|
|
sc->ste_flags &= ~STE_FLAG_LINK;
|
|
|
|
/* Switch to the current media. */
|
|
|
|
mii_mediachg(mii);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2005-08-09 10:20:02 +00:00
|
|
|
ifp->if_drv_flags |= IFF_DRV_RUNNING;
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 20:11:56 +00:00
|
|
|
callout_reset(&sc->ste_callout, hz, ste_tick, sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Stop the adapter: quiesce DMA and the MAC, then release all mbufs
 * still held by the Rx and Tx descriptor rings.  Called with the
 * softc lock held (asserted below).
 */
static void
ste_stop(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	struct ste_chain *cur_tx;
	uint32_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	/* Stop the periodic tick and the Tx watchdog countdown. */
	callout_stop(&sc->ste_callout);
	sc->ste_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	/* Mask all interrupts and stop the countdown timer. */
	CSR_WRITE_2(sc, STE_IMR, 0);
	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	/* Stop pending DMA. */
	val = CSR_READ_4(sc, STE_DMACTL);
	val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL;
	CSR_WRITE_4(sc, STE_DMACTL, val);
	ste_wait(sc);
	/* Disable auto-polling. */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0);
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
	/* Nullify DMA address to stop any further DMA. */
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	/* Stop TX/RX MAC. */
	val = CSR_READ_2(sc, STE_MACCTL1);
	val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE |
	    STE_MACCTL1_STATS_DISABLE;
	CSR_WRITE_2(sc, STE_MACCTL1, val);
	/*
	 * The disable bits self-clear once the MAC has actually
	 * stopped; poll until all three have cleared or we time out.
	 */
	for (i = 0; i < STE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE |
		    STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0)
			break;
	}
	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "Stopping MAC timed out\n");
	/* Acknowledge any pending interrupts. */
	CSR_READ_2(sc, STE_ISR_ACK);
	ste_stats_update(sc);

	/* Free any mbufs still attached to the Rx ring. */
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cur_rx = &sc->ste_cdata.ste_rx_chain[i];
		if (cur_rx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map);
			m_freem(cur_rx->ste_mbuf);
			cur_rx->ste_mbuf = NULL;
		}
	}

	/* Free any mbufs still attached to the Tx ring. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[i];
		if (cur_tx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map);
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}
	}
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Issue a full chip reset (every reset bit in ASICCTL at once) and
 * wait for the self-clearing RESET_BUSY bit to indicate completion.
 */
static void
ste_reset(struct ste_softc *sc)
{
	uint32_t ctl;
	int i;

	ctl = CSR_READ_4(sc, STE_ASICCTL);
	ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET |
	    STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET |
	    STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET |
	    STE_ASICCTL_AUTOINIT_RESET |STE_ASICCTL_HOST_RESET |
	    STE_ASICCTL_EXTRESET_RESET;
	CSR_WRITE_4(sc, STE_ASICCTL, ctl);
	/* Read back to flush the posted write to the device. */
	CSR_READ_4(sc, STE_ASICCTL);
	/*
	 * Due to the need of accessing EEPROM controller can take
	 * up to 1ms to complete the global reset.
	 */
	DELAY(1000);

	/* Poll until the chip reports the reset has finished. */
	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "global reset never completed\n");
}
|
|
|
|
|
2009-12-22 23:57:10 +00:00
|
|
|
static void
|
|
|
|
ste_restart_tx(struct ste_softc *sc)
|
|
|
|
{
|
|
|
|
uint16_t mac;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < STE_TIMEOUT; i++) {
|
|
|
|
mac = CSR_READ_2(sc, STE_MACCTL1);
|
|
|
|
mac |= STE_MACCTL1_TX_ENABLE;
|
|
|
|
CSR_WRITE_2(sc, STE_MACCTL1, mac);
|
|
|
|
mac = CSR_READ_2(sc, STE_MACCTL1);
|
|
|
|
if ((mac & STE_MACCTL1_TX_ENABLED) != 0)
|
|
|
|
break;
|
|
|
|
DELAY(10);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (i == STE_TIMEOUT)
|
|
|
|
device_printf(sc->ste_dev, "starting Tx failed");
|
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Interface ioctl handler.  Dispatches on the command; anything not
 * handled here falls through to ether_ioctl().  The softc lock is
 * taken only around state that needs it; media ioctls go straight
 * to the MII layer.
 *
 * Returns 0 on success or an errno value.
 */
static int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * If only PROMISC/ALLMULTI changed while already
			 * running, a filter reprogram suffices; otherwise
			 * (re)initialize the whole interface.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->ste_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				ste_rxfilter(sc);
			else
				ste_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_stop(sc);
		/* Remember flags so the next call can compute the delta. */
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_rxfilter(sc);
		STE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		STE_LOCK(sc);
		/* Bits that the caller wants toggled. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_POLLING;
			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
				error = ether_poll_register(ste_poll, ifp);
				if (error != 0) {
					STE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		STE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Map an outbound mbuf chain for DMA and fill in the Tx descriptor
 * (TFD) at *txc.  On EFBIG the chain is collapsed to fit within
 * STE_MAXFRAGS segments and the load is retried once.
 *
 * On any failure path *m_head is freed and set to NULL (except a
 * plain mapping error, which is returned with the mbuf intact so
 * the caller can requeue it).  Returns 0 on success or an errno.
 */
static int
ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
{
	struct ste_frag *frag;
	struct mbuf *m;
	struct ste_desc *desc;
	bus_dma_segment_t txsegs[STE_MAXFRAGS];
	int error, i, nsegs;

	STE_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
	    txc->ste_map, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		/* Too many fragments: compact the chain and retry once. */
		m = m_collapse(*m_head, M_NOWAIT, STE_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
		    txc->ste_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		/* Defensive: a zero-length mapping is useless to the chip. */
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
	    BUS_DMASYNC_PREWRITE);

	/* Copy the DMA segments into the descriptor's fragment list. */
	desc = txc->ste_ptr;
	for (i = 0; i < nsegs; i++) {
		frag = &desc->ste_frags[i];
		frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
		frag->ste_len = htole32(txsegs[i].ds_len);
	}
	/* Mark the final fragment so the chip knows where the frame ends. */
	desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
	/*
	 * Because we use Tx polling we can't chain multiple
	 * Tx descriptors here. Otherwise we race with controller.
	 */
	desc->ste_next = 0;
	/* Request a Tx-complete interrupt only every STE_TX_INTR_FRAMES. */
	if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0)
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS |
		    STE_TXCTL_DMAINTR);
	else
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS);
	txc->ste_mbuf = *m_head;
	STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
	sc->ste_cdata.ste_tx_cnt++;

	return (0);
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_start(struct ifnet *ifp)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
2005-08-30 20:35:08 +00:00
|
|
|
|
|
|
|
sc = ifp->if_softc;
|
|
|
|
STE_LOCK(sc);
|
|
|
|
ste_start_locked(ifp);
|
|
|
|
STE_UNLOCK(sc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_start_locked(struct ifnet *ifp)
|
2005-08-30 20:35:08 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
|
|
|
struct ste_chain *cur_tx;
|
|
|
|
struct mbuf *m_head = NULL;
|
2009-12-22 18:57:07 +00:00
|
|
|
int enq;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = ifp->if_softc;
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_LOCK_ASSERT(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
|
2009-12-22 19:32:16 +00:00
|
|
|
if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
|
|
|
|
IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
|
1999-12-07 20:14:42 +00:00
|
|
|
return;
|
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
|
|
|
|
if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
|
|
|
|
/*
|
|
|
|
* Controller may have cached copy of the last used
|
|
|
|
* next ptr so we have to reserve one TFD to avoid
|
|
|
|
* TFD overruns.
|
|
|
|
*/
|
2005-08-09 10:20:02 +00:00
|
|
|
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
1999-12-07 20:14:42 +00:00
|
|
|
break;
|
|
|
|
}
|
2005-07-08 13:05:59 +00:00
|
|
|
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
|
1999-08-21 18:34:58 +00:00
|
|
|
if (m_head == NULL)
|
|
|
|
break;
|
2009-12-22 18:57:07 +00:00
|
|
|
cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
|
|
|
|
if (ste_encap(sc, &m_head, cur_tx) != 0) {
|
|
|
|
if (m_head == NULL)
|
|
|
|
break;
|
|
|
|
IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
|
2002-09-11 21:26:22 +00:00
|
|
|
break;
|
2009-12-22 18:57:07 +00:00
|
|
|
}
|
|
|
|
if (sc->ste_cdata.ste_last_tx == NULL) {
|
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_cdata.ste_tx_list_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2002-08-07 22:31:27 +00:00
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
|
|
|
|
ste_wait(sc);
|
|
|
|
CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
|
2009-12-22 18:57:07 +00:00
|
|
|
STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
|
2002-08-07 22:31:27 +00:00
|
|
|
CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
|
|
|
|
STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
|
|
|
|
ste_wait(sc);
|
2009-12-22 18:57:07 +00:00
|
|
|
} else {
|
|
|
|
sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
|
|
|
|
sc->ste_cdata.ste_last_tx->ste_phys;
|
|
|
|
bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
|
|
|
|
sc->ste_cdata.ste_tx_list_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2002-08-07 22:31:27 +00:00
|
|
|
}
|
2009-12-22 18:57:07 +00:00
|
|
|
sc->ste_cdata.ste_last_tx = cur_tx;
|
2002-08-07 22:31:27 +00:00
|
|
|
|
2009-12-22 18:57:07 +00:00
|
|
|
enq++;
|
1999-08-21 18:34:58 +00:00
|
|
|
/*
|
1999-12-07 20:14:42 +00:00
|
|
|
* If there's a BPF listener, bounce a copy of this frame
|
1999-08-21 18:34:58 +00:00
|
|
|
* to him.
|
|
|
|
*/
|
2009-12-22 18:57:07 +00:00
|
|
|
BPF_MTAP(ifp, m_head);
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
2009-12-22 18:57:07 +00:00
|
|
|
|
|
|
|
if (enq > 0)
|
|
|
|
sc->ste_timer = STE_TX_TIMEOUT;
|
1999-08-21 18:34:58 +00:00
|
|
|
}
|
|
|
|
|
2002-08-23 23:49:02 +00:00
|
|
|
/*
 * Per-tick Tx watchdog, driven from the driver's callout.  ste_timer
 * is armed in ste_start_locked(); when it counts down to zero with
 * transmissions still outstanding, the interface is reset.  Requires
 * the softc lock (asserted below).
 */
static void
ste_watchdog(struct ste_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ste_ifp;
	STE_LOCK_ASSERT(sc);

	/* Not armed, or armed and not yet expired: nothing to do. */
	if (sc->ste_timer == 0 || --sc->ste_timer)
		return;

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	/* Reclaim whatever completed, then fully reinitialize. */
	ste_txeof(sc);
	ste_txeoc(sc);
	ste_rxeof(sc, -1);
	/* Clear RUNNING so ste_init_locked() performs a full re-init. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ste_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);
}
|
|
|
|
|
2007-11-22 02:45:00 +00:00
|
|
|
static int
|
2009-12-21 19:50:29 +00:00
|
|
|
ste_shutdown(device_t dev)
|
2009-12-24 18:17:53 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (ste_suspend(dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ste_suspend(device_t dev)
|
1999-08-21 18:34:58 +00:00
|
|
|
{
|
2009-12-21 20:18:01 +00:00
|
|
|
struct ste_softc *sc;
|
1999-08-21 18:34:58 +00:00
|
|
|
|
|
|
|
sc = device_get_softc(dev);
|
|
|
|
|
2005-08-30 20:35:08 +00:00
|
|
|
STE_LOCK(sc);
|
1999-08-21 18:34:58 +00:00
|
|
|
ste_stop(sc);
|
2009-12-24 18:17:53 +00:00
|
|
|
ste_setwol(sc);
|
|
|
|
STE_UNLOCK(sc);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Resume method: clear any PME state left over from suspend and,
 * if the interface was administratively up, reinitialize it.
 * Always succeeds.
 */
static int
ste_resume(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->ste_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->ste_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	ifp = sc->ste_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		/* Force a full re-init by clearing RUNNING first. */
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ste_init_locked(sc);
	}
	STE_UNLOCK(sc);

	return (0);
}
|
2009-12-23 19:18:07 +00:00
|
|
|
|
|
|
|
/* Helpers for registering read-only 32/64-bit statistic counters. */
#define	STE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	STE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Build the per-device sysctl tree: the tunable Rx interrupt
 * moderation knob plus the dev.ste.N.stats.{rx,tx} counter nodes
 * backed by the counters in sc->ste_stats.
 */
static void
ste_sysctl_node(struct ste_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ste_hw_stats *stats;

	stats = &sc->ste_stats;
	ctx = device_get_sysctl_ctx(sc->ste_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation");
	/* Pull in device tunables. */
	sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT;
	resource_int_value(device_get_name(sc->ste_dev),
	    device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "STE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames",
	    &stats->rx_lost_frames, "Lost frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs",
	    &stats->tx_carrsense_errs, "Carrier sense errors");
	STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_frames_defered, "Frames with deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defers, "Frames with excessive derferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to Excessive collisions");
}

#undef STE_SYSCTL_STAT_ADD32
#undef STE_SYSCTL_STAT_ADD64
|
2009-12-24 18:17:53 +00:00
|
|
|
|
|
|
|
/*
 * Program wake-on-LAN state before suspend/shutdown: enable magic
 * packet wakeup in the chip and request PME through PCI power
 * management if IFCAP_WOL_MAGIC is enabled; otherwise disable both.
 * Requires the softc lock (asserted below).
 */
static void
ste_setwol(struct ste_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;
	int pmc;

	STE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
		/* No PM capability: WOL cannot work, so disable it. */
		/* Disable WOL. */
		CSR_READ_1(sc, STE_WAKE_EVENT);
		CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
		return;
	}

	ifp = sc->ste_ifp;
	/* Clear all wake-event enables, then set only what's wanted. */
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
	/* Request PME. */
	pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
|