Convert bge(4) to use the driver API.

Submitted by:   Sreekanth Rupavatharam <rupavath@juniper.net>
Obtained from:  Juniper Networks, Inc.
Marcel Moolenaar 2014-07-01 19:50:47 +00:00
parent ffc6fb79b6
commit fba8b10966

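The conversion is mechanical: every direct dereference of a struct ifnet member is replaced by a call into the opaque if_t accessor KPI, so the driver no longer depends on the layout of struct ifnet. A minimal sketch of the idiom, using only accessors that appear in this commit (xx_running_old/xx_running_new are made-up names for illustration, not code from bge(4)):

    #include <net/if.h>
    #include <net/if_var.h>	/* if_t and its accessors (kernel KPI) */

    /* Before: the driver reads struct ifnet fields directly. */
    static int
    xx_running_old(struct ifnet *ifp)
    {
    	return ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0);
    }

    /* After: the same test through the opaque handle. */
    static int
    xx_running_new(if_t ifp)
    {
    	return ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0);
    }

The if_set*bits() accessors take a set mask followed by a clear mask, so setting a flag reads if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0) and clearing one reads if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE), as seen repeatedly below.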

@@ -422,18 +422,18 @@ static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *);
 static void bge_intr(void *);
 static int bge_msi_intr(void *);
 static void bge_intr_task(void *, int);
-static void bge_start_locked(struct ifnet *);
-static void bge_start(struct ifnet *);
-static int bge_ioctl(struct ifnet *, u_long, caddr_t);
+static void bge_start_locked(if_t);
+static void bge_start(if_t);
+static int bge_ioctl(if_t, u_long, caddr_t);
 static void bge_init_locked(struct bge_softc *);
 static void bge_init(void *);
 static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t);
 static void bge_stop(struct bge_softc *);
 static void bge_watchdog(struct bge_softc *);
 static int bge_shutdown(device_t);
-static int bge_ifmedia_upd_locked(struct ifnet *);
-static int bge_ifmedia_upd(struct ifnet *);
-static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
+static int bge_ifmedia_upd_locked(if_t);
+static int bge_ifmedia_upd(if_t);
+static void bge_ifmedia_sts(if_t, struct ifmediareq *);
 static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *);
 static int bge_read_nvram(struct bge_softc *, caddr_t, int, int);
@@ -474,7 +474,7 @@ static int bge_miibus_readreg(device_t, int, int);
 static int bge_miibus_writereg(device_t, int, int, int);
 static void bge_miibus_statchg(device_t);
 #ifdef DEVICE_POLLING
-static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
+static int bge_poll(if_t ifp, enum poll_cmd cmd, int count);
 #endif
 
 #define	BGE_RESET_SHUTDOWN	0
@@ -1244,7 +1244,7 @@ bge_miibus_statchg(device_t dev)
 	uint32_t mac_mode, rx_mode, tx_mode;
 
 	sc = device_get_softc(dev);
-	if ((sc->bge_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+	if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0)
 		return;
 
 	mii = device_get_softc(sc->bge_miibus);
@@ -1320,7 +1320,7 @@ bge_newbuf_std(struct bge_softc *sc, int i)
 	int error, nsegs;
 
 	if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
-	    (sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
+	    (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
 	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
 		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
 		if (m == NULL)
@@ -1598,14 +1598,14 @@ bge_init_tx_ring(struct bge_softc *sc)
 static void
 bge_setpromisc(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 
 	BGE_LOCK_ASSERT(sc);
 
 	ifp = sc->bge_ifp;
 
 	/* Enable or disable promiscuous mode as needed. */
-	if (ifp->if_flags & IFF_PROMISC)
+	if (if_getflags(ifp) & IFF_PROMISC)
 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
 	else
 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC);
@@ -1614,16 +1614,28 @@ bge_setpromisc(struct bge_softc *sc)
 static void
 bge_setmulti(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
-	struct ifmultiaddr *ifma;
+	if_t ifp;
+	int mc_count = 0;
 	uint32_t hashes[4] = { 0, 0, 0, 0 };
-	int h, i;
+	int h, i, mcnt;
+	unsigned char *mta;
 
 	BGE_LOCK_ASSERT(sc);
 
 	ifp = sc->bge_ifp;
 
-	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+	mc_count = if_multiaddr_count(ifp, -1);
+	mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN *
+	    mc_count, M_DEVBUF, M_NOWAIT);
+	if (mta == NULL) {
+		device_printf(sc->bge_dev,
+		    "Failed to allocate temp mcast list\n");
+		return;
+	}
+
+	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
 		for (i = 0; i < 4; i++)
 			CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF);
+		free(mta, M_DEVBUF);
 		return;
@@ -1633,32 +1644,30 @@ bge_setmulti(struct bge_softc *sc)
 	for (i = 0; i < 4; i++)
 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0);
 
 	/* Now program new ones. */
-	if_maddr_rlock(ifp);
-	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
-		if (ifma->ifma_addr->sa_family != AF_LINK)
-			continue;
+	if_multiaddr_array(ifp, mta, &mcnt, mc_count);
+	for (i = 0; i < mcnt; i++) {
 		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
-		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F;
+		    (mta + (i * ETHER_ADDR_LEN))), ETHER_ADDR_LEN) & 0x7F;
 		hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F);
 	}
-	if_maddr_runlock(ifp);
 
 	for (i = 0; i < 4; i++)
 		CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]);
+	free(mta, M_DEVBUF);
 }
 
 static void
 bge_setvlan(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 
 	BGE_LOCK_ASSERT(sc);
 
 	ifp = sc->bge_ifp;
 
 	/* Enable or disable VLAN tag stripping as needed. */
-	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+	if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING)
 		BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
 	else
 		BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG);
@@ -1994,7 +2003,7 @@ bge_blockinit(struct bge_softc *sc)
 	/* Configure mbuf pool watermarks */
 	if (BGE_IS_5717_PLUS(sc)) {
 		CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0);
-		if (sc->bge_ifp->if_mtu > ETHERMTU) {
+		if (if_getmtu(sc->bge_ifp) > ETHERMTU) {
 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e);
 			CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea);
 		} else {
@@ -3294,7 +3303,7 @@ bge_devinfo(struct bge_softc *sc)
 static int
 bge_attach(device_t dev)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	struct bge_softc *sc;
 	uint32_t hwcfg = 0, misccfg, pcistate;
 	u_char eaddr[ETHER_ADDR_LEN];
@@ -3723,28 +3732,27 @@ bge_attach(device_t dev)
 		error = ENXIO;
 		goto fail;
 	}
 
-	ifp->if_softc = sc;
+	if_setsoftc(ifp, sc);
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
-	ifp->if_ioctl = bge_ioctl;
-	ifp->if_start = bge_start;
-	ifp->if_init = bge_init;
-	ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1;
-	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
-	IFQ_SET_READY(&ifp->if_snd);
-	ifp->if_hwassist = sc->bge_csum_features;
-	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
-	    IFCAP_VLAN_MTU;
+	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+	if_setioctlfn(ifp, bge_ioctl);
+	if_setstartfn(ifp, bge_start);
+	if_setinitfn(ifp, bge_init);
+	if_setsendqlen(ifp, BGE_TX_RING_CNT);
+	if_setsendqready(ifp);
+	if_sethwassist(ifp, sc->bge_csum_features);
+	if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING |
+	    IFCAP_VLAN_MTU);
 	if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) {
-		ifp->if_hwassist |= CSUM_TSO;
-		ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_VLAN_HWTSO;
+		if_sethwassistbits(ifp, CSUM_TSO, 0);
+		if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0);
 	}
 #ifdef IFCAP_VLAN_HWCSUM
-	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
+	if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
 #endif
-	ifp->if_capenable = ifp->if_capabilities;
+	if_setcapenable(ifp, if_getcapabilities(ifp));
 #ifdef DEVICE_POLLING
-	ifp->if_capabilities |= IFCAP_POLLING;
+	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
 #endif
 
 	/*
@@ -3752,9 +3760,9 @@ bge_attach(device_t dev)
 	 * to hardware bugs.
 	 */
 	if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) {
-		ifp->if_capabilities &= ~IFCAP_HWCSUM;
-		ifp->if_capenable &= ~IFCAP_HWCSUM;
-		ifp->if_hwassist = 0;
+		if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM);
+		if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
+		if_sethwassist(ifp, 0);
 	}
 
 	/*
@@ -3829,7 +3837,7 @@ bge_attach(device_t dev)
 		sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED;
 
 	if (sc->bge_flags & BGE_FLAG_TBI) {
-		ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
+		ifmedia_init_drv(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd,
 		    bge_ifmedia_sts);
 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL);
 		ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX,
@@ -3850,9 +3858,10 @@ bge_attach(device_t dev)
 again:
 	bge_asf_driver_up(sc);
 
-	error = mii_attach(dev, &sc->bge_miibus, ifp, bge_ifmedia_upd,
-	    bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY,
-	    MIIF_DOPAUSE);
+	error = mii_attach(dev, &sc->bge_miibus, ifp,
+	    (ifm_change_cb_t)bge_ifmedia_upd,
+	    (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr,
+	    MII_OFFSET_ANY, MIIF_DOPAUSE);
 	if (error != 0) {
 		if (trys++ < 4) {
 			device_printf(sc->bge_dev, "Try again\n");
@@ -3889,7 +3898,7 @@ again:
 	ether_ifattach(ifp, eaddr);
 
 	/* Tell upper layer we support long frames. */
-	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
 
 	/*
 	 * Hookup IRQ last.
@@ -3936,13 +3945,13 @@ static int
 bge_detach(device_t dev)
 {
 	struct bge_softc *sc;
-	struct ifnet *ifp;
+	if_t ifp;
 
 	sc = device_get_softc(dev);
 	ifp = sc->bge_ifp;
 
 #ifdef DEVICE_POLLING
-	if (ifp->if_capenable & IFCAP_POLLING)
+	if (if_getcapenable(ifp) & IFCAP_POLLING)
 		ether_poll_deregister(ifp);
 #endif
@@ -4298,7 +4307,7 @@ bge_rxreuse_jumbo(struct bge_softc *sc, int i)
 static int
 bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	int rx_npkts = 0, stdcnt = 0, jumbocnt = 0;
 	uint16_t rx_cons;
@@ -4315,8 +4324,8 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 	bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
 	    sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE);
 	if (BGE_IS_JUMBO_CAPABLE(sc) &&
-	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
-	    (MCLBYTES - ETHER_ALIGN))
+	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))
 		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
 		    sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE);
@@ -4328,7 +4337,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 		int have_tag = 0;
 
 #ifdef DEVICE_POLLING
-		if (ifp->if_capenable & IFCAP_POLLING) {
+		if (if_getcapenable(ifp) & IFCAP_POLLING) {
 			if (sc->rxcycles <= 0)
 				break;
 			sc->rxcycles--;
@@ -4340,7 +4349,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 		rxidx = cur_rx->bge_idx;
 		BGE_INC(rx_cons, sc->bge_return_ring_cnt);
 
-		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING &&
+		if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING &&
 		    cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) {
 			have_tag = 1;
 			vlan_tag = cur_rx->bge_vlan_tag;
@@ -4355,7 +4364,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 			}
 			if (bge_newbuf_jumbo(sc, rxidx) != 0) {
 				bge_rxreuse_jumbo(sc, rxidx);
-				ifp->if_iqdrops++;
+				if_inciqdrops(ifp, 1);
 				continue;
 			}
 			BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT);
@@ -4368,13 +4377,13 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 			}
 			if (bge_newbuf_std(sc, rxidx) != 0) {
 				bge_rxreuse_std(sc, rxidx);
-				ifp->if_iqdrops++;
+				if_inciqdrops(ifp, 1);
 				continue;
 			}
 			BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
 		}
 
-		ifp->if_ipackets++;
+		if_incipackets(ifp, 1);
 #ifndef __NO_STRICT_ALIGNMENT
 		/*
 		 * For architectures with strict alignment we must make sure
@@ -4389,7 +4398,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 		m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN;
 		m->m_pkthdr.rcvif = ifp;
 
-		if (ifp->if_capenable & IFCAP_RXCSUM)
+		if (if_getcapenable(ifp) & IFCAP_RXCSUM)
 			bge_rxcsum(sc, cur_rx, m);
 
 		/*
@@ -4403,13 +4412,13 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 		if (holdlck != 0) {
 			BGE_UNLOCK(sc);
-			(*ifp->if_input)(ifp, m);
+			if_input(ifp, m);
 			BGE_LOCK(sc);
 		} else
-			(*ifp->if_input)(ifp, m);
+			if_input(ifp, m);
 		rx_npkts++;
 
-		if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
 			return (rx_npkts);
 	}
@@ -4437,7 +4446,7 @@ bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck)
 	 * If you need correct statistics, you can enable this check.
	 */
 	if (BGE_IS_5705_PLUS(sc))
-		ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS);
+		if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS));
 #endif
 
 	return (rx_npkts);
 }
@@ -4481,7 +4490,7 @@ static void
 bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
 {
 	struct bge_tx_bd *cur_tx;
-	struct ifnet *ifp;
+	if_t ifp;
 
 	BGE_LOCK_ASSERT(sc);
@@ -4503,7 +4512,7 @@ bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
 		idx = sc->bge_tx_saved_considx;
 		cur_tx = &sc->bge_ldata.bge_tx_ring[idx];
 		if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
-			ifp->if_opackets++;
+			if_incopackets(ifp, 1);
 		if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
 			bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
 			    sc->bge_cdata.bge_tx_dmamap[idx],
@@ -4517,22 +4526,22 @@ bge_txeof(struct bge_softc *sc, uint16_t tx_cons)
 		BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT);
 	}
 
-	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 	if (sc->bge_txcnt == 0)
 		sc->bge_timer = 0;
 }
 
 #ifdef DEVICE_POLLING
 static int
-bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+bge_poll(if_t ifp, enum poll_cmd cmd, int count)
 {
-	struct bge_softc *sc = ifp->if_softc;
+	struct bge_softc *sc = if_getsoftc(ifp);
 	uint16_t rx_prod, tx_cons;
 	uint32_t statusword;
 	int rx_npkts = 0;
 
 	BGE_LOCK(sc);
 
-	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
 		BGE_UNLOCK(sc);
 		return (rx_npkts);
 	}
@@ -4564,12 +4573,12 @@ bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 	sc->rxcycles = count;
 	rx_npkts = bge_rxeof(sc, rx_prod, 1);
-	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
 		BGE_UNLOCK(sc);
 		return (rx_npkts);
 	}
 	bge_txeof(sc, tx_cons);
-	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+	if (!if_sendq_empty(ifp))
 		bge_start_locked(ifp);
 
 	BGE_UNLOCK(sc);
@@ -4595,7 +4604,7 @@ static void
 bge_intr_task(void *arg, int pending)
 {
 	struct bge_softc *sc;
-	struct ifnet *ifp;
+	if_t ifp;
 	uint32_t status, status_tag;
 	uint16_t rx_prod, tx_cons;
@@ -4603,7 +4612,7 @@ bge_intr_task(void *arg, int pending)
 	ifp = sc->bge_ifp;
 
 	BGE_LOCK(sc);
-	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
 		BGE_UNLOCK(sc);
 		return;
 	}
@@ -4632,17 +4641,17 @@ bge_intr_task(void *arg, int pending)
 	/* Let controller work. */
 	bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag);
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
 	    sc->bge_rx_saved_considx != rx_prod) {
 		/* Check RX return ring producer/consumer. */
 		BGE_UNLOCK(sc);
 		bge_rxeof(sc, rx_prod, 0);
 		BGE_LOCK(sc);
 	}
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 		/* Check TX ring producer/consumer. */
 		bge_txeof(sc, tx_cons);
-		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		if (!if_sendq_empty(ifp))
 			bge_start_locked(ifp);
 	}
 	BGE_UNLOCK(sc);
@@ -4652,7 +4661,7 @@ static void
 bge_intr(void *xsc)
 {
 	struct bge_softc *sc;
-	struct ifnet *ifp;
+	if_t ifp;
 	uint32_t statusword;
 	uint16_t rx_prod, tx_cons;
@@ -4663,7 +4672,7 @@ bge_intr(void *xsc)
 	ifp = sc->bge_ifp;
 
 #ifdef DEVICE_POLLING
-	if (ifp->if_capenable & IFCAP_POLLING) {
+	if (if_getcapenable(ifp) & IFCAP_POLLING) {
 		BGE_UNLOCK(sc);
 		return;
 	}
@@ -4712,18 +4721,18 @@ bge_intr(void *xsc)
 	    statusword || sc->bge_link_evt)
 		bge_link_upd(sc);
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 		/* Check RX return ring producer/consumer. */
 		bge_rxeof(sc, rx_prod, 1);
 	}
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 		/* Check TX ring producer/consumer. */
 		bge_txeof(sc, tx_cons);
 	}
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
-	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING &&
+	    !if_sendq_empty(ifp))
 		bge_start_locked(ifp);
 
 	BGE_UNLOCK(sc);
@@ -4787,7 +4796,7 @@ bge_tick(void *xsc)
 		 */
 #ifdef DEVICE_POLLING
 		/* In polling mode we poll link state in bge_poll(). */
-		if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING))
+		if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING))
 #endif
 		{
 			sc->bge_link_evt++;
@@ -4808,7 +4817,7 @@ bge_tick(void *xsc)
 static void
 bge_stats_update_regs(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	struct bge_mac_stats *stats;
 	uint32_t val;
@@ -4908,9 +4917,9 @@ bge_stats_update_regs(struct bge_softc *sc)
 	stats->RecvThresholdHit +=
 	    CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT);
 
-	ifp->if_collisions = (u_long)stats->etherStatsCollisions;
-	ifp->if_ierrors = (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
-	    stats->InputErrors);
+	if_setcollisions(ifp, (u_long)stats->etherStatsCollisions);
+	if_setierrors(ifp, (u_long)(stats->NoMoreRxBDs + stats->InputDiscards +
+	    stats->InputErrors));
 
 	if (sc->bge_flags & BGE_FLAG_RDMA_BUG) {
 		/*
@@ -4976,7 +4985,7 @@ bge_stats_clear_regs(struct bge_softc *sc)
 static void
 bge_stats_update(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	bus_size_t stats;
 	uint32_t cnt;	/* current register value */
@@ -4988,21 +4997,21 @@ bge_stats_update(struct bge_softc *sc)
 	    CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat))
 
 	cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo);
-	ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions);
+	if_inccollisions(ifp, (uint32_t)(cnt - sc->bge_tx_collisions));
 	sc->bge_tx_collisions = cnt;
 
 	cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo);
-	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_nobds);
+	if_incierrors(ifp, (uint32_t)(cnt - sc->bge_rx_nobds));
 	sc->bge_rx_nobds = cnt;
 
 	cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo);
-	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_inerrs);
+	if_incierrors(ifp, (uint32_t)(cnt - sc->bge_rx_inerrs));
 	sc->bge_rx_inerrs = cnt;
 
 	cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo);
-	ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards);
+	if_incierrors(ifp, (uint32_t)(cnt - sc->bge_rx_discards));
 	sc->bge_rx_discards = cnt;
 
 	cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo);
-	ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards);
+	if_incoerrors(ifp, (uint32_t)(cnt - sc->bge_tx_discards));
 	sc->bge_tx_discards = cnt;
 
 #undef	READ_STAT
@@ -5329,29 +5338,29 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
  * to the mbuf data regions directly in the transmit descriptors.
  */
 static void
-bge_start_locked(struct ifnet *ifp)
+bge_start_locked(if_t ifp)
 {
 	struct bge_softc *sc;
 	struct mbuf *m_head;
 	uint32_t prodidx;
 	int count;
 
-	sc = ifp->if_softc;
+	sc = if_getsoftc(ifp);
 	BGE_LOCK_ASSERT(sc);
 
 	if (!sc->bge_link ||
-	    (ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+	    (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
 	    IFF_DRV_RUNNING)
 		return;
 
 	prodidx = sc->bge_tx_prodidx;
 
-	for (count = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
+	for (count = 0; !if_sendq_empty(ifp);) {
 		if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) {
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 			break;
 		}
-		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+		m_head = if_dequeue(ifp);
 		if (m_head == NULL)
 			break;
@@ -5363,8 +5372,8 @@ bge_start_locked(struct ifnet *ifp)
 		if (bge_encap(sc, &m_head, &prodidx)) {
 			if (m_head == NULL)
 				break;
-			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+			if_sendq_prepend(ifp, m_head);
+			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
 			break;
 		}
 		++count;
@@ -5373,11 +5382,7 @@ bge_start_locked(struct ifnet *ifp)
 		 * If there's a BPF listener, bounce a copy of this frame
 		 * to him.
 		 */
-#ifdef ETHER_BPF_MTAP
-		ETHER_BPF_MTAP(ifp, m_head);
-#else
-		BPF_MTAP(ifp, m_head);
-#endif
+		if_bpfmtap(ifp, m_head);
 	}
 
 	if (count > 0) {
@@ -5403,11 +5408,11 @@ bge_start_locked(struct ifnet *ifp)
  * to the mbuf data regions directly in the transmit descriptors.
  */
 static void
-bge_start(struct ifnet *ifp)
+bge_start(if_t ifp)
 {
 	struct bge_softc *sc;
 
-	sc = ifp->if_softc;
+	sc = if_getsoftc(ifp);
 	BGE_LOCK(sc);
 	bge_start_locked(ifp);
 	BGE_UNLOCK(sc);
@@ -5416,7 +5421,7 @@ bge_start(struct ifnet *ifp)
 static void
 bge_init_locked(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	uint16_t *m;
 	uint32_t mode;
@@ -5424,7 +5429,7 @@ bge_init_locked(struct bge_softc *sc)
 	ifp = sc->bge_ifp;
 
-	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 		return;
 
 	/* Cancel pending I/O and flush buffers. */
@@ -5450,9 +5455,9 @@ bge_init_locked(struct bge_softc *sc)
 	ifp = sc->bge_ifp;
 
 	/* Specify MTU. */
-	CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu +
+	CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) +
 	    ETHER_HDR_LEN + ETHER_CRC_LEN +
-	    (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
+	    (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0));
 
 	/* Load our MAC address. */
 	m = (uint16_t *)IF_LLADDR(sc->bge_ifp);
@@ -5473,10 +5478,10 @@ bge_init_locked(struct bge_softc *sc)
 		sc->bge_csum_features &= ~CSUM_UDP;
 	else
 		sc->bge_csum_features |= CSUM_UDP;
-	if (ifp->if_capabilities & IFCAP_TXCSUM &&
-	    ifp->if_capenable & IFCAP_TXCSUM) {
-		ifp->if_hwassist &= ~(BGE_CSUM_FEATURES | CSUM_UDP);
-		ifp->if_hwassist |= sc->bge_csum_features;
+	if (if_getcapabilities(ifp) & IFCAP_TXCSUM &&
+	    if_getcapenable(ifp) & IFCAP_TXCSUM) {
+		if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP));
+		if_sethwassistbits(ifp, sc->bge_csum_features, 0);
 	}
 
 	/* Init RX ring. */
@@ -5506,8 +5511,8 @@ bge_init_locked(struct bge_softc *sc)
 	/* Init jumbo RX ring. */
 	if (BGE_IS_JUMBO_CAPABLE(sc) &&
-	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN >
-	    (MCLBYTES - ETHER_ALIGN)) {
+	    if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN +
+	    ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) {
 		if (bge_init_rx_ring_jumbo(sc) != 0) {
 			device_printf(sc->bge_dev,
 			    "no memory for jumbo Rx buffers.\n");
@@ -5568,7 +5573,7 @@ bge_init_locked(struct bge_softc *sc)
 #ifdef DEVICE_POLLING
 	/* Disable interrupts if we are polling. */
-	if (ifp->if_capenable & IFCAP_POLLING) {
+	if (if_getcapenable(ifp) & IFCAP_POLLING) {
 		BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
 		    BGE_PCIMISCCTL_MASK_PCI_INTR);
 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
@@ -5582,8 +5587,8 @@ bge_init_locked(struct bge_softc *sc)
 		bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
 	}
 
-	ifp->if_drv_flags |= IFF_DRV_RUNNING;
-	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
+	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
 
 	bge_ifmedia_upd_locked(ifp);
@@ -5604,9 +5609,9 @@ bge_init(void *xsc)
  * Set media options.
 */
 static int
-bge_ifmedia_upd(struct ifnet *ifp)
+bge_ifmedia_upd(if_t ifp)
 {
-	struct bge_softc *sc = ifp->if_softc;
+	struct bge_softc *sc = if_getsoftc(ifp);
 	int res;
 
 	BGE_LOCK(sc);
@@ -5617,9 +5622,9 @@ bge_ifmedia_upd(struct ifnet *ifp)
 }
 
 static int
-bge_ifmedia_upd_locked(struct ifnet *ifp)
+bge_ifmedia_upd_locked(if_t ifp)
 {
-	struct bge_softc *sc = ifp->if_softc;
+	struct bge_softc *sc = if_getsoftc(ifp);
 	struct mii_data *mii;
 	struct mii_softc *miisc;
 	struct ifmedia *ifm;
@@ -5702,14 +5707,14 @@ bge_ifmedia_upd_locked(struct ifnet *ifp)
  * Report current media status.
 */
 static void
-bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
+bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
 {
-	struct bge_softc *sc = ifp->if_softc;
+	struct bge_softc *sc = if_getsoftc(ifp);
 	struct mii_data *mii;
 
 	BGE_LOCK(sc);
 
-	if ((ifp->if_flags & IFF_UP) == 0) {
+	if ((if_getflags(ifp) & IFF_UP) == 0) {
 		BGE_UNLOCK(sc);
 		return;
 	}
@@ -5742,9 +5747,9 @@ bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
 }
 
 static int
-bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+bge_ioctl(if_t ifp, u_long command, caddr_t data)
 {
-	struct bge_softc *sc = ifp->if_softc;
+	struct bge_softc *sc = if_getsoftc(ifp);
 	struct ifreq *ifr = (struct ifreq *) data;
 	struct mii_data *mii;
 	int flags, mask, error = 0;
@@ -5763,10 +5768,10 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 			break;
 		}
 		BGE_LOCK(sc);
-		if (ifp->if_mtu != ifr->ifr_mtu) {
-			ifp->if_mtu = ifr->ifr_mtu;
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+		if (if_getmtu(ifp) != ifr->ifr_mtu) {
+			if_setmtu(ifp, ifr->ifr_mtu);
+			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 				bge_init_locked(sc);
 			}
 		}
@@ -5774,7 +5779,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 		break;
 	case SIOCSIFFLAGS:
 		BGE_LOCK(sc);
-		if (ifp->if_flags & IFF_UP) {
+		if (if_getflags(ifp) & IFF_UP) {
 			/*
 			 * If only the state of the PROMISC flag changed,
 			 * then just use the 'set promisc mode' command
@@ -5783,8 +5788,8 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 			 * waiting for it to start up, which may take a
 			 * second or two.  Similarly for ALLMULTI.
 			 */
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-				flags = ifp->if_flags ^ sc->bge_if_flags;
+			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
+				flags = if_getflags(ifp) ^ sc->bge_if_flags;
 				if (flags & IFF_PROMISC)
 					bge_setpromisc(sc);
 				if (flags & IFF_ALLMULTI)
@@ -5792,17 +5797,17 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 			} else
 				bge_init_locked(sc);
 		} else {
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 				bge_stop(sc);
 			}
 		}
-		sc->bge_if_flags = ifp->if_flags;
+		sc->bge_if_flags = if_getflags(ifp);
 		BGE_UNLOCK(sc);
 		error = 0;
 		break;
 	case SIOCADDMULTI:
 	case SIOCDELMULTI:
-		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
 			BGE_LOCK(sc);
 			bge_setmulti(sc);
 			BGE_UNLOCK(sc);
@@ -5821,7 +5826,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 		}
 		break;
 	case SIOCSIFCAP:
-		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
 #ifdef DEVICE_POLLING
 		if (mask & IFCAP_POLLING) {
 			if (ifr->ifr_reqcap & IFCAP_POLLING) {
@@ -5832,7 +5837,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 				BGE_SETBIT(sc, BGE_PCI_MISC_CTL,
 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
 				bge_writembx(sc, BGE_MBX_IRQ0_LO, 1);
-				ifp->if_capenable |= IFCAP_POLLING;
+				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
 				BGE_UNLOCK(sc);
 			} else {
 				error = ether_poll_deregister(ifp);
@@ -5841,53 +5846,55 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 				BGE_CLRBIT(sc, BGE_PCI_MISC_CTL,
 				    BGE_PCIMISCCTL_MASK_PCI_INTR);
 				bge_writembx(sc, BGE_MBX_IRQ0_LO, 0);
-				ifp->if_capenable &= ~IFCAP_POLLING;
+				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
 				BGE_UNLOCK(sc);
 			}
 		}
 #endif
 		if ((mask & IFCAP_TXCSUM) != 0 &&
-		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
-			ifp->if_capenable ^= IFCAP_TXCSUM;
-			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
-				ifp->if_hwassist |= sc->bge_csum_features;
+		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
+			if_togglecapenable(ifp, IFCAP_TXCSUM);
+			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
+				if_sethwassistbits(ifp,
+				    sc->bge_csum_features, 0);
 			else
-				ifp->if_hwassist &= ~sc->bge_csum_features;
+				if_sethwassistbits(ifp, 0,
+				    sc->bge_csum_features);
 		}
 		if ((mask & IFCAP_RXCSUM) != 0 &&
-		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0)
-			ifp->if_capenable ^= IFCAP_RXCSUM;
+		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
+			if_togglecapenable(ifp, IFCAP_RXCSUM);
 		if ((mask & IFCAP_TSO4) != 0 &&
-		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
-			ifp->if_capenable ^= IFCAP_TSO4;
-			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
-				ifp->if_hwassist |= CSUM_TSO;
+		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
+			if_togglecapenable(ifp, IFCAP_TSO4);
+			if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0)
+				if_sethwassistbits(ifp, CSUM_TSO, 0);
 			else
-				ifp->if_hwassist &= ~CSUM_TSO;
+				if_sethwassistbits(ifp, 0, CSUM_TSO);
 		}
 		if (mask & IFCAP_VLAN_MTU) {
-			ifp->if_capenable ^= IFCAP_VLAN_MTU;
-			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
+			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 			bge_init(sc);
 		}
 		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
-		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
-			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
+			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
 		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
-		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
-			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
-				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
+		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
+			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
+			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
+				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
 			BGE_LOCK(sc);
 			bge_setvlan(sc);
 			BGE_UNLOCK(sc);
 		}
 #ifdef VLAN_CAPABILITIES
-		VLAN_CAPABILITIES(ifp);
+		if_vlancap(ifp);
 #endif
 		break;
 	default:
@@ -5901,7 +5908,7 @@ bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
 static void
 bge_watchdog(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 	uint32_t status;
 
 	BGE_LOCK_ASSERT(sc);
@@ -5940,10 +5947,10 @@ bge_watchdog(struct bge_softc *sc)
 	if_printf(ifp, "watchdog timeout -- resetting\n");
 
-	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
 	bge_init_locked(sc);
 
-	ifp->if_oerrors++;
+	if_incoerrors(ifp, 1);
 }
 
 static void
@@ -5967,7 +5974,7 @@ bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit)
 static void
 bge_stop(struct bge_softc *sc)
 {
-	struct ifnet *ifp;
+	if_t ifp;
 
 	BGE_LOCK_ASSERT(sc);
@@ -6057,7 +6064,7 @@ bge_stop(struct bge_softc *sc)
 		if_printf(sc->bge_ifp, "link DOWN\n");
 	sc->bge_link = 0;
 
-	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
 }
 
 /*
@@ -6094,14 +6101,14 @@ static int
 bge_resume(device_t dev)
 {
 	struct bge_softc *sc;
-	struct ifnet *ifp;
+	if_t ifp;
 
 	sc = device_get_softc(dev);
 	BGE_LOCK(sc);
 	ifp = sc->bge_ifp;
-	if (ifp->if_flags & IFF_UP) {
+	if (if_getflags(ifp) & IFF_UP) {
 		bge_init_locked(sc);
-		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
 			bge_start_locked(ifp);
 	}
 	BGE_UNLOCK(sc);