- New 82580 devices supported

- Fixes from John Baldwin: vlan shadow tables made per-interface,
  vlan hw setup done only when the capability is enabled, and,
  finally, the interrupt rate made tuneable. Thanks John!
- Tweaked watchdog handling to avoid false positives: detection now
  happens in the TX clean path, with only the final check and init
  happening in the local timer (see the sketch after this list).
- Limit queues to 8 for all devices; with the 82576 or 82580 on
  larger machines the count can exceed this, and doing so seems
  mostly a waste of resources. Even 8 might be high, but it can be
  manually reduced.
- Use 2k, 4k, and now 9k clusters based on the MTU size.
- Rework the igb_refresh_mbufs() code; it's important to make
  sure the descriptor is rewritten even when reusing mbufs, since
  writeback clobbers things.
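
For orientation, a condensed model of the new per-queue watchdog flow
described above (a sketch only: locking, the adapter structure, and the
real callout are elided, and the IGB_WATCHDOG value here is illustrative;
the state names match the diff below):

#include <stdio.h>

#define IGB_QUEUE_IDLE    0
#define IGB_QUEUE_WORKING 1
#define IGB_QUEUE_HUNG    2
#define IGB_WATCHDOG      (10 * 100)    /* illustrative: ~10s at hz = 100 */

struct txr_model {
        int queue_status;
        int watchdog_time;
        int tx_avail, num_tx_desc;
};

/* Send path: arm the watchdog whenever frames are queued. */
static void
model_start(struct txr_model *txr, int ticks)
{
        txr->watchdog_time = ticks;
        txr->queue_status = IGB_QUEUE_WORKING;
}

/* TX clean path: hang detection now happens here. */
static void
model_txeof(struct txr_model *txr, int processed, int ticks)
{
        if (txr->tx_avail == txr->num_tx_desc) {
                txr->queue_status = IGB_QUEUE_IDLE;     /* all clean */
                return;
        }
        if (processed == 0 && (ticks - txr->watchdog_time) > IGB_WATCHDOG)
                txr->queue_status = IGB_QUEUE_HUNG;     /* no progress */
}

int
main(void)
{
        struct txr_model txr = { IGB_QUEUE_IDLE, 0, 100, 1024 };

        model_start(&txr, 0);
        model_txeof(&txr, 0, 2000);     /* nothing cleaned in 2000 ticks */
        /* The local timer now only checks the flag (and reinits on hang). */
        printf("hung: %d\n", txr.queue_status == IGB_QUEUE_HUNG);  /* 1 */
        return (0);
}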

MFC: in a few days, this delta needs to get to 8.2
Jack F Vogel, 2010-11-23 22:12:02 +00:00
commit 46b9f1ff9f, parent 496b5146bf
2 changed files with 209 additions and 136 deletions

File 1 of 2:

@ -99,7 +99,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
char igb_driver_version[] = "version - 2.0.4";
char igb_driver_version[] = "version - 2.0.7";
/*********************************************************************
@ -137,6 +137,10 @@ static igb_vendor_info_t igb_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@ -307,11 +311,19 @@ static int igb_enable_msix = 1;
TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
/*
* Header split has seemed to be beneficial in
* many circumstances tested, however there have
* been some stability issues, so the default is
* off.
*/
** Tuneable Interrupt rate
*/
static int igb_max_interrupt_rate = 8000;
TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
/*
** Header split causes the packet header to
** be dma'd to a separate mbuf from the payload.
** This can have memory alignment benefits, and
** another plus is that small packets often fit
** into the header and thus use no cluster. It's
** a very workload-dependent feature.
*/
static bool igb_header_split = FALSE;
TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
@ -330,15 +342,6 @@ TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
static int igb_fc_setting = e1000_fc_full;
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
/*
** Shadow VFTA table, this is needed because
** the real filter table gets cleared during
** a soft reset and the driver needs to be able
** to repopulate it.
*/
static u32 igb_shadow_vfta[IGB_VFTA_SIZE];
/*********************************************************************
* Device identification routine
*
@ -818,7 +821,7 @@ igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
/* Set watchdog on */
txr->watchdog_time = ticks;
txr->watchdog_check = TRUE;
txr->queue_status = IGB_QUEUE_WORKING;
}
}
@ -922,7 +925,7 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
}
if (enq > 0) {
/* Set the watchdog */
txr->watchdog_check = TRUE;
txr->queue_status = IGB_QUEUE_WORKING;
txr->watchdog_time = ticks;
}
return (err);
@ -1049,6 +1052,11 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
}
break;
case SIOCSIFMEDIA:
/*
** As the speed/duplex settings are being
** changed, we need to reset the PHY.
*/
adapter->hw.phy.reset_disable = FALSE;
/* Check SOL/IDER usage */
IGB_CORE_LOCK(adapter);
if (e1000_check_reset_block(&adapter->hw)) {
@ -1161,19 +1169,6 @@ igb_init_locked(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
/* Use real VLAN Filter support? */
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
igb_setup_vlan_hw_support(adapter);
else {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
}
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM) {
@ -1201,10 +1196,12 @@ igb_init_locked(struct adapter *adapter)
** Figure out the desired mbuf pool
** for doing jumbo/packetsplit
*/
if (ifp->if_mtu > ETHERMTU)
if (adapter->max_frame_size <= 2048)
adapter->rx_mbuf_sz = MCLBYTES;
else if (adapter->max_frame_size <= 4096)
adapter->rx_mbuf_sz = MJUMPAGESIZE;
else
adapter->rx_mbuf_sz = MCLBYTES;
adapter->rx_mbuf_sz = MJUM9BYTES;
/* Prepare receive descriptors and buffers */
if (igb_setup_receive_structures(adapter)) {
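
The selection above maps adapter->max_frame_size (MTU plus Ethernet
header and CRC) onto the three cluster pools. A standalone sketch,
assuming the usual FreeBSD sizes (MCLBYTES = 2k, MJUMPAGESIZE =
PAGE_SIZE taken as 4k, MJUM9BYTES = 9k):

#include <stdio.h>

#define MCLBYTES     2048
#define MJUMPAGESIZE 4096       /* PAGE_SIZE on the assumed platform */
#define MJUM9BYTES   (9 * 1024)

static int
rx_mbuf_sz(int max_frame_size)
{
        if (max_frame_size <= 2048)
                return (MCLBYTES);
        else if (max_frame_size <= 4096)
                return (MJUMPAGESIZE);
        else
                return (MJUM9BYTES);
}

int
main(void)
{
        /* MTU 1500 -> 1518-byte frame -> 2k; MTU 9000 -> 9018 -> 9k */
        printf("%d %d %d\n",
            rx_mbuf_sz(1518), rx_mbuf_sz(4000), rx_mbuf_sz(9018));
        return (0);
}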
@ -1213,6 +1210,19 @@ igb_init_locked(struct adapter *adapter)
}
igb_initialize_receive_units(adapter);
/* Use real VLAN Filter support? */
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
igb_setup_vlan_hw_support(adapter);
else {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
}
/* Don't lose promiscuous settings */
igb_set_promisc(adapter);
@ -1225,9 +1235,6 @@ igb_init_locked(struct adapter *adapter)
if (adapter->msix > 1) /* Set up queue routing */
igb_configure_queues(adapter);
/* Set up VLAN tag offload and filter */
igb_setup_vlan_hw_support(adapter);
/* this clears any pending interrupts */
E1000_READ_REG(&adapter->hw, E1000_ICR);
#ifdef DEVICE_POLLING
@ -1627,11 +1634,6 @@ igb_media_change(struct ifnet *ifp)
device_printf(adapter->dev, "Unsupported media type\n");
}
/* As the speed/duplex settings my have changed we need to
* reset the PHY.
*/
adapter->hw.phy.reset_disable = FALSE;
igb_init_locked(adapter);
IGB_CORE_UNLOCK(adapter);
@ -1947,18 +1949,9 @@ igb_local_timer(void *arg)
/*
** Watchdog: check for time since any descriptor was cleaned
*/
for (int i = 0; i < adapter->num_queues; i++, txr++) {
IGB_TX_LOCK(txr);
if ((txr->watchdog_check == FALSE) ||
(txr->tx_avail == adapter->num_tx_desc)) {
IGB_TX_UNLOCK(txr);
continue;
}
if ((ticks - txr->watchdog_time) > IGB_WATCHDOG)
for (int i = 0; i < adapter->num_queues; i++, txr++)
if (txr->queue_status == IGB_QUEUE_HUNG)
goto timeout;
IGB_TX_UNLOCK(txr);
}
out:
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
return;
@ -1973,7 +1966,6 @@ igb_local_timer(void *arg)
txr->me, txr->tx_avail, txr->next_to_clean);
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
adapter->watchdog_events++;
IGB_TX_UNLOCK(txr);
igb_init_locked(adapter);
}
@ -2037,7 +2029,7 @@ igb_update_link_status(struct adapter *adapter)
if_link_state_change(ifp, LINK_STATE_DOWN);
/* Turn off watchdogs */
for (int i = 0; i < adapter->num_queues; i++, txr++)
txr->watchdog_check = FALSE;
txr->queue_status = IGB_QUEUE_IDLE;
}
}
@ -2069,7 +2061,7 @@ igb_stop(void *arg)
/* Unarm watchdog timer. */
for (int i = 0; i < adapter->num_queues; i++, txr++) {
IGB_TX_LOCK(txr);
txr->watchdog_check = FALSE;
txr->queue_status = IGB_QUEUE_IDLE;
IGB_TX_UNLOCK(txr);
}
@ -2280,8 +2272,7 @@ igb_configure_queues(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct igb_queue *que;
u32 tmp, ivar = 0;
u32 newitr = IGB_DEFAULT_ITR;
u32 tmp, ivar = 0, newitr = 0;
/* First turn on RSS capability */
if (adapter->hw.mac.type > e1000_82575)
@ -2398,6 +2389,9 @@ igb_configure_queues(struct adapter *adapter)
}
/* Set the starting interrupt rate */
if (igb_max_interrupt_rate > 0)
newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;
if (hw->mac.type == e1000_82575)
newitr |= newitr << 16;
else
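
The EITR write above and the new interrupt_rate sysctl handler later in
this file are inverses of each other; a worked round trip for the 8000
ints/sec default (the mask and shifts come from the diff, other register
granularity details are simplified):

#include <stdio.h>

int
main(void)
{
        unsigned rate = 8000;                        /* hw.igb.max_interrupt_rate */
        unsigned newitr = (4000000 / rate) & 0x7FFC; /* 500 = 0x1F4 */
        unsigned usec = (newitr & 0x7FFC) >> 2;      /* 125us interval */

        printf("EITR=0x%x -> %u us -> %u ints/sec\n",
            newitr, usec, 1000000 / usec);           /* back to 8000 */
        return (0);
}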
@ -2509,6 +2503,8 @@ igb_setup_msix(struct adapter *adapter)
/* Manual override */
if (igb_num_queues != 0)
queues = igb_num_queues;
if (queues > 8) /* max queues */
queues = 8;
/* Can have max of 4 queues on 82575 */
if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
@ -2636,7 +2632,7 @@ igb_reset(struct adapter *adapter)
fc->send_xon = TRUE;
/* Set Flow control, use the tunable location if sane */
if ((igb_fc_setting >= 0) || (igb_fc_setting < 4))
if ((igb_fc_setting >= 0) && (igb_fc_setting < 4))
fc->requested_mode = igb_fc_setting;
else
fc->requested_mode = e1000_fc_none;
@ -2728,10 +2724,11 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
ifp->if_capabilities |= IFCAP_TSO4;
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
if (igb_header_split)
ifp->if_capabilities |= IFCAP_LRO;
ifp->if_capenable = ifp->if_capabilities;
/* Don't enable LRO by default */
ifp->if_capabilities |= IFCAP_LRO;
#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
@ -3172,7 +3169,7 @@ igb_initialize_transmit_units(struct adapter *adapter)
E1000_READ_REG(hw, E1000_TDBAL(i)),
E1000_READ_REG(hw, E1000_TDLEN(i)));
txr->watchdog_check = FALSE;
txr->queue_status = IGB_QUEUE_IDLE;
txdctl |= IGB_TX_PTHRESH;
txdctl |= IGB_TX_HTHRESH << 8;
@ -3184,14 +3181,14 @@ igb_initialize_transmit_units(struct adapter *adapter)
if (adapter->hw.mac.type == e1000_vfadapt)
return;
e1000_config_collision_dist(hw);
/* Program the Transmit Control Register */
tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
e1000_config_collision_dist(hw);
/* This write will effectively turn on the transmit unit. */
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
@ -3500,16 +3497,19 @@ static bool
igb_txeof(struct tx_ring *txr)
{
struct adapter *adapter = txr->adapter;
int first, last, done;
int first, last, done, processed;
struct igb_tx_buffer *tx_buffer;
struct e1000_tx_desc *tx_desc, *eop_desc;
struct ifnet *ifp = adapter->ifp;
IGB_TX_LOCK_ASSERT(txr);
if (txr->tx_avail == adapter->num_tx_desc)
if (txr->tx_avail == adapter->num_tx_desc) {
txr->queue_status = IGB_QUEUE_IDLE;
return FALSE;
}
processed = 0;
first = txr->next_to_clean;
tx_desc = &txr->tx_base[first];
tx_buffer = &txr->tx_buffers[first];
@ -3536,6 +3536,7 @@ igb_txeof(struct tx_ring *txr)
tx_desc->lower.data = 0;
tx_desc->buffer_addr = 0;
++txr->tx_avail;
++processed;
if (tx_buffer->m_head) {
txr->bytes +=
@ -3575,6 +3576,15 @@ igb_txeof(struct tx_ring *txr)
txr->next_to_clean = first;
/*
** Watchdog calculation: we know there's
** work outstanding, or the first return
** above would have been taken; nothing
** processed for too long indicates a hang.
*/
if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
txr->queue_status = IGB_QUEUE_HUNG;
/*
* If we have enough room, clear IFF_DRV_OACTIVE
* to tell the stack that it is OK to send packets.
@ -3583,7 +3593,7 @@ igb_txeof(struct tx_ring *txr)
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* All clean, turn off the watchdog */
if (txr->tx_avail == adapter->num_tx_desc) {
txr->watchdog_check = FALSE;
txr->queue_status = IGB_QUEUE_IDLE;
return (FALSE);
}
}
@ -3615,51 +3625,59 @@ igb_refresh_mbufs(struct rx_ring *rxr, int limit)
cleaned = -1; /* Signify no completions */
while (i != limit) {
rxbuf = &rxr->rx_buffers[i];
if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
/* No hdr mbuf used with header split off */
if (rxr->hdr_split == FALSE)
goto no_split;
if (rxbuf->m_head == NULL) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
if (mh == NULL)
goto update;
mh->m_pkthdr.len = mh->m_len = MHLEN;
mh->m_len = MHLEN;
mh->m_flags |= M_PKTHDR;
m_adj(mh, ETHER_ALIGN);
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->htag,
rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("GET BUF: dmamap load"
" failure - %d\n", error);
m_free(mh);
goto update;
}
rxbuf->m_head = mh;
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.hdr_addr =
htole64(hseg[0].ds_addr);
}
} else
mh = rxbuf->m_head;
mh->m_pkthdr.len = mh->m_len = MHLEN;
mh->m_len = MHLEN;
mh->m_flags |= M_PKTHDR;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->htag,
rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("Refresh mbufs: hdr dmamap load"
" failure - %d\n", error);
m_free(mh);
rxbuf->m_head = NULL;
goto update;
}
rxbuf->m_head = mh;
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.hdr_addr =
htole64(hseg[0].ds_addr);
no_split:
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("GET BUF: dmamap load"
" failure - %d\n", error);
m_free(mp);
goto update;
}
rxbuf->m_pack = mp;
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.pkt_addr =
htole64(pseg[0].ds_addr);
} else
mp = rxbuf->m_pack;
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
/* Get the memory mapping */
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
printf("Refresh mbufs: payload dmamap load"
" failure - %d\n", error);
m_free(mp);
rxbuf->m_pack = NULL;
goto update;
}
rxbuf->m_pack = mp;
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
BUS_DMASYNC_PREREAD);
rxr->rx_base[i].read.pkt_addr =
htole64(pseg[0].ds_addr);
cleaned = i;
/* Calculate next index */
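
The reason every refresh pass above reloads the DMA map and rewrites
hdr_addr/pkt_addr, even for a reused mbuf: the advanced RX descriptor is
a union, and hardware writeback overlays status on the same bytes the
addresses occupied. A compilable sketch of that layout (field names
abbreviated, not the real e1000 definitions):

#include <stdint.h>

union adv_rx_desc_model {
        struct {
                uint64_t pkt_addr;       /* payload buffer address */
                uint64_t hdr_addr;       /* header buffer address */
        } read;                          /* software writes this format */
        struct {
                uint64_t rss_hdr_info;   /* hash and header info */
                uint64_t status_err_len; /* status, errors, length */
        } wb;                            /* hardware writes this back */
};

/*
 * After writeback, read.pkt_addr and read.hdr_addr are clobbered, so
 * igb_refresh_mbufs() must store the addresses again on every pass.
 */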
@ -3722,9 +3740,9 @@ igb_allocate_receive_buffers(struct rx_ring *rxr)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
MJUMPAGESIZE, /* maxsize */
MJUM9BYTES, /* maxsize */
1, /* nsegments */
MJUMPAGESIZE, /* maxsegsize */
MJUM9BYTES, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
@ -3826,7 +3844,7 @@ igb_setup_receive_ring(struct rx_ring *rxr)
rxr->hdr_split = TRUE;
/* Now replenish the ring mbufs */
for (int j = 0; j != adapter->num_rx_desc; ++j) {
for (int j = 0; j < adapter->num_rx_desc; ++j) {
struct mbuf *mh, *mp;
rxbuf = &rxr->rx_buffers[j];
@ -3897,8 +3915,8 @@ igb_setup_receive_ring(struct rx_ring *rxr)
** are undesirable in similar setups.
*/
if (ifp->if_capenable & IFCAP_LRO) {
int err = tcp_lro_init(lro);
if (err) {
error = tcp_lro_init(lro);
if (error) {
device_printf(dev, "LRO Initialization failed!\n");
goto fail;
}
@ -3940,7 +3958,9 @@ igb_setup_receive_structures(struct adapter *adapter)
*/
for (int j = 0; j > i; ++j) {
rxr = &adapter->rx_rings[i];
IGB_RX_LOCK(rxr);
igb_free_receive_ring(rxr);
IGB_RX_UNLOCK(rxr);
}
return (ENOBUFS);
@ -3983,9 +4003,13 @@ igb_initialize_receive_units(struct adapter *adapter)
*/
if (ifp->if_mtu > ETHERMTU) {
rctl |= E1000_RCTL_LPE;
srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
}
/* Set maximum packet len */
psize = adapter->max_frame_size;
/* are we on a vlan? */
@ -4194,11 +4218,11 @@ igb_free_receive_buffers(struct rx_ring *rxr)
static __inline void
igb_rx_discard(struct rx_ring *rxr, int i)
{
struct adapter *adapter = rxr->adapter;
struct igb_rx_buf *rbuf;
struct mbuf *mh, *mp;
rbuf = &rxr->rx_buffers[i];
/* Partially received? Free the chain */
if (rxr->fmp != NULL) {
rxr->fmp->m_flags |= M_PKTHDR;
m_freem(rxr->fmp);
@ -4206,19 +4230,23 @@ igb_rx_discard(struct rx_ring *rxr, int i)
rxr->lmp = NULL;
}
mh = rbuf->m_head;
mp = rbuf->m_pack;
/* Reuse loaded DMA map and just update mbuf chain */
if (mh) { /* with no hdr split would be null */
mh->m_len = MHLEN;
mh->m_flags |= M_PKTHDR;
mh->m_next = NULL;
/*
** With advanced descriptors the writeback
** clobbers the buffer addrs, so it's easier
** to just free the existing mbufs and take
** the normal refresh path to get new buffers
** and mapping.
*/
if (rbuf->m_head) {
m_free(rbuf->m_head);
rbuf->m_head = NULL;
}
if (rbuf->m_pack) {
m_free(rbuf->m_pack);
rbuf->m_pack = NULL;
}
mp->m_len = mp->m_pkthdr.len = adapter->rx_mbuf_sz;
mp->m_data = mp->m_ext.ext_buf;
mp->m_next = NULL;
return;
}
@ -4333,10 +4361,9 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
E1000_RXDADV_HDRBUFLEN_SHIFT;
if (hlen > IGB_HDR_BUF)
hlen = IGB_HDR_BUF;
/* Handle the header mbuf */
mh = rxr->rx_buffers[i].m_head;
mh->m_len = hlen;
/* clear buf info for refresh */
/* clear buf pointer for refresh */
rxbuf->m_head = NULL;
/*
** Get the payload length, this
@ -4347,7 +4374,7 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
mp = rxr->rx_buffers[i].m_pack;
mp->m_len = plen;
mh->m_next = mp;
/* clear buf info for refresh */
/* clear buf pointer */
rxbuf->m_pack = NULL;
rxr->rx_split_packets++;
}
@ -4368,7 +4395,7 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
/* Initial frame - setup */
if (rxr->fmp == NULL) {
mh->m_pkthdr.len = mh->m_len;
/* Store the first mbuf */
/* Save the head of the chain */
rxr->fmp = mh;
rxr->lmp = mh;
if (mp != NULL) {
@ -4533,12 +4560,15 @@ igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
IGB_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
igb_shadow_vfta[index] |= (1 << bit);
adapter->shadow_vfta[index] |= (1 << bit);
++adapter->num_vlans;
/* Re-init to load the changes */
igb_init(adapter);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
igb_init_locked(adapter);
IGB_CORE_UNLOCK(adapter);
}
/*
@ -4557,12 +4587,15 @@ igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
if ((vtag == 0) || (vtag > 4095)) /* Invalid */
return;
IGB_CORE_LOCK(adapter);
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
igb_shadow_vfta[index] &= ~(1 << bit);
adapter->shadow_vfta[index] &= ~(1 << bit);
--adapter->num_vlans;
/* Re-init to load the changes */
igb_init(adapter);
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
igb_init_locked(adapter);
IGB_CORE_UNLOCK(adapter);
}
static void
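
The index/bit arithmetic above spreads the 12-bit VLAN ID across 128
32-bit shadow-table words (hence the 0x7F and 0x1F masks); a worked
example:

#include <stdio.h>

int
main(void)
{
        unsigned vtag = 100;                    /* VLAN ID, 1..4095 */
        unsigned index = (vtag >> 5) & 0x7F;    /* 100 / 32 = word 3 */
        unsigned bit = vtag & 0x1F;             /* 100 % 32 = bit 4 */

        printf("vlan %u -> shadow_vfta[%u] |= (1 << %u)\n", vtag, index, bit);
        return (0);
}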
@ -4585,12 +4618,13 @@ igb_setup_vlan_hw_support(struct adapter *adapter)
** we need to repopulate it now.
*/
for (int i = 0; i < IGB_VFTA_SIZE; i++)
if (igb_shadow_vfta[i] != 0) {
if (adapter->shadow_vfta[i] != 0) {
if (hw->mac.type == e1000_vfadapt)
e1000_vfta_set_vf(hw, igb_shadow_vfta[i], TRUE);
e1000_vfta_set_vf(hw,
adapter->shadow_vfta[i], TRUE);
else
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
i, igb_shadow_vfta[i]);
i, adapter->shadow_vfta[i]);
}
if (hw->mac.type == e1000_vfadapt)
@ -4993,6 +5027,28 @@ igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
return (sysctl_handle_int(oidp, &val, 0, req));
}
/*
** Tuneable interrupt rate handler
*/
static int
igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1);
int error;
u32 reg, usec, rate;
reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
usec = ((reg & 0x7FFC) >> 2);
if (usec > 0)
rate = 1000000 / usec;
else
rate = 0;
error = sysctl_handle_int(oidp, &rate, 0, req);
if (error || !req->newptr)
return error;
return 0;
}
/*
* Add sysctl variables, one per statistic, to the system.
*/
@ -5065,6 +5121,12 @@ igb_add_hw_stats(struct adapter *adapter)
CTLFLAG_RD, NULL, "Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
CTLFLAG_RD, &adapter->queues[i],
sizeof(&adapter->queues[i]),
igb_sysctl_interrupt_rate_handler,
"IU", "Interrupt Rate");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
CTLFLAG_RD, adapter, E1000_TDH(txr->me),
igb_sysctl_reg_handler, "IU",

File 2 of 2:

@ -190,6 +190,9 @@
#define IGB_TX_BUFFER_SIZE ((uint32_t) 1514)
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_EEPROM_APME 0x400;
#define IGB_QUEUE_IDLE 0
#define IGB_QUEUE_WORKING 1
#define IGB_QUEUE_HUNG 2
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
@ -237,7 +240,7 @@
/* Define the starting Interrupt rate per Queue */
#define IGB_INTS_PER_SEC 8000
#define IGB_DEFAULT_ITR 1000000000/(IGB_INTS_PER_SEC * 256)
#define IGB_DEFAULT_ITR ((1000000/IGB_INTS_PER_SEC) << 2)
#define IGB_LINK_ITR 2000
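
The rewritten macro agrees with the EITR math in igb_configure_queues()
in the first file: with IGB_INTS_PER_SEC = 8000,

(1000000 / 8000) << 2 = 125 << 2 = 500 = 0x1F4

which is exactly what (4000000 / 8000) & 0x7FFC yields, and 500 >> 2 =
125us reads back as 8000 ints/sec in the new sysctl handler. The old
1000000000/(IGB_INTS_PER_SEC * 256) form evaluated to 488 and did not
round-trip cleanly.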
@ -300,7 +303,7 @@ struct tx_ring {
u32 bytes;
u32 packets;
bool watchdog_check;
int queue_status;
int watchdog_time;
int tdt;
int tdh;
@ -384,7 +387,15 @@ struct adapter {
int wol;
int has_manage;
/* Info about the board itself */
/*
** Shadow VFTA table; this is needed because
** the real vlan filter table gets cleared during
** a soft reset and the driver needs to be able
** to repopulate it.
*/
u32 shadow_vfta[IGB_VFTA_SIZE];
/* Info about the interface */
u8 link_active;
u16 link_speed;
u16 link_duplex;