MFC bug fixes to em and igb from HEAD.
commit ace006dec7
parent cc32d5b339
@@ -93,7 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
char em_driver_version[] = "7.0.0";
char em_driver_version[] = "7.0.5";


/*********************************************************************
@@ -192,7 +192,7 @@ static int em_suspend(device_t);
static int em_resume(device_t);
static void em_start(struct ifnet *);
static void em_start_locked(struct ifnet *, struct tx_ring *);
#if __FreeBSD_version >= 800000
#ifdef EM_MULTIQUEUE
static int em_mq_start(struct ifnet *, struct mbuf *);
static int em_mq_start_locked(struct ifnet *,
struct tx_ring *, struct mbuf *);
@@ -797,7 +797,7 @@ em_resume(device_t dev)
* the packet is requeued.
**********************************************************************/

#if __FreeBSD_version >= 800000
#ifdef EM_MULTIQUEUE
static int
em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
@@ -812,10 +812,18 @@ em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
return (err);
}

/* Call cleanup if number of TX descriptors low */
if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
em_txeof(txr);

enq = 0;
if (m == NULL)
if (m == NULL) {
next = drbr_dequeue(ifp, txr->br);
else
} else if (drbr_needs_enqueue(ifp, txr->br)) {
if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
return (err);
next = drbr_dequeue(ifp, txr->br);
} else
next = m;

/* Process the queue */
@@ -830,12 +838,17 @@ em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
ETHER_BPF_MTAP(ifp, next);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
if (txr->tx_avail < EM_MAX_SCATTER) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
next = drbr_dequeue(ifp, txr->br);
}

if (enq > 0) {
/* Set the watchdog */
txr->watchdog_check = TRUE;
txr->watchdog_time = ticks;
}
return (err);
}
@@ -860,8 +873,7 @@ em_mq_start(struct ifnet *ifp, struct mbuf *m)
txr = &adapter->tx_rings[i];

if (EM_TX_TRYLOCK(txr)) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
error = em_mq_start_locked(ifp, txr, m);
error = em_mq_start_locked(ifp, txr, m);
EM_TX_UNLOCK(txr);
} else
error = drbr_enqueue(ifp, txr->br, m);
@@ -888,7 +900,7 @@ em_qflush(struct ifnet *ifp)
if_qflush(ifp);
}

#endif /* FreeBSD_version */
#endif /* EM_MULTIQUEUE */

static void
em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
@@ -905,8 +917,15 @@ em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
if (!adapter->link_active)
return;

while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
/* Call cleanup if number of TX descriptors low */
if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
em_txeof(txr);

while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if (txr->tx_avail < EM_MAX_SCATTER) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
@@ -926,6 +945,7 @@ em_start_locked(struct ifnet *ifp, struct tx_ring *txr)
ETHER_BPF_MTAP(ifp, m_head);

/* Set timeout in case hardware has problems transmitting. */
txr->watchdog_time = ticks;
txr->watchdog_check = TRUE;
}

@@ -1118,6 +1138,10 @@ em_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
if (mask & IFCAP_VLAN_HWFILTER) {
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
reinit = 1;
}
if ((mask & IFCAP_WOL) &&
(ifp->if_capabilities & IFCAP_WOL) != 0) {
if (mask & IFCAP_WOL_MCAST)
@@ -1228,8 +1252,18 @@ em_init_locked(struct adapter *adapter)
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

/* Use real VLAN Filter support */
em_setup_vlan_hw_support(adapter);
/* Use real VLAN Filter support? */
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
em_setup_vlan_hw_support(adapter);
else {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
}

/* Set hardware offload abilities */
ifp->if_hwassist = 0;
@@ -1337,11 +1371,13 @@ em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
}
EM_CORE_UNLOCK(adapter);

EM_RX_LOCK(rxr);
rx_done = em_rxeof(rxr, count);
EM_RX_UNLOCK(rxr);

EM_TX_LOCK(txr);
em_txeof(txr);
#if __FreeBSD_version >= 800000
#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
@@ -1409,28 +1445,28 @@ em_handle_que(void *context, int pending)
struct ifnet *ifp = adapter->ifp;
struct tx_ring *txr = adapter->tx_rings;
struct rx_ring *rxr = adapter->rx_rings;
u32 loop = EM_MAX_LOOP;
bool more_rx, more_tx;
bool more_rx;


if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
EM_TX_LOCK(txr);
do {
more_rx = em_rxeof(rxr, adapter->rx_process_limit);
more_tx = em_txeof(txr);
} while (loop-- && (more_rx || more_tx));
EM_RX_LOCK(rxr);
more_rx = em_rxeof(rxr, adapter->rx_process_limit);
EM_RX_UNLOCK(rxr);

#if __FreeBSD_version >= 800000
EM_TX_LOCK(txr);
em_txeof(txr);
#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp, txr);
#endif
if (more_rx || more_tx)
taskqueue_enqueue(adapter->tq, &adapter->que_task);

EM_TX_UNLOCK(txr);
if (more_rx) {
taskqueue_enqueue(adapter->tq, &adapter->que_task);
return;
}
}

em_enable_intr(adapter);
@@ -1475,8 +1511,10 @@ em_msix_rx(void *arg)
struct adapter *adapter = rxr->adapter;
bool more;

EM_RX_LOCK(rxr);
++rxr->rx_irq;
more = em_rxeof(rxr, adapter->rx_process_limit);
EM_RX_UNLOCK(rxr);
if (more)
taskqueue_enqueue(rxr->tq, &rxr->rx_task);
else
@@ -1513,14 +1551,16 @@ em_handle_rx(void *context, int pending)
{
struct rx_ring *rxr = context;
struct adapter *adapter = rxr->adapter;
u32 loop = EM_MAX_LOOP;
bool more;

do {
more = em_rxeof(rxr, adapter->rx_process_limit);
} while (loop-- && more);
/* Reenable this interrupt */
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
EM_RX_LOCK(rxr);
more = em_rxeof(rxr, adapter->rx_process_limit);
EM_RX_UNLOCK(rxr);
if (more)
taskqueue_enqueue(rxr->tq, &rxr->rx_task);
else
/* Reenable this interrupt */
E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims);
}

static void
@@ -1529,16 +1569,13 @@ em_handle_tx(void *context, int pending)
struct tx_ring *txr = context;
struct adapter *adapter = txr->adapter;
struct ifnet *ifp = adapter->ifp;
u32 loop = EM_MAX_LOOP;
bool more;

if (!EM_TX_TRYLOCK(txr))
return;
do {
more = em_txeof(txr);
} while (loop-- && more);

#if __FreeBSD_version >= 800000
em_txeof(txr);

#ifdef EM_MULTIQUEUE
if (!drbr_empty(ifp, txr->br))
em_mq_start_locked(ifp, txr, NULL);
#else
@@ -1706,13 +1743,6 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
txd_upper = txd_lower = txd_used = txd_saved = 0;
do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0);

/*
* Force a cleanup if number of TX descriptors
* available hits the threshold
*/
if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD)
em_txeof(txr);

/*
* TSO workaround:
* If an mbuf is only header we need
@@ -2642,7 +2672,7 @@ em_setup_interface(device_t dev, struct adapter *adapter)

ifp->if_capabilities = ifp->if_capenable = 0;

#if __FreeBSD_version >= 800000
#ifdef EM_MULTIQUEUE
/* Multiqueue tx functions */
ifp->if_transmit = em_mq_start;
ifp->if_qflush = em_qflush;
@@ -2656,12 +2686,23 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capenable |= IFCAP_TSO4;

/*
* Tell the upper layer(s) we support long frames.
* Tell the upper layer(s) we
* support full VLAN capability
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

/*
** Dont turn this on by default, if vlans are
** created on another pseudo device (eg. lagg)
** then vlan events are not passed thru, breaking
** operation, but with HW FILTER off it works. If
** using vlans directly on the em driver you can
** enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#ifdef DEVICE_POLLING
ifp->if_capabilities |= IFCAP_POLLING;
#endif
@@ -3681,6 +3722,8 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
rxr->next_to_refresh = i;
}
update:
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
if (cleaned != -1) /* Update tail index */
E1000_WRITE_REG(&adapter->hw,
E1000_RDT(rxr->me), cleaned);
@@ -3972,7 +4015,7 @@ em_initialize_receive_unit(struct adapter *adapter)
** When using MSIX interrupts we need to throttle
** using the EITR register (82574 only)
*/
if (adapter->msix)
if (hw->mac.type == e1000_82574)
for (int i = 0; i < 4; i++)
E1000_WRITE_REG(hw, E1000_EITR_82574(i),
DEFAULT_ITR);
@@ -4015,6 +4058,9 @@ em_initialize_receive_unit(struct adapter *adapter)
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

/* Strip the CRC */
rctl |= E1000_RCTL_SECRC;

/* Make sure VLAN Filters are off */
rctl &= ~E1000_RCTL_VFE;
rctl &= ~E1000_RCTL_SBP;
@@ -4046,15 +4092,15 @@ static int
em_rxeof(struct rx_ring *rxr, int count)
{
struct adapter *adapter = rxr->adapter;
struct ifnet *ifp = adapter->ifp;;
struct ifnet *ifp = adapter->ifp;
struct mbuf *mp, *sendmp;
u8 status;
u8 status = 0;
u16 len;
int i, processed, rxdone = 0;
bool eop;
struct e1000_rx_desc *cur;

EM_RX_LOCK(rxr);
EM_RX_LOCK_ASSERT(rxr);

for (i = rxr->next_to_check, processed = 0; count != 0;) {

@@ -4109,6 +4155,10 @@ em_rxeof(struct rx_ring *rxr, int count)
E1000_RXD_SPC_VLAN_MASK);
rxr->fmp->m_flags |= M_VLANTAG;
}
#ifdef EM_MULTIQUEUE
rxr->fmp->m_pkthdr.flowid = curcpu;
rxr->fmp->m_flags |= M_FLOWID;
#endif
#ifndef __NO_STRICT_ALIGNMENT
skip:
#endif
@@ -4162,8 +4212,11 @@ skip:

rxr->next_to_check = i;

EM_RX_UNLOCK(rxr);
#ifdef DEVICE_POLLING
return (rxdone);
#else
return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE);
#endif
}

#ifndef __NO_STRICT_ALIGNMENT
@@ -4346,7 +4399,7 @@ em_enable_intr(struct adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 ims_mask = IMS_ENABLE_MASK;

if (adapter->msix) {
if (hw->mac.type == e1000_82574) {
E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
ims_mask |= EM_MSIX_MASK;
}
@@ -4358,7 +4411,7 @@ em_disable_intr(struct adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;

if (adapter->msix)
if (hw->mac.type == e1000_82574)
E1000_WRITE_REG(hw, EM_EIAC, 0);
E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}

@@ -223,7 +223,7 @@
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)

#define EM_MAX_SCATTER 64
#define EM_MAX_SCATTER 32
#define EM_VFTA_SIZE 128
#define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */
@@ -453,5 +453,6 @@ struct em_buffer {
#define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#define EM_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED)

#endif /* _EM_H_DEFINED_ */

@@ -99,7 +99,7 @@ int igb_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
char igb_driver_version[] = "version - 1.9.3";
char igb_driver_version[] = "version - 1.9.5";


/*********************************************************************
@@ -758,8 +758,15 @@ igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
if (!adapter->link_active)
return;

while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
/* Call cleanup if number of TX descriptors low */
if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
igb_txeof(txr);

while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) {
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
if (m_head == NULL)
break;
@@ -779,6 +786,7 @@ igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
ETHER_BPF_MTAP(ifp, m_head);

/* Set watchdog on */
txr->watchdog_time = ticks;
txr->watchdog_check = TRUE;
}
}
@@ -817,8 +825,6 @@ igb_mq_start(struct ifnet *ifp, struct mbuf *m)
/* Which queue to use */
if ((m->m_flags & M_FLOWID) != 0)
i = m->m_pkthdr.flowid % adapter->num_queues;
else
i = curcpu % adapter->num_queues;

txr = &adapter->tx_rings[i];

@@ -847,6 +853,10 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
return (err);
}

/* Call cleanup if number of TX descriptors low */
if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
igb_txeof(txr);

enq = 0;
if (m == NULL) {
next = drbr_dequeue(ifp, txr->br);
@@ -856,6 +866,7 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
next = drbr_dequeue(ifp, txr->br);
} else
next = m;

/* Process the queue */
while (next != NULL) {
if ((err = igb_xmit(txr, &next)) != 0) {
@@ -877,6 +888,7 @@ igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
if (enq > 0) {
/* Set the watchdog */
txr->watchdog_check = TRUE;
txr->watchdog_time = ticks;
}
return (err);
}
@@ -1055,6 +1067,10 @@ igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
}
if (mask & IFCAP_VLAN_HWFILTER) {
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
reinit = 1;
}
if (mask & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
reinit = 1;
@@ -1110,6 +1126,19 @@ igb_init_locked(struct adapter *adapter)

E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

/* Use real VLAN Filter support? */
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
/* Use real VLAN Filter support */
igb_setup_vlan_hw_support(adapter);
else {
u32 ctrl;
ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
ctrl |= E1000_CTRL_VME;
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
}

/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TXCSUM) {
@@ -1231,19 +1260,13 @@ igb_handle_que(void *context, int pending)
struct adapter *adapter = que->adapter;
struct tx_ring *txr = que->txr;
struct ifnet *ifp = adapter->ifp;
u32 loop = IGB_MAX_LOOP;
bool more;

/* RX first */
do {
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
more = igb_rxeof(que, -1);
} while (loop-- && more);

if (IGB_TX_TRYLOCK(txr)) {
loop = IGB_MAX_LOOP;
do {
more = igb_txeof(txr);
} while (loop-- && more);
IGB_TX_LOCK(txr);
igb_txeof(txr);
#if __FreeBSD_version >= 800000
igb_mq_start_locked(ifp, txr, NULL);
#else
@@ -1251,6 +1274,10 @@ igb_handle_que(void *context, int pending)
igb_start_locked(txr, ifp);
#endif
IGB_TX_UNLOCK(txr);
if (more) {
taskqueue_enqueue(que->tq, &que->que_task);
return;
}
}

/* Reenable this interrupt */
@@ -1436,7 +1463,7 @@ igb_msix_que(void *arg)
if (adapter->hw.mac.type == e1000_82575)
newitr |= newitr << 16;
else
newitr |= 0x8000000;
newitr |= E1000_EITR_CNT_IGNR;

/* save for next interrupt */
que->eitr_setting = newitr;
@@ -2340,7 +2367,7 @@ igb_configure_queues(struct adapter *adapter)
if (hw->mac.type == e1000_82575)
newitr |= newitr << 16;
else
newitr |= 0x8000000;
newitr |= E1000_EITR_CNT_IGNR;

for (int i = 0; i < adapter->num_queues; i++) {
que = &adapter->queues[i];
@@ -2669,12 +2696,23 @@ igb_setup_interface(device_t dev, struct adapter *adapter)
#endif

/*
* Tell the upper layer(s) we support long frames.
* Tell the upper layer(s) we
* support full VLAN capability.
*/
ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;

/*
** Dont turn this on by default, if vlans are
** created on another pseudo device (eg. lagg)
** then vlan events are not passed thru, breaking
** operation, but with HW FILTER off it works. If
** using vlans directly on the em driver you can
** enable this and get full hardware tag filtering.
*/
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

/*
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
@@ -3779,6 +3817,9 @@ igb_setup_receive_ring(struct rx_ring *rxr)
/* Update descriptor */
rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
}

/* Setup our descriptor indices */
rxr->next_to_check = 0;
rxr->next_to_refresh = 0;
rxr->lro_enabled = FALSE;

@@ -4672,10 +4713,12 @@ igb_update_stats_counters(struct adapter *adapter)
{
struct ifnet *ifp;

if(adapter->hw.phy.media_type == e1000_media_type_copper ||
if (adapter->hw.phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
adapter->stats.symerrs +=
E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
adapter->stats.sec +=
E1000_READ_REG(&adapter->hw, E1000_SEC);
}
adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);

@@ -39,9 +39,6 @@

#include <sys/param.h>
#include <sys/systm.h>
#if __FreeBSD_version >= 800000
#include <sys/buf_ring.h>
#endif
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
@@ -94,7 +91,7 @@ int lem_display_debug_stats = 0;
/*********************************************************************
* Legacy Em Driver version:
*********************************************************************/
char lem_driver_version[] = "1.0.0";
char lem_driver_version[] = "1.0.1";


/*********************************************************************
@@ -177,11 +174,6 @@ static int lem_suspend(device_t);
static int lem_resume(device_t);
static void lem_start(struct ifnet *);
static void lem_start_locked(struct ifnet *ifp);
#if __FreeBSD_version >= 800000
static int lem_mq_start(struct ifnet *, struct mbuf *);
static int lem_mq_start_locked(struct ifnet *, struct mbuf *);
static void lem_qflush(struct ifnet *);
#endif
static int lem_ioctl(struct ifnet *, u_long, caddr_t);
static void lem_init(void *);
static void lem_init_locked(struct adapter *);
@@ -304,12 +296,6 @@ MODULE_DEPEND(lem, ether, 1, 1, 1);

#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
#define M_TSO_LEN 66

/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO 0
#endif

static int lem_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int lem_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
@@ -827,118 +813,6 @@ lem_resume(device_t dev)
}


/*********************************************************************
* Transmit entry point
*
* em_start is called by the stack to initiate a transmit.
* The driver will remain in this routine as long as there are
* packets to transmit and transmit resources are available.
* In case resources are not available stack is notified and
* the packet is requeued.
**********************************************************************/

#if __FreeBSD_version >= 800000
static int
lem_mq_start_locked(struct ifnet *ifp, struct mbuf *m)
{
struct adapter *adapter = ifp->if_softc;
struct mbuf *next;
int error = E1000_SUCCESS;

EM_TX_LOCK_ASSERT(adapter);
/* To allow being called from a tasklet */
if (m == NULL)
goto process;

if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
IFF_DRV_RUNNING)
|| (!adapter->link_active)) {
error = drbr_enqueue(ifp, adapter->br, m);
return (error);
} else if (drbr_empty(ifp, adapter->br) &&
(adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) {
if ((error = lem_xmit(adapter, &m)) != 0) {
if (m)
error = drbr_enqueue(ifp, adapter->br, m);
return (error);
} else {
/*
* We've bypassed the buf ring so we need to update
* ifp directly
*/
drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags);
/*
** Send a copy of the frame to the BPF
** listener and set the watchdog on.
*/
ETHER_BPF_MTAP(ifp, m);
adapter->watchdog_check = TRUE;
}
} else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0)
return (error);

process:
if (drbr_empty(ifp, adapter->br))
return(error);
/* Process the queue */
while (TRUE) {
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
break;
next = drbr_dequeue(ifp, adapter->br);
if (next == NULL)
break;
if ((error = lem_xmit(adapter, &next)) != 0) {
if (next != NULL)
error = drbr_enqueue(ifp, adapter->br, next);
break;
}
drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
ETHER_BPF_MTAP(ifp, next);
/* Set the watchdog */
adapter->watchdog_check = TRUE;
}

if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;

return (error);
}

/*
** Multiqueue capable stack interface, this is not
** yet truely multiqueue, but that is coming...
*/
static int
lem_mq_start(struct ifnet *ifp, struct mbuf *m)
{

struct adapter *adapter = ifp->if_softc;
int error = 0;

if (EM_TX_TRYLOCK(adapter)) {
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
error = lem_mq_start_locked(ifp, m);
EM_TX_UNLOCK(adapter);
} else
error = drbr_enqueue(ifp, adapter->br, m);

return (error);
}

static void
lem_qflush(struct ifnet *ifp)
{
struct mbuf *m;
struct adapter *adapter = (struct adapter *)ifp->if_softc;

EM_TX_LOCK(adapter);
while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL)
m_freem(m);
if_qflush(ifp);
EM_TX_UNLOCK(adapter);
}
#endif /* FreeBSD_version */

static void
lem_start_locked(struct ifnet *ifp)
{
@@ -975,6 +849,7 @@ lem_start_locked(struct ifnet *ifp)

/* Set timeout in case hardware has problems transmitting. */
adapter->watchdog_check = TRUE;
adapter->watchdog_time = ticks;
}
if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
@@ -1151,12 +1026,6 @@ lem_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
ifp->if_capenable ^= IFCAP_HWCSUM;
reinit = 1;
}
#if __FreeBSD_version >= 700000
if (mask & IFCAP_TSO4) {
ifp->if_capenable ^= IFCAP_TSO4;
reinit = 1;
}
#endif
if (mask & IFCAP_VLAN_HWTAGGING) {
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
reinit = 1;
@@ -1279,10 +1148,6 @@ lem_init_locked(struct adapter *adapter)
if (adapter->hw.mac.type >= e1000_82543) {
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 700000
if (ifp->if_capenable & IFCAP_TSO4)
ifp->if_hwassist |= CSUM_TSO;
#endif
}

/* Configure for OS presence */
@@ -1394,13 +1259,8 @@ lem_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)

EM_TX_LOCK(adapter);
lem_txeof(adapter);
#if __FreeBSD_version >= 800000
if (!drbr_empty(ifp, adapter->br))
lem_mq_start_locked(ifp, NULL);
#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
lem_start_locked(ifp);
#endif
EM_TX_UNLOCK(adapter);
return (rx_done);
}
@@ -1494,14 +1354,8 @@ lem_handle_rxtx(void *context, int pending)
taskqueue_enqueue(adapter->tq, &adapter->rxtx_task);
EM_TX_LOCK(adapter);
lem_txeof(adapter);

#if __FreeBSD_version >= 800000
if (!drbr_empty(ifp, adapter->br))
lem_mq_start_locked(ifp, NULL);
#else
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
lem_start_locked(ifp);
#endif
EM_TX_UNLOCK(adapter);
}

@@ -1852,15 +1706,17 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
if (mtag != NULL) {
ctxd->upper.fields.special =
htole16(VLAN_TAG_VALUE(mtag));
ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
}
#else /* FreeBSD 7 */
if (m_head->m_flags & M_VLANTAG) {
/* Set the vlan id. */
ctxd->upper.fields.special =
htole16(m_head->m_pkthdr.ether_vtag);
#endif
/* Tell hardware to add tag */
ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
}
#endif

tx_buffer->m_head = m_head;
tx_buffer_mapped->map = tx_buffer->map;
@@ -2544,12 +2400,6 @@ lem_setup_interface(device_t dev, struct adapter *adapter)

ifp->if_capabilities = ifp->if_capenable = 0;

#if __FreeBSD_version >= 800000
/* Multiqueue tx functions */
ifp->if_transmit = lem_mq_start;
ifp->if_qflush = lem_qflush;
adapter->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &adapter->tx_mtx);
#endif
if (adapter->hw.mac.type >= e1000_82543) {
int version_cap;
#if __FreeBSD_version < 700000
@@ -4549,10 +4399,6 @@ lem_print_hw_stats(struct adapter *adapter)
(long long)adapter->stats.gprc);
device_printf(dev, "Good Packets Xmtd = %lld\n",
(long long)adapter->stats.gptc);
device_printf(dev, "TSO Contexts Xmtd = %lld\n",
(long long)adapter->stats.tsctc);
device_printf(dev, "TSO Contexts Failed = %lld\n",
(long long)adapter->stats.tsctfc);
}

/**********************************************************************