/******************************************************************************

  Copyright (c) 2001-2013, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#include "opt_altq.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#ifndef IGB_LEGACY_TX
#include <sys/buf_ring.h>
#endif
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#ifdef RSS
#include <netinet/in_rss.h>
#endif

#include <machine/in_cksum.h>
#include <dev/led/led.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "e1000_api.h"
#include "e1000_82575.h"
#include "if_igb.h"
|
|
|
|
|
|
|
|
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
/* Debug-statistics toggle; left as a plain global so it can be poked
 * from a debugger at runtime. */
int	igb_display_debug_stats = 0;

/*********************************************************************
 *  Driver version:
 *********************************************************************/
/* Reported in the probe string appended to the branding name. */
char igb_driver_version[] = "version - 2.4.0";
|
|
|
|
/*********************************************************************
|
|
|
|
* PCI Device ID Table
|
|
|
|
*
|
|
|
|
* Used by probe to select devices to load on
|
|
|
|
* Last field stores an index into e1000_strings
|
|
|
|
* Last entry must be all 0s
|
|
|
|
*
|
|
|
|
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
|
|
|
|
*********************************************************************/
|
|
|
|
|
|
|
|
static igb_vendor_info_t igb_vendor_info_array[] =
|
|
|
|
{
|
|
|
|
{ 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2008-07-30 21:56:53 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2009-04-10 00:05:46 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2009-12-08 01:07:44 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2008-07-30 21:56:53 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2009-06-24 17:41:29 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2009-04-10 00:05:46 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2010-09-28 00:13:15 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2010-06-30 17:26:47 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2009-12-08 01:07:44 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_DH89XXCC_SFP, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I350_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I350_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I350_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I350_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I350_VF, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2012-07-05 20:26:57 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_I210_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I210_COPPER_IT, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I210_COPPER_OEM1,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2013-10-09 17:32:52 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
2012-07-05 20:26:57 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_I210_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I210_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I210_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I211_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2013-10-09 17:32:52 +00:00
|
|
|
{ 0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS,
|
|
|
|
PCI_ANY_ID, PCI_ANY_ID, 0},
|
|
|
|
{ 0x8086, E1000_DEV_ID_I354_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0},
|
2008-02-29 21:50:11 +00:00
|
|
|
/* required last entry */
|
|
|
|
{ 0, 0, 0, 0, 0}
|
|
|
|
};
|
|
|
|
|
|
|
|
/*********************************************************************
 *  Table of branding strings for all supported NICs.
 *  Indexed by the last field of igb_vendor_info_array entries.
 *********************************************************************/

static char *igb_strings[] = {
	"Intel(R) PRO/1000 Network Connection"
};
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
* Function prototypes
|
|
|
|
*********************************************************************/
|
|
|
|
static int igb_probe(device_t);
|
|
|
|
static int igb_attach(device_t);
|
|
|
|
static int igb_detach(device_t);
|
|
|
|
static int igb_shutdown(device_t);
|
|
|
|
static int igb_suspend(device_t);
|
|
|
|
static int igb_resume(device_t);
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2009-06-24 17:41:29 +00:00
|
|
|
static int igb_mq_start(struct ifnet *, struct mbuf *);
|
2012-11-26 20:03:57 +00:00
|
|
|
static int igb_mq_start_locked(struct ifnet *, struct tx_ring *);
|
2009-06-24 17:41:29 +00:00
|
|
|
static void igb_qflush(struct ifnet *);
|
2012-04-11 21:33:45 +00:00
|
|
|
static void igb_deferred_mq_start(void *, int);
|
2012-03-30 19:54:48 +00:00
|
|
|
#else
|
|
|
|
static void igb_start(struct ifnet *);
|
|
|
|
static void igb_start_locked(struct tx_ring *, struct ifnet *ifp);
|
2009-06-24 17:41:29 +00:00
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
static int igb_ioctl(struct ifnet *, u_long, caddr_t);
|
2014-09-19 11:49:41 +00:00
|
|
|
static uint64_t igb_get_counter(if_t, ift_counter);
|
2008-02-29 21:50:11 +00:00
|
|
|
static void igb_init(void *);
|
|
|
|
static void igb_init_locked(struct adapter *);
|
|
|
|
static void igb_stop(void *);
|
|
|
|
static void igb_media_status(struct ifnet *, struct ifmediareq *);
|
|
|
|
static int igb_media_change(struct ifnet *);
|
|
|
|
static void igb_identify_hardware(struct adapter *);
|
|
|
|
static int igb_allocate_pci_resources(struct adapter *);
|
|
|
|
static int igb_allocate_msix(struct adapter *);
|
|
|
|
static int igb_allocate_legacy(struct adapter *);
|
|
|
|
static int igb_setup_msix(struct adapter *);
|
|
|
|
static void igb_free_pci_resources(struct adapter *);
|
|
|
|
static void igb_local_timer(void *);
|
2009-12-08 01:07:44 +00:00
|
|
|
static void igb_reset(struct adapter *);
|
2010-08-28 00:09:19 +00:00
|
|
|
static int igb_setup_interface(device_t, struct adapter *);
|
2008-02-29 21:50:11 +00:00
|
|
|
static int igb_allocate_queues(struct adapter *);
|
|
|
|
static void igb_configure_queues(struct adapter *);
|
|
|
|
|
|
|
|
static int igb_allocate_transmit_buffers(struct tx_ring *);
|
|
|
|
static void igb_setup_transmit_structures(struct adapter *);
|
|
|
|
static void igb_setup_transmit_ring(struct tx_ring *);
|
|
|
|
static void igb_initialize_transmit_units(struct adapter *);
|
|
|
|
static void igb_free_transmit_structures(struct adapter *);
|
|
|
|
static void igb_free_transmit_buffers(struct tx_ring *);
|
|
|
|
|
|
|
|
static int igb_allocate_receive_buffers(struct rx_ring *);
|
|
|
|
static int igb_setup_receive_structures(struct adapter *);
|
|
|
|
static int igb_setup_receive_ring(struct rx_ring *);
|
|
|
|
static void igb_initialize_receive_units(struct adapter *);
|
|
|
|
static void igb_free_receive_structures(struct adapter *);
|
|
|
|
static void igb_free_receive_buffers(struct rx_ring *);
|
2010-01-26 22:32:22 +00:00
|
|
|
static void igb_free_receive_ring(struct rx_ring *);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
static void igb_enable_intr(struct adapter *);
|
|
|
|
static void igb_disable_intr(struct adapter *);
|
|
|
|
static void igb_update_stats_counters(struct adapter *);
|
|
|
|
static bool igb_txeof(struct tx_ring *);
|
2010-01-26 22:32:22 +00:00
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
static __inline void igb_rx_discard(struct rx_ring *, int);
|
2010-01-26 22:32:22 +00:00
|
|
|
static __inline void igb_rx_input(struct rx_ring *,
|
|
|
|
struct ifnet *, struct mbuf *, u32);
|
|
|
|
|
2010-06-16 16:37:36 +00:00
|
|
|
static bool igb_rxeof(struct igb_queue *, int, int *);
|
2010-01-26 22:32:22 +00:00
|
|
|
static void igb_rx_checksum(u32, struct mbuf *, u32);
|
2013-10-09 17:32:52 +00:00
|
|
|
static int igb_tx_ctx_setup(struct tx_ring *,
|
|
|
|
struct mbuf *, u32 *, u32 *);
|
|
|
|
static int igb_tso_setup(struct tx_ring *,
|
|
|
|
struct mbuf *, u32 *, u32 *);
|
2008-02-29 21:50:11 +00:00
|
|
|
static void igb_set_promisc(struct adapter *);
|
|
|
|
static void igb_disable_promisc(struct adapter *);
|
|
|
|
static void igb_set_multi(struct adapter *);
|
|
|
|
static void igb_update_link_status(struct adapter *);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
static void igb_refresh_mbufs(struct rx_ring *, int);
|
2009-06-24 17:41:29 +00:00
|
|
|
|
2008-07-30 21:56:53 +00:00
|
|
|
static void igb_register_vlan(void *, struct ifnet *, u16);
|
|
|
|
static void igb_unregister_vlan(void *, struct ifnet *, u16);
|
2009-06-24 17:41:29 +00:00
|
|
|
static void igb_setup_vlan_hw_support(struct adapter *);
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
static int igb_xmit(struct tx_ring *, struct mbuf **);
|
|
|
|
static int igb_dma_malloc(struct adapter *, bus_size_t,
|
|
|
|
struct igb_dma_alloc *, int);
|
|
|
|
static void igb_dma_free(struct adapter *, struct igb_dma_alloc *);
|
2010-06-16 17:36:53 +00:00
|
|
|
static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
|
2008-02-29 21:50:11 +00:00
|
|
|
static void igb_print_nvm_info(struct adapter *);
|
|
|
|
static int igb_is_valid_ether_addr(u8 *);
|
2010-06-30 17:26:47 +00:00
|
|
|
static void igb_add_hw_stats(struct adapter *);
|
|
|
|
|
|
|
|
static void igb_vf_init_stats(struct adapter *);
|
|
|
|
static void igb_update_vf_stats_counters(struct adapter *);
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Management and WOL Support */
|
|
|
|
static void igb_init_manageability(struct adapter *);
|
|
|
|
static void igb_release_manageability(struct adapter *);
|
|
|
|
static void igb_get_hw_control(struct adapter *);
|
|
|
|
static void igb_release_hw_control(struct adapter *);
|
|
|
|
static void igb_enable_wakeup(device_t);
|
2010-03-31 20:43:24 +00:00
|
|
|
static void igb_led_func(void *, int);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
static int igb_irq_fast(void *);
|
2011-03-18 18:54:00 +00:00
|
|
|
static void igb_msix_que(void *);
|
|
|
|
static void igb_msix_link(void *);
|
2010-01-26 22:32:22 +00:00
|
|
|
static void igb_handle_que(void *context, int pending);
|
2010-01-27 20:12:04 +00:00
|
|
|
static void igb_handle_link(void *context, int pending);
|
2012-03-30 19:54:48 +00:00
|
|
|
static void igb_handle_link_locked(struct adapter *);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
static void igb_set_sysctl_value(struct adapter *, const char *,
|
|
|
|
const char *, int *, int);
|
2011-04-05 21:55:43 +00:00
|
|
|
static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS);
|
2011-06-20 22:59:29 +00:00
|
|
|
static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS);
|
2012-07-07 20:21:05 +00:00
|
|
|
static int igb_sysctl_eee(SYSCTL_HANDLER_ARGS);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
static poll_handler_t igb_poll;
|
|
|
|
#endif /* POLLING */
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
* FreeBSD Device Interface Entry Points
|
|
|
|
*********************************************************************/
|
|
|
|
|
|
|
|
static device_method_t igb_methods[] = {
|
|
|
|
/* Device interface */
|
|
|
|
DEVMETHOD(device_probe, igb_probe),
|
|
|
|
DEVMETHOD(device_attach, igb_attach),
|
|
|
|
DEVMETHOD(device_detach, igb_detach),
|
|
|
|
DEVMETHOD(device_shutdown, igb_shutdown),
|
|
|
|
DEVMETHOD(device_suspend, igb_suspend),
|
|
|
|
DEVMETHOD(device_resume, igb_resume),
|
2013-01-30 18:01:20 +00:00
|
|
|
DEVMETHOD_END
|
2008-02-29 21:50:11 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static driver_t igb_driver = {
|
|
|
|
"igb", igb_methods, sizeof(struct adapter),
|
|
|
|
};
|
|
|
|
|
|
|
|
static devclass_t igb_devclass;
|
|
|
|
DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
|
|
|
|
MODULE_DEPEND(igb, pci, 1, 1, 1);
|
|
|
|
MODULE_DEPEND(igb, ether, 1, 1, 1);
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
* Tunable default values.
|
|
|
|
*********************************************************************/
|
|
|
|
|
2011-12-21 20:10:11 +00:00
|
|
|
static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters");
|
|
|
|
|
2008-08-28 22:28:28 +00:00
|
|
|
/* Descriptor defaults */
|
2008-02-29 21:50:11 +00:00
|
|
|
static int igb_rxd = IGB_DEFAULT_RXD;
|
|
|
|
static int igb_txd = IGB_DEFAULT_TXD;
|
2011-12-21 20:10:11 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0,
|
|
|
|
"Number of receive descriptors per queue");
|
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0,
|
|
|
|
"Number of transmit descriptors per queue");
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2008-08-28 22:28:28 +00:00
|
|
|
/*
|
2010-01-26 22:32:22 +00:00
|
|
|
** AIM: Adaptive Interrupt Moderation
|
|
|
|
** which means that the interrupt rate
|
|
|
|
** is varied over time based on the
|
|
|
|
** traffic for that interrupt vector
|
2008-08-28 22:28:28 +00:00
|
|
|
*/
|
|
|
|
static int igb_enable_aim = TRUE;
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igb_enable_aim, 0,
|
2011-12-21 20:10:11 +00:00
|
|
|
"Enable adaptive interrupt moderation");
|
2010-01-26 22:32:22 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/*
|
|
|
|
* MSIX should be the default for best performance,
|
|
|
|
* but this allows it to be forced off for testing.
|
|
|
|
*/
|
|
|
|
static int igb_enable_msix = 1;
|
2011-12-21 20:10:11 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0,
|
|
|
|
"Enable MSI-X interrupts");
|
2009-12-08 01:07:44 +00:00
|
|
|
|
|
|
|
/*
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
** Tuneable Interrupt rate
|
|
|
|
*/
|
|
|
|
static int igb_max_interrupt_rate = 8000;
|
2011-12-21 20:10:11 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
|
|
|
|
&igb_max_interrupt_rate, 0, "Maximum interrupts per second");
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2013-02-07 15:20:54 +00:00
|
|
|
/*
|
|
|
|
** Tuneable number of buffers in the buf-ring (drbr_xxx)
|
|
|
|
*/
|
|
|
|
static int igb_buf_ring_size = IGB_BR_SIZE;
|
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN,
|
|
|
|
&igb_buf_ring_size, 0, "Size of the bufring");
|
|
|
|
#endif
|
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/*
|
|
|
|
** Header split causes the packet header to
|
|
|
|
** be dma'd to a seperate mbuf from the payload.
|
|
|
|
** this can have memory alignment benefits. But
|
|
|
|
** another plus is that small packets often fit
|
|
|
|
** into the header and thus use no cluster. Its
|
|
|
|
** a very workload dependent type feature.
|
|
|
|
*/
|
2011-12-12 18:27:34 +00:00
|
|
|
static int igb_header_split = FALSE;
|
2011-12-21 20:10:11 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0,
|
|
|
|
"Enable receive mbuf header split");
|
2009-12-08 01:07:44 +00:00
|
|
|
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
/*
|
2013-04-03 23:39:54 +00:00
|
|
|
** This will autoconfigure based on the
|
|
|
|
** number of CPUs and max supported
|
|
|
|
** MSIX messages if left at 0.
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
*/
|
2009-12-08 01:07:44 +00:00
|
|
|
static int igb_num_queues = 0;
|
2011-12-21 20:10:11 +00:00
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0,
|
|
|
|
"Number of queues to configure, 0 indicates autoconfigure");
|
|
|
|
|
2012-05-10 00:00:28 +00:00
|
|
|
/*
|
|
|
|
** Global variable to store last used CPU when binding queues
|
|
|
|
** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a
|
|
|
|
** queue is bound to a cpu.
|
|
|
|
*/
|
|
|
|
static int igb_last_bind_cpu = -1;
|
|
|
|
|
2011-12-21 20:10:11 +00:00
|
|
|
/* How many packets rxeof tries to clean at a time */
|
|
|
|
static int igb_rx_process_limit = 100;
|
|
|
|
SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
|
|
|
|
&igb_rx_process_limit, 0,
|
|
|
|
"Maximum number of received packets to process at a time, -1 means unlimited");
|
2011-12-10 07:08:52 +00:00
|
|
|
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP /* see ixgbe.c for details */
|
|
|
|
#include <dev/netmap/if_igb_netmap.h>
|
|
|
|
#endif /* DEV_NETMAP */
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
* Device identification routine
|
|
|
|
*
|
|
|
|
* igb_probe determines if the driver should be loaded on
|
|
|
|
* adapter based on PCI vendor/device id of the adapter.
|
|
|
|
*
|
|
|
|
* return BUS_PROBE_DEFAULT on success, positive on failure
|
|
|
|
*********************************************************************/
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_probe(device_t dev)
|
|
|
|
{
|
|
|
|
char adapter_name[60];
|
|
|
|
uint16_t pci_vendor_id = 0;
|
|
|
|
uint16_t pci_device_id = 0;
|
|
|
|
uint16_t pci_subvendor_id = 0;
|
|
|
|
uint16_t pci_subdevice_id = 0;
|
|
|
|
igb_vendor_info_t *ent;
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_probe: begin");
|
|
|
|
|
|
|
|
pci_vendor_id = pci_get_vendor(dev);
|
|
|
|
if (pci_vendor_id != IGB_VENDOR_ID)
|
|
|
|
return (ENXIO);
|
|
|
|
|
|
|
|
pci_device_id = pci_get_device(dev);
|
|
|
|
pci_subvendor_id = pci_get_subvendor(dev);
|
|
|
|
pci_subdevice_id = pci_get_subdevice(dev);
|
|
|
|
|
|
|
|
ent = igb_vendor_info_array;
|
|
|
|
while (ent->vendor_id != 0) {
|
|
|
|
if ((pci_vendor_id == ent->vendor_id) &&
|
|
|
|
(pci_device_id == ent->device_id) &&
|
|
|
|
|
|
|
|
((pci_subvendor_id == ent->subvendor_id) ||
|
|
|
|
(ent->subvendor_id == PCI_ANY_ID)) &&
|
|
|
|
|
|
|
|
((pci_subdevice_id == ent->subdevice_id) ||
|
|
|
|
(ent->subdevice_id == PCI_ANY_ID))) {
|
|
|
|
sprintf(adapter_name, "%s %s",
|
|
|
|
igb_strings[ent->index],
|
|
|
|
igb_driver_version);
|
|
|
|
device_set_desc_copy(dev, adapter_name);
|
|
|
|
return (BUS_PROBE_DEFAULT);
|
|
|
|
}
|
|
|
|
ent++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
igb_attach(device_t dev)
{
	struct adapter	*adapter;
	int		error = 0;
	u16		eeprom_data;

	INIT_DEBUGOUT("igb_attach: begin");

	/* Honor hint.igb.N.disabled="1" device hints. */
	if (resource_disabled("igb", device_get_unit(dev))) {
		device_printf(dev, "Disabled by device hint\n");
		return (ENXIO);
	}

	adapter = device_get_softc(dev);
	adapter->dev = adapter->osdep.dev = dev;
	IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");

	/* Tunable/sysctl: adaptive interrupt moderation on/off. */
	igb_set_sysctl_value(adapter, "enable_aim",
	    "Interrupt Moderation", &adapter->enable_aim,
	    igb_enable_aim);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");

	/* Timer callout runs under the core mutex. */
	callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);

	/* Determine hardware and mac info */
	igb_identify_hardware(adapter);

	/* Setup PCI resources */
	if (igb_allocate_pci_resources(adapter)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_pci;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto err_pci;
	}

	e1000_get_bus_info(&adapter->hw);

	/* Sysctl for limiting the amount of work done in the taskqueue */
	igb_set_sysctl_value(adapter, "rx_processing_limit",
	    "max number of rx packets to process",
	    &adapter->rx_process_limit, igb_rx_process_limit);

	/*
	 * Validate number of transmit and receive descriptors. It
	 * must not exceed hardware maximum, and must be multiple
	 * of E1000_DBA_ALIGN.
	 */
	if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, igb_txd);
		adapter->num_tx_desc = IGB_DEFAULT_TXD;
	} else
		adapter->num_tx_desc = igb_txd;
	if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
	    (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    IGB_DEFAULT_RXD, igb_rxd);
		adapter->num_rx_desc = IGB_DEFAULT_RXD;
	} else
		adapter->num_rx_desc = igb_rxd;

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/*
	 * Set the frame limits assuming
	 * standard ethernet sized frames.
	 */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;

	/*
	** Allocate and Setup Queues
	*/
	if (igb_allocate_queues(adapter)) {
		error = ENOMEM;
		goto err_pci;
	}

	/* Allocate the appropriate stats memory (VF vs PF layouts differ) */
	if (adapter->vf_ifp) {
		adapter->stats =
		    (struct e1000_vf_stats *)malloc(sizeof \
		    (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
		igb_vf_init_stats(adapter);
	} else
		adapter->stats =
		    (struct e1000_hw_stats *)malloc(sizeof \
		    (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adapter->stats == NULL) {
		device_printf(dev, "Can not allocate stats memory\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Allocate multicast array memory. */
	adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
	if (adapter->mta == NULL) {
		device_printf(dev, "Can not allocate multicast setup array\n");
		error = ENOMEM;
		goto err_late;
	}

	/* Some adapter-specific advanced features (i350 and newer) */
	if (adapter->hw.mac.type >= e1000_i350) {
		SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW,
		    adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce");
		SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "eee_disabled", CTLTYPE_INT|CTLFLAG_RW,
		    adapter, 0, igb_sysctl_eee, "I",
		    "Disable Energy Efficient Ethernet");
		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
			if (adapter->hw.mac.type == e1000_i354)
				e1000_set_eee_i354(&adapter->hw);
			else
				e1000_set_eee_i350(&adapter->hw);
		}
	}

	/*
	** Start from a known state, this is
	** important in reading the nvm and
	** mac from that.
	*/
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	/* i210/i211 have no external NVM, so the checksum is skipped there. */
	if (((adapter->hw.mac.type != e1000_i210) &&
	    (adapter->hw.mac.type != e1000_i211)) &&
	    (e1000_validate_nvm_checksum(&adapter->hw) < 0)) {
		/*
		** Some PCI-E parts fail the first check due to
		** the link being in sleep state, call it again,
		** if it fails a second time its a real issue.
		*/
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto err_late;
		}
	}

	/*
	** Copy the permanent MAC address out of the EEPROM
	*/
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto err_late;
	}
	/* Check its sanity */
	if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto err_late;
	}

	/* Setup OS specific network interface */
	if (igb_setup_interface(dev, adapter) != 0)
		goto err_late;
	/* Now get a good starting state */
	igb_reset(adapter);

	/* Initialize statistics */
	igb_update_stats_counters(adapter);

	adapter->hw.mac.get_link_status = 1;
	igb_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw))
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");

	/* Determine if we have to control management hardware */
	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		adapter->wol = E1000_WUFC_MAG;

	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	     igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	     igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);

	igb_add_hw_stats(adapter);

	/* Tell the stack that the interface is not active */
	adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	adapter->led_dev = led_create(igb_led_func, adapter,
	    device_get_nameunit(dev));

	/*
	** Configure Interrupts
	*/
	if ((adapter->msix > 1) && (igb_enable_msix))
		error = igb_allocate_msix(adapter);
	else /* MSI or Legacy */
		error = igb_allocate_legacy(adapter);
	if (error)
		goto err_late;

#ifdef DEV_NETMAP
	igb_netmap_attach(adapter);
#endif /* DEV_NETMAP */
	INIT_DEBUGOUT("igb_attach: end");

	return (0);

err_late:
	/*
	 * NOTE(review): igb_detach() already frees the TX/RX structures,
	 * PCI resources, ifp, mta and destroys the core lock; falling
	 * through to the frees below looks like it can double-free those
	 * resources (and igb_detach dereferences adapter->ifp, which is
	 * NULL when igb_setup_interface has not run yet).  TODO: confirm
	 * against the err_late callers and restructure the unwind.
	 */
	igb_detach(dev);
	igb_free_transmit_structures(adapter);
	igb_free_receive_structures(adapter);
	igb_release_hw_control(adapter);
err_pci:
	igb_free_pci_resources(adapter);
	if (adapter->ifp != NULL)
		if_free(adapter->ifp);
	free(adapter->mta, M_DEVBUF);
	IGB_CORE_LOCK_DESTROY(adapter);

	return (error);
}
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
igb_detach(device_t dev)
{
	struct adapter	*adapter = device_get_softc(dev);
	struct ifnet	*ifp = adapter->ifp;

	INIT_DEBUGOUT("igb_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Detach from the network stack before tearing anything down. */
	ether_ifdetach(adapter->ifp);

	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* Stop the adapter; in_detach tells other paths to back off. */
	IGB_CORE_LOCK(adapter);
	adapter->in_detach = 1;
	igb_stop(adapter);
	IGB_CORE_UNLOCK(adapter);

	e1000_phy_hw_reset(&adapter->hw);

	/* Give control back to firmware */
	igb_release_manageability(adapter);
	igb_release_hw_control(adapter);

	/* Arm Wake-on-LAN if it was configured. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		igb_enable_wakeup(dev);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Wait for any in-flight timer callout to finish. */
	callout_drain(&adapter->timer);

#ifdef DEV_NETMAP
	netmap_detach(adapter->ifp);
#endif /* DEV_NETMAP */
	igb_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	igb_free_transmit_structures(adapter);
	igb_free_receive_structures(adapter);
	if (adapter->mta != NULL)
		free(adapter->mta, M_DEVBUF);

	IGB_CORE_LOCK_DESTROY(adapter);

	return (0);
}
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Shutdown entry point
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_shutdown(device_t dev)
|
|
|
|
{
|
|
|
|
return igb_suspend(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Suspend/resume device methods.
 */

/*
 * Quiesce the adapter for a system suspend: stop the hardware, hand
 * control back to firmware, and arm Wake-on-LAN when configured.
 */
static int
igb_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IGB_CORE_LOCK(adapter);

	igb_stop(adapter);

	/* Give control back to firmware */
	igb_release_manageability(adapter);
	igb_release_hw_control(adapter);

	/* Program the wakeup filters and enable PME if WoL is wanted. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		igb_enable_wakeup(dev);
	}

	IGB_CORE_UNLOCK(adapter);

	return bus_generic_suspend(dev);
}
/*
 * Resume from suspend: re-initialize the hardware and, if the link is
 * up and the interface running, restart transmission on every queue.
 */
static int
igb_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct tx_ring	*txr = adapter->tx_rings;
	struct ifnet *ifp = adapter->ifp;

	IGB_CORE_LOCK(adapter);
	igb_init_locked(adapter);
	igb_init_manageability(adapter);

	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
		/* Kick any packets queued while we were suspended. */
		for (int i = 0; i < adapter->num_queues; i++, txr++) {
			IGB_TX_LOCK(txr);
#ifndef IGB_LEGACY_TX
			/* Process the stack queue only if not depleted */
			if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
			    !drbr_empty(ifp, txr->br))
				igb_mq_start_locked(ifp, txr);
#else
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				igb_start_locked(txr, ifp);
#endif
			IGB_TX_UNLOCK(txr);
		}
	}
	IGB_CORE_UNLOCK(adapter);

	return bus_generic_resume(dev);
}
|
|
|
|
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifdef IGB_LEGACY_TX
|
2012-06-01 15:52:41 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
 *  Transmit entry point
 *
 *  igb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available stack is notified and
 *  the packet is requeued.
 **********************************************************************/

/*
 * Drain the interface send queue onto the given TX ring.  Caller must
 * hold the ring's TX lock.  Returns early when the interface is not
 * running or the link is down; marks the ring DEPLETED when descriptor
 * space runs out so the watchdog/refresh paths know to restart it.
 */
static void
igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct adapter	*adapter = ifp->if_softc;
	struct mbuf	*m_head;

	IGB_TX_LOCK_ASSERT(txr);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Call cleanup if number of TX descriptors low */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Stop (before dequeuing) if a full frame may not fit. */
		if (txr->tx_avail <= IGB_MAX_SCATTER) {
			txr->queue_status |= IGB_QUEUE_DEPLETED;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 *  Encapsulation can modify our pointer, and or make it
		 *  NULL on failure.  In that event, we can't requeue.
		 */
		if (igb_xmit(txr, &m_head)) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			if (txr->tx_avail <= IGB_MAX_SCATTER)
				txr->queue_status |= IGB_QUEUE_DEPLETED;
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_time = ticks;
		txr->queue_status |= IGB_QUEUE_WORKING;
	}
}
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
|
|
|
* Legacy TX driver routine, called from the
|
|
|
|
* stack, always uses tx[0], and spins for it.
|
|
|
|
* Should not be used with multiqueue tx
|
|
|
|
*/
|
2008-02-29 21:50:11 +00:00
|
|
|
static void
|
|
|
|
igb_start(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = ifp->if_softc;
|
2009-06-24 17:41:29 +00:00
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
|
|
|
|
IGB_TX_LOCK(txr);
|
|
|
|
igb_start_locked(txr, ifp);
|
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
2009-06-24 17:41:29 +00:00
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2013-03-29 18:25:45 +00:00
|
|
|
#else /* ~IGB_LEGACY_TX */
|
2012-07-05 20:26:57 +00:00
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
** Multiqueue Transmit Entry:
**  quick turnaround to the stack
**
*/

/*
 * if_transmit entry point: pick a TX ring for the mbuf, enqueue it on
 * that ring's buf_ring, and either service the ring immediately (if
 * its lock is free) or defer to the queue's taskqueue.
 */
static int
igb_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct adapter		*adapter = ifp->if_softc;
	struct igb_queue	*que;
	struct tx_ring		*txr;
	int 			i, err = 0;
#ifdef	RSS
	uint32_t		bucket_id;
#endif

	/* Which queue to use */
	/*
	 * When doing RSS, map it to the same outbound queue
	 * as the incoming flow would be mapped to.
	 *
	 * If everything is setup correctly, it should be the
	 * same bucket that the current CPU we're on is.
	 */
	if ((m->m_flags & M_FLOWID) != 0) {
#ifdef	RSS
		if (rss_hash2bucket(m->m_pkthdr.flowid,
		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
			/* XXX TODO: spit out something if bucket_id > num_queues? */
			i = bucket_id % adapter->num_queues;
		} else {
#endif
			i = m->m_pkthdr.flowid % adapter->num_queues;
#ifdef	RSS
		}
#endif
	} else {
		/* No flow id: spread load by the CPU we are running on. */
		i = curcpu % adapter->num_queues;
	}
	txr = &adapter->tx_rings[i];
	que = &adapter->queues[i];

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	/* Service the ring now if uncontended, else punt to the taskqueue. */
	if (IGB_TX_TRYLOCK(txr)) {
		igb_mq_start_locked(ifp, txr);
		IGB_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &txr->txq_task);

	return (0);
}
|
|
|
|
/*
 * Drain the ring's buf_ring onto the hardware.  Caller holds the TX
 * lock.  Uses the drbr peek/advance/putback protocol: a packet is only
 * removed from the ring once igb_xmit() has accepted it, so a transient
 * failure leaves it queued for the next attempt.  Returns 0 or the
 * last igb_xmit() error (ENETDOWN when down/link-less).
 */
static int
igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
{
	struct adapter  *adapter = txr->adapter;
	struct mbuf	*next;
	int		err = 0, enq = 0;

	IGB_TX_LOCK_ASSERT(txr);

	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
	    adapter->link_active == 0)
		return (ENETDOWN);


	/* Process the queue */
	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
		if ((err = igb_xmit(txr, &next)) != 0) {
			if (next == NULL) {
				/* It was freed, move forward */
				drbr_advance(ifp, txr->br);
			} else {
				/*
				 * Still have one left, it may not be
				 * the same since the transmit function
				 * may have changed it.
				 */
				drbr_putback(ifp, txr->br, next);
			}
			break;
		}
		drbr_advance(ifp, txr->br);
		enq++;
		/* Driver maintains its own ifnet byte/mcast counters. */
		if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len);
		if (next->m_flags & M_MCAST)
			if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	if (enq > 0) {
		/* Set the watchdog */
		txr->queue_status |= IGB_QUEUE_WORKING;
		txr->watchdog_time = ticks;
	}
	/* Reclaim descriptors when running low; mark depleted if still short. */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);
	if (txr->tx_avail <= IGB_MAX_SCATTER)
		txr->queue_status |= IGB_QUEUE_DEPLETED;
	return (err);
}
|
|
|
2012-04-11 21:33:45 +00:00
|
|
|
/*
|
|
|
|
* Called from a taskqueue to drain queued transmit packets.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_deferred_mq_start(void *arg, int pending)
|
|
|
|
{
|
|
|
|
struct tx_ring *txr = arg;
|
|
|
|
struct adapter *adapter = txr->adapter;
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
|
|
|
|
|
|
|
IGB_TX_LOCK(txr);
|
|
|
|
if (!drbr_empty(ifp, txr->br))
|
2012-11-26 20:03:57 +00:00
|
|
|
igb_mq_start_locked(ifp, txr);
|
2012-04-11 21:33:45 +00:00
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
|
|
|
** Flush all ring buffers
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_qflush(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = ifp->if_softc;
|
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
|
|
|
struct mbuf *m;
|
|
|
|
|
|
|
|
for (int i = 0; i < adapter->num_queues; i++, txr++) {
|
|
|
|
IGB_TX_LOCK(txr);
|
|
|
|
while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
|
|
|
|
m_freem(m);
|
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
|
|
|
if_qflush(ifp);
|
|
|
|
}
|
2013-03-29 18:25:45 +00:00
|
|
|
#endif /* ~IGB_LEGACY_TX */
|
2009-06-24 17:41:29 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
* Ioctl entry point
|
|
|
|
*
|
|
|
|
* igb_ioctl is called when the user wants to configure the
|
|
|
|
* interface.
|
|
|
|
*
|
|
|
|
* return 0 on success, positive on failure
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = ifp->if_softc;
|
2011-06-20 22:59:29 +00:00
|
|
|
struct ifreq *ifr = (struct ifreq *)data;
|
|
|
|
#if defined(INET) || defined(INET6)
|
|
|
|
struct ifaddr *ifa = (struct ifaddr *)data;
|
2008-11-06 11:11:25 +00:00
|
|
|
#endif
|
2011-12-11 18:46:14 +00:00
|
|
|
bool avoid_reset = FALSE;
|
2011-06-20 22:59:29 +00:00
|
|
|
int error = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (adapter->in_detach)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
switch (command) {
|
|
|
|
case SIOCSIFADDR:
|
2008-11-06 11:11:25 +00:00
|
|
|
#ifdef INET
|
2011-06-20 22:59:29 +00:00
|
|
|
if (ifa->ifa_addr->sa_family == AF_INET)
|
|
|
|
avoid_reset = TRUE;
|
|
|
|
#endif
|
2011-12-11 09:37:25 +00:00
|
|
|
#ifdef INET6
|
2011-06-20 22:59:29 +00:00
|
|
|
if (ifa->ifa_addr->sa_family == AF_INET6)
|
|
|
|
avoid_reset = TRUE;
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
** Calling init results in link renegotiation,
|
|
|
|
** so we avoid doing it when possible.
|
|
|
|
*/
|
|
|
|
if (avoid_reset) {
|
2008-02-29 21:50:11 +00:00
|
|
|
ifp->if_flags |= IFF_UP;
|
2011-06-20 22:59:29 +00:00
|
|
|
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
|
|
|
|
igb_init(adapter);
|
2011-12-11 18:46:14 +00:00
|
|
|
#ifdef INET
|
2010-01-26 22:32:22 +00:00
|
|
|
if (!(ifp->if_flags & IFF_NOARP))
|
|
|
|
arp_ifinit(ifp, ifa);
|
2011-12-11 18:46:14 +00:00
|
|
|
#endif
|
2011-07-07 00:46:50 +00:00
|
|
|
} else
|
2008-02-29 21:50:11 +00:00
|
|
|
error = ether_ioctl(ifp, command, data);
|
|
|
|
break;
|
|
|
|
case SIOCSIFMTU:
|
|
|
|
{
|
|
|
|
int max_frame_size;
|
|
|
|
|
|
|
|
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
|
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
max_frame_size = 9234;
|
|
|
|
if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
|
|
|
|
ETHER_CRC_LEN) {
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ifp->if_mtu = ifr->ifr_mtu;
|
|
|
|
adapter->max_frame_size =
|
|
|
|
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
|
|
|
|
igb_init_locked(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case SIOCSIFFLAGS:
|
|
|
|
IOCTL_DEBUGOUT("ioctl rcv'd:\
|
|
|
|
SIOCSIFFLAGS (Set Interface Flags)");
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
if (ifp->if_flags & IFF_UP) {
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
|
|
|
|
if ((ifp->if_flags ^ adapter->if_flags) &
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
(IFF_PROMISC | IFF_ALLMULTI)) {
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_disable_promisc(adapter);
|
|
|
|
igb_set_promisc(adapter);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
igb_init_locked(adapter);
|
|
|
|
} else
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
|
|
|
|
igb_stop(adapter);
|
|
|
|
adapter->if_flags = ifp->if_flags;
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
break;
|
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
|
|
|
IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_disable_intr(adapter);
|
|
|
|
igb_set_multi(adapter);
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
if (!(ifp->if_capenable & IFCAP_POLLING))
|
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_enable_intr(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SIOCSIFMEDIA:
|
|
|
|
/* Check SOL/IDER usage */
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
if (e1000_check_reset_block(&adapter->hw)) {
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
device_printf(adapter->dev, "Media change is"
|
|
|
|
" blocked due to SOL/IDER session.\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
case SIOCGIFMEDIA:
|
|
|
|
IOCTL_DEBUGOUT("ioctl rcv'd: \
|
|
|
|
SIOCxIFMEDIA (Get/Set Interface Media)");
|
|
|
|
error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
|
|
|
|
break;
|
|
|
|
case SIOCSIFCAP:
|
|
|
|
{
|
|
|
|
int mask, reinit;
|
|
|
|
|
|
|
|
IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
|
|
|
|
reinit = 0;
|
|
|
|
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
if (mask & IFCAP_POLLING) {
|
|
|
|
if (ifr->ifr_reqcap & IFCAP_POLLING) {
|
|
|
|
error = ether_poll_register(igb_poll, ifp);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_disable_intr(adapter);
|
|
|
|
ifp->if_capenable |= IFCAP_POLLING;
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
} else {
|
|
|
|
error = ether_poll_deregister(ifp);
|
|
|
|
/* Enable interrupt even in error case */
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_enable_intr(adapter);
|
|
|
|
ifp->if_capenable &= ~IFCAP_POLLING;
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
if (mask & IFCAP_HWCSUM) {
|
|
|
|
ifp->if_capenable ^= IFCAP_HWCSUM;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
|
|
|
if (mask & IFCAP_TSO4) {
|
|
|
|
ifp->if_capenable ^= IFCAP_TSO4;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2013-10-09 17:32:52 +00:00
|
|
|
if (mask & IFCAP_TSO6) {
|
|
|
|
ifp->if_capenable ^= IFCAP_TSO6;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
if (mask & IFCAP_VLAN_HWTAGGING) {
|
|
|
|
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2010-04-08 00:50:43 +00:00
|
|
|
if (mask & IFCAP_VLAN_HWFILTER) {
|
|
|
|
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2011-06-20 22:59:29 +00:00
|
|
|
if (mask & IFCAP_VLAN_HWTSO) {
|
|
|
|
ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
if (mask & IFCAP_LRO) {
|
2009-06-24 17:41:29 +00:00
|
|
|
ifp->if_capenable ^= IFCAP_LRO;
|
|
|
|
reinit = 1;
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
|
|
|
|
igb_init(adapter);
|
|
|
|
VLAN_CAPABILITIES(ifp);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
default:
|
|
|
|
error = ether_ioctl(ifp, command, data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
* Init entry point
|
|
|
|
*
|
|
|
|
* This routine is used in two ways. It is used by the stack as
|
|
|
|
* init entry point in network interface structure. It is also used
|
|
|
|
* by the driver as a hw/sw initialization routine to get to a
|
|
|
|
* consistent state.
|
|
|
|
*
|
|
|
|
* return 0 on success, positive on failure
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_init_locked(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
|
|
|
device_t dev = adapter->dev;
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_init: begin");
|
|
|
|
|
|
|
|
IGB_CORE_LOCK_ASSERT(adapter);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
igb_disable_intr(adapter);
|
|
|
|
callout_stop(&adapter->timer);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Get the latest mac address, User can use a LAA */
|
|
|
|
bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
|
|
|
|
ETHER_ADDR_LEN);
|
|
|
|
|
|
|
|
/* Put the address into the Receive Address Array */
|
|
|
|
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
igb_reset(adapter);
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_update_link_status(adapter);
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Set hardware offload abilities */
|
|
|
|
ifp->if_hwassist = 0;
|
2009-04-10 00:05:46 +00:00
|
|
|
if (ifp->if_capenable & IFCAP_TXCSUM) {
|
2008-02-29 21:50:11 +00:00
|
|
|
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
|
2009-04-10 00:05:46 +00:00
|
|
|
#if __FreeBSD_version >= 800000
|
|
|
|
if (adapter->hw.mac.type == e1000_82576)
|
|
|
|
ifp->if_hwassist |= CSUM_SCTP;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
if (ifp->if_capenable & IFCAP_TSO)
|
2008-02-29 21:50:11 +00:00
|
|
|
ifp->if_hwassist |= CSUM_TSO;
|
|
|
|
|
|
|
|
/* Configure for OS presence */
|
|
|
|
igb_init_manageability(adapter);
|
|
|
|
|
|
|
|
/* Prepare transmit descriptors and buffers */
|
|
|
|
igb_setup_transmit_structures(adapter);
|
|
|
|
igb_initialize_transmit_units(adapter);
|
|
|
|
|
|
|
|
/* Setup Multicast table */
|
|
|
|
igb_set_multi(adapter);
|
|
|
|
|
2009-04-10 00:05:46 +00:00
|
|
|
/*
|
|
|
|
** Figure out the desired mbuf pool
|
|
|
|
** for doing jumbo/packetsplit
|
|
|
|
*/
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
if (adapter->max_frame_size <= 2048)
|
|
|
|
adapter->rx_mbuf_sz = MCLBYTES;
|
|
|
|
else if (adapter->max_frame_size <= 4096)
|
2009-04-10 00:05:46 +00:00
|
|
|
adapter->rx_mbuf_sz = MJUMPAGESIZE;
|
|
|
|
else
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
adapter->rx_mbuf_sz = MJUM9BYTES;
|
2009-04-10 00:05:46 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Prepare receive descriptors and buffers */
|
|
|
|
if (igb_setup_receive_structures(adapter)) {
|
|
|
|
device_printf(dev, "Could not setup receive structures\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
igb_initialize_receive_units(adapter);
|
|
|
|
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
/* Enable VLAN support */
|
|
|
|
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
|
|
|
|
igb_setup_vlan_hw_support(adapter);
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Don't lose promiscuous settings */
|
|
|
|
igb_set_promisc(adapter);
|
|
|
|
|
|
|
|
ifp->if_drv_flags |= IFF_DRV_RUNNING;
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
|
|
|
|
|
|
|
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
|
|
|
|
e1000_clear_hw_cntrs_base_generic(&adapter->hw);
|
|
|
|
|
|
|
|
if (adapter->msix > 1) /* Set up queue routing */
|
|
|
|
igb_configure_queues(adapter);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/* this clears any pending interrupts */
|
|
|
|
E1000_READ_REG(&adapter->hw, E1000_ICR);
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
/*
|
|
|
|
* Only enable interrupts if we are not polling, make sure
|
|
|
|
* they are off otherwise.
|
|
|
|
*/
|
|
|
|
if (ifp->if_capenable & IFCAP_POLLING)
|
|
|
|
igb_disable_intr(adapter);
|
|
|
|
else
|
|
|
|
#endif /* DEVICE_POLLING */
|
|
|
|
{
|
2011-04-05 21:55:43 +00:00
|
|
|
igb_enable_intr(adapter);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
|
2010-02-01 19:28:43 +00:00
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
/* Set Energy Efficient Ethernet */
|
2013-10-09 17:32:52 +00:00
|
|
|
if (adapter->hw.phy.media_type == e1000_media_type_copper) {
|
|
|
|
if (adapter->hw.mac.type == e1000_i354)
|
|
|
|
e1000_set_eee_i354(&adapter->hw);
|
|
|
|
else
|
|
|
|
e1000_set_eee_i350(&adapter->hw);
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_init(void *arg)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = arg;
|
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_init_locked(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_handle_que(void *context, int pending)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_queue *que = context;
|
|
|
|
struct adapter *adapter = que->adapter;
|
|
|
|
struct tx_ring *txr = que->txr;
|
2009-12-08 01:07:44 +00:00
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-04-14 20:55:33 +00:00
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
|
2010-06-16 16:37:36 +00:00
|
|
|
bool more;
|
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
more = igb_rxeof(que, adapter->rx_process_limit, NULL);
|
2010-01-26 22:32:22 +00:00
|
|
|
|
2010-04-14 20:55:33 +00:00
|
|
|
IGB_TX_LOCK(txr);
|
2012-03-30 19:54:48 +00:00
|
|
|
igb_txeof(txr);
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2011-12-10 07:08:52 +00:00
|
|
|
/* Process the stack queue only if not depleted */
|
|
|
|
if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
|
|
|
|
!drbr_empty(ifp, txr->br))
|
2012-11-26 20:03:57 +00:00
|
|
|
igb_mq_start_locked(ifp, txr);
|
2009-06-24 17:41:29 +00:00
|
|
|
#else
|
2012-03-30 19:54:48 +00:00
|
|
|
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
|
|
|
|
igb_start_locked(txr, ifp);
|
2009-06-24 17:41:29 +00:00
|
|
|
#endif
|
2010-01-26 22:32:22 +00:00
|
|
|
IGB_TX_UNLOCK(txr);
|
2011-12-10 07:08:52 +00:00
|
|
|
/* Do we need another? */
|
2012-03-30 19:54:48 +00:00
|
|
|
if (more) {
|
2010-04-14 20:55:33 +00:00
|
|
|
taskqueue_enqueue(que->tq, &que->que_task);
|
|
|
|
return;
|
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
|
|
|
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
2010-06-11 20:54:27 +00:00
|
|
|
if (ifp->if_capenable & IFCAP_POLLING)
|
|
|
|
return;
|
2010-02-01 19:28:43 +00:00
|
|
|
#endif
|
2010-06-11 20:54:27 +00:00
|
|
|
/* Reenable this interrupt */
|
|
|
|
if (que->eims)
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
|
|
|
|
else
|
|
|
|
igb_enable_intr(adapter);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2010-01-27 20:12:04 +00:00
|
|
|
/* Deal with link in a sleepable context */
|
|
|
|
static void
|
|
|
|
igb_handle_link(void *context, int pending)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = context;
|
|
|
|
|
2012-03-30 19:54:48 +00:00
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_handle_link_locked(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_handle_link_locked(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
|
|
|
|
|
|
|
IGB_CORE_LOCK_ASSERT(adapter);
|
2010-01-27 20:12:04 +00:00
|
|
|
adapter->hw.mac.get_link_status = 1;
|
|
|
|
igb_update_link_status(adapter);
|
2012-03-30 19:54:48 +00:00
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) {
|
|
|
|
for (int i = 0; i < adapter->num_queues; i++, txr++) {
|
|
|
|
IGB_TX_LOCK(txr);
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2012-03-30 19:54:48 +00:00
|
|
|
/* Process the stack queue only if not depleted */
|
|
|
|
if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
|
|
|
|
!drbr_empty(ifp, txr->br))
|
2012-11-26 20:03:57 +00:00
|
|
|
igb_mq_start_locked(ifp, txr);
|
2012-03-30 19:54:48 +00:00
|
|
|
#else
|
|
|
|
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
|
|
|
|
igb_start_locked(txr, ifp);
|
|
|
|
#endif
|
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
|
|
|
}
|
2010-01-27 20:12:04 +00:00
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* MSI/Legacy Deferred
|
|
|
|
* Interrupt Service routine
|
|
|
|
*
|
|
|
|
*********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_irq_fast(void *arg)
|
|
|
|
{
|
2010-06-15 21:11:51 +00:00
|
|
|
struct adapter *adapter = arg;
|
|
|
|
struct igb_queue *que = adapter->queues;
|
|
|
|
u32 reg_icr;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
|
|
|
|
|
|
|
|
/* Hot eject? */
|
|
|
|
if (reg_icr == 0xffffffff)
|
|
|
|
return FILTER_STRAY;
|
|
|
|
|
|
|
|
/* Definitely not our interrupt. */
|
|
|
|
if (reg_icr == 0x0)
|
|
|
|
return FILTER_STRAY;
|
|
|
|
|
|
|
|
if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
|
|
|
|
return FILTER_STRAY;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mask interrupts until the taskqueue is finished running. This is
|
|
|
|
* cheap, just assume that it is needed. This also works around the
|
|
|
|
* MSI message reordering errata on certain systems.
|
|
|
|
*/
|
|
|
|
igb_disable_intr(adapter);
|
2010-06-15 21:11:51 +00:00
|
|
|
taskqueue_enqueue(que->tq, &que->que_task);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Link status change */
|
2010-01-27 20:12:04 +00:00
|
|
|
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
|
2010-06-15 21:11:51 +00:00
|
|
|
taskqueue_enqueue(que->tq, &adapter->link_task);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (reg_icr & E1000_ICR_RXO)
|
|
|
|
adapter->rx_overruns++;
|
|
|
|
return FILTER_HANDLED;
|
|
|
|
}
|
|
|
|
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
#if __FreeBSD_version >= 800000
|
|
|
|
#define POLL_RETURN_COUNT(a) (a)
|
|
|
|
static int
|
|
|
|
#else
|
|
|
|
#define POLL_RETURN_COUNT(a)
|
|
|
|
static void
|
|
|
|
#endif
|
|
|
|
igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
|
|
|
|
{
|
2010-03-31 23:24:42 +00:00
|
|
|
struct adapter *adapter = ifp->if_softc;
|
2012-08-06 22:43:49 +00:00
|
|
|
struct igb_queue *que;
|
|
|
|
struct tx_ring *txr;
|
2010-03-31 23:24:42 +00:00
|
|
|
u32 reg_icr, rx_done = 0;
|
|
|
|
u32 loop = IGB_MAX_LOOP;
|
|
|
|
bool more;
|
2010-02-01 19:28:43 +00:00
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
return POLL_RETURN_COUNT(rx_done);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmd == POLL_AND_CHECK_STATUS) {
|
|
|
|
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
|
|
|
|
/* Link status change */
|
|
|
|
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
|
2012-03-30 19:54:48 +00:00
|
|
|
igb_handle_link_locked(adapter);
|
2010-02-01 19:28:43 +00:00
|
|
|
|
|
|
|
if (reg_icr & E1000_ICR_RXO)
|
|
|
|
adapter->rx_overruns++;
|
|
|
|
}
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
|
2012-08-06 22:43:49 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
|
|
|
que = &adapter->queues[i];
|
|
|
|
txr = que->txr;
|
|
|
|
|
|
|
|
igb_rxeof(que, count, &rx_done);
|
2010-02-01 19:28:43 +00:00
|
|
|
|
2012-08-06 22:43:49 +00:00
|
|
|
IGB_TX_LOCK(txr);
|
|
|
|
do {
|
|
|
|
more = igb_txeof(txr);
|
|
|
|
} while (loop-- && more);
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2012-08-06 22:43:49 +00:00
|
|
|
if (!drbr_empty(ifp, txr->br))
|
2012-11-26 20:03:57 +00:00
|
|
|
igb_mq_start_locked(ifp, txr);
|
2010-02-01 19:28:43 +00:00
|
|
|
#else
|
2012-08-06 22:43:49 +00:00
|
|
|
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
|
|
|
|
igb_start_locked(txr, ifp);
|
2010-02-01 19:28:43 +00:00
|
|
|
#endif
|
2012-08-06 22:43:49 +00:00
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
|
|
|
|
2010-02-01 19:28:43 +00:00
|
|
|
return POLL_RETURN_COUNT(rx_done);
|
|
|
|
}
|
|
|
|
#endif /* DEVICE_POLLING */
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
2011-12-10 07:08:52 +00:00
|
|
|
* MSIX Que Interrupt Service routine
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_msix_que(void *arg)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_queue *que = arg;
|
|
|
|
struct adapter *adapter = que->adapter;
|
2012-03-30 19:54:48 +00:00
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct tx_ring *txr = que->txr;
|
|
|
|
struct rx_ring *rxr = que->rxr;
|
|
|
|
u32 newitr = 0;
|
2012-03-30 19:54:48 +00:00
|
|
|
bool more_rx;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2013-08-06 18:00:53 +00:00
|
|
|
/* Ignore spurious interrupts */
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
|
|
|
|
return;
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
|
|
|
|
++que->irqs;
|
2009-04-10 00:05:46 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
IGB_TX_LOCK(txr);
|
2012-03-30 19:54:48 +00:00
|
|
|
igb_txeof(txr);
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2012-03-30 19:54:48 +00:00
|
|
|
/* Process the stack queue only if not depleted */
|
|
|
|
if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) &&
|
|
|
|
!drbr_empty(ifp, txr->br))
|
2012-11-26 20:03:57 +00:00
|
|
|
igb_mq_start_locked(ifp, txr);
|
2012-03-30 19:54:48 +00:00
|
|
|
#else
|
|
|
|
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
|
|
|
|
igb_start_locked(txr, ifp);
|
|
|
|
#endif
|
2009-04-10 00:05:46 +00:00
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
|
2010-06-16 16:37:36 +00:00
|
|
|
more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
|
2008-08-28 22:28:28 +00:00
|
|
|
|
2011-12-21 20:10:11 +00:00
|
|
|
if (adapter->enable_aim == FALSE)
|
2010-01-26 22:32:22 +00:00
|
|
|
goto no_calc;
|
|
|
|
/*
|
|
|
|
** Do Adaptive Interrupt Moderation:
|
|
|
|
** - Write out last calculated setting
|
|
|
|
** - Calculate based on average size over
|
|
|
|
** the last interval.
|
|
|
|
*/
|
|
|
|
if (que->eitr_setting)
|
|
|
|
E1000_WRITE_REG(&adapter->hw,
|
|
|
|
E1000_EITR(que->msix), que->eitr_setting);
|
|
|
|
|
|
|
|
que->eitr_setting = 0;
|
|
|
|
|
|
|
|
/* Idle, do nothing */
|
|
|
|
if ((txr->bytes == 0) && (rxr->bytes == 0))
|
|
|
|
goto no_calc;
|
|
|
|
|
|
|
|
/* Used half Default if sub-gig */
|
|
|
|
if (adapter->link_speed != 1000)
|
|
|
|
newitr = IGB_DEFAULT_ITR / 2;
|
|
|
|
else {
|
|
|
|
if ((txr->bytes) && (txr->packets))
|
|
|
|
newitr = txr->bytes/txr->packets;
|
|
|
|
if ((rxr->bytes) && (rxr->packets))
|
|
|
|
newitr = max(newitr,
|
|
|
|
(rxr->bytes / rxr->packets));
|
|
|
|
newitr += 24; /* account for hardware frame, crc */
|
|
|
|
/* set an upper boundary */
|
|
|
|
newitr = min(newitr, 3000);
|
|
|
|
/* Be nice to the mid range */
|
|
|
|
if ((newitr > 300) && (newitr < 1200))
|
|
|
|
newitr = (newitr / 3);
|
|
|
|
else
|
|
|
|
newitr = (newitr / 2);
|
|
|
|
}
|
|
|
|
newitr &= 0x7FFC; /* Mask invalid bits */
|
|
|
|
if (adapter->hw.mac.type == e1000_82575)
|
|
|
|
newitr |= newitr << 16;
|
|
|
|
else
|
2010-04-09 21:16:45 +00:00
|
|
|
newitr |= E1000_EITR_CNT_IGNR;
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
/* save for next interrupt */
|
|
|
|
que->eitr_setting = newitr;
|
|
|
|
|
|
|
|
/* Reset state */
|
|
|
|
txr->bytes = 0;
|
|
|
|
txr->packets = 0;
|
|
|
|
rxr->bytes = 0;
|
|
|
|
rxr->packets = 0;
|
|
|
|
|
|
|
|
no_calc:
|
|
|
|
/* Schedule a clean task if needed*/
|
2012-03-30 19:54:48 +00:00
|
|
|
if (more_rx)
|
2010-01-26 22:32:22 +00:00
|
|
|
taskqueue_enqueue(que->tq, &que->que_task);
|
2009-12-08 01:07:44 +00:00
|
|
|
else
|
|
|
|
/* Reenable this interrupt */
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
|
2008-02-29 21:50:11 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2008-08-28 22:28:28 +00:00
|
|
|
|
2009-04-10 00:05:46 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* MSIX Link Interrupt Service routine
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_msix_link(void *arg)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = arg;
|
|
|
|
u32 icr;
|
|
|
|
|
|
|
|
++adapter->link_irq;
|
|
|
|
icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
|
|
|
|
if (!(icr & E1000_ICR_LSC))
|
|
|
|
goto spurious;
|
2010-06-16 16:37:36 +00:00
|
|
|
igb_handle_link(adapter, 0);
|
2009-04-10 00:05:46 +00:00
|
|
|
|
|
|
|
spurious:
|
|
|
|
/* Rearm */
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Media Ioctl callback
|
|
|
|
*
|
|
|
|
* This routine is called whenever the user queries the status of
|
|
|
|
* the interface using ifconfig.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = ifp->if_softc;
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_media_status: begin");
|
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
igb_update_link_status(adapter);
|
|
|
|
|
|
|
|
ifmr->ifm_status = IFM_AVALID;
|
|
|
|
ifmr->ifm_active = IFM_ETHER;
|
|
|
|
|
|
|
|
if (!adapter->link_active) {
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ifmr->ifm_status |= IFM_ACTIVE;
|
|
|
|
|
2013-02-21 00:25:45 +00:00
|
|
|
switch (adapter->link_speed) {
|
|
|
|
case 10:
|
|
|
|
ifmr->ifm_active |= IFM_10_T;
|
|
|
|
break;
|
|
|
|
case 100:
|
|
|
|
/*
|
|
|
|
** Support for 100Mb SFP - these are Fiber
|
|
|
|
** but the media type appears as serdes
|
|
|
|
*/
|
|
|
|
if (adapter->hw.phy.media_type ==
|
|
|
|
e1000_media_type_internal_serdes)
|
|
|
|
ifmr->ifm_active |= IFM_100_FX;
|
2008-02-29 21:50:11 +00:00
|
|
|
else
|
2013-02-21 00:25:45 +00:00
|
|
|
ifmr->ifm_active |= IFM_100_TX;
|
|
|
|
break;
|
|
|
|
case 1000:
|
|
|
|
ifmr->ifm_active |= IFM_1000_T;
|
|
|
|
break;
|
2013-10-09 17:32:52 +00:00
|
|
|
case 2500:
|
|
|
|
ifmr->ifm_active |= IFM_2500_SX;
|
|
|
|
break;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2013-02-21 00:25:45 +00:00
|
|
|
|
|
|
|
if (adapter->link_duplex == FULL_DUPLEX)
|
|
|
|
ifmr->ifm_active |= IFM_FDX;
|
|
|
|
else
|
|
|
|
ifmr->ifm_active |= IFM_HDX;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Media Ioctl callback
|
|
|
|
*
|
|
|
|
* This routine is called when the user changes speed/duplex using
|
|
|
|
* media/mediopt option with ifconfig.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_media_change(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = ifp->if_softc;
|
|
|
|
struct ifmedia *ifm = &adapter->media;
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_media_change: begin");
|
|
|
|
|
|
|
|
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
switch (IFM_SUBTYPE(ifm->ifm_media)) {
|
|
|
|
case IFM_AUTO:
|
|
|
|
adapter->hw.mac.autoneg = DO_AUTO_NEG;
|
|
|
|
adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
|
|
|
|
break;
|
|
|
|
case IFM_1000_LX:
|
|
|
|
case IFM_1000_SX:
|
|
|
|
case IFM_1000_T:
|
|
|
|
adapter->hw.mac.autoneg = DO_AUTO_NEG;
|
|
|
|
adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
|
|
|
|
break;
|
|
|
|
case IFM_100_TX:
|
|
|
|
adapter->hw.mac.autoneg = FALSE;
|
|
|
|
adapter->hw.phy.autoneg_advertised = 0;
|
|
|
|
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
|
|
|
|
adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
|
|
|
|
else
|
|
|
|
adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
|
|
|
|
break;
|
|
|
|
case IFM_10_T:
|
|
|
|
adapter->hw.mac.autoneg = FALSE;
|
|
|
|
adapter->hw.phy.autoneg_advertised = 0;
|
|
|
|
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
|
|
|
|
adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
|
|
|
|
else
|
|
|
|
adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
device_printf(adapter->dev, "Unsupported media type\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
igb_init_locked(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* This routine maps the mbufs to Advanced TX descriptors.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
|
|
|
|
{
|
2013-10-09 17:32:52 +00:00
|
|
|
struct adapter *adapter = txr->adapter;
|
|
|
|
u32 olinfo_status = 0, cmd_type_len;
|
|
|
|
int i, j, error, nsegs;
|
|
|
|
int first;
|
|
|
|
bool remap = TRUE;
|
|
|
|
struct mbuf *m_head;
|
|
|
|
bus_dma_segment_t segs[IGB_MAX_SCATTER];
|
|
|
|
bus_dmamap_t map;
|
|
|
|
struct igb_tx_buf *txbuf;
|
|
|
|
union e1000_adv_tx_desc *txd = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
m_head = *m_headp;
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/* Basic descriptor defines */
|
|
|
|
cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
|
|
|
|
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);
|
|
|
|
|
|
|
|
if (m_head->m_flags & M_VLANTAG)
|
|
|
|
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Important to capture the first descriptor
|
|
|
|
* used because it will contain the index of
|
|
|
|
* the one we tell the hardware to report back
|
|
|
|
*/
|
|
|
|
first = txr->next_avail_desc;
|
|
|
|
txbuf = &txr->tx_buffers[first];
|
|
|
|
map = txbuf->map;
|
2011-12-10 07:08:52 +00:00
|
|
|
|
|
|
|
/*
|
2013-10-09 17:32:52 +00:00
|
|
|
* Map the packet for DMA.
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2013-10-09 17:32:52 +00:00
|
|
|
retry:
|
2008-02-29 21:50:11 +00:00
|
|
|
error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
|
|
|
|
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
if (__predict_false(error)) {
|
2008-02-29 21:50:11 +00:00
|
|
|
struct mbuf *m;
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
switch (error) {
|
|
|
|
case EFBIG:
|
|
|
|
/* Try it again? - one try */
|
|
|
|
if (remap == TRUE) {
|
|
|
|
remap = FALSE;
|
|
|
|
m = m_defrag(*m_headp, M_NOWAIT);
|
|
|
|
if (m == NULL) {
|
|
|
|
adapter->mbuf_defrag_failed++;
|
|
|
|
m_freem(*m_headp);
|
|
|
|
*m_headp = NULL;
|
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
|
|
|
*m_headp = m;
|
|
|
|
goto retry;
|
|
|
|
} else
|
|
|
|
return (error);
|
|
|
|
case ENOMEM:
|
|
|
|
txr->no_tx_dma_setup++;
|
|
|
|
return (error);
|
|
|
|
default:
|
|
|
|
txr->no_tx_dma_setup++;
|
2008-02-29 21:50:11 +00:00
|
|
|
m_freem(*m_headp);
|
|
|
|
*m_headp = NULL;
|
2013-10-09 17:32:52 +00:00
|
|
|
return (error);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/* Make certain there are enough descriptors */
|
|
|
|
if (nsegs > txr->tx_avail - 2) {
|
|
|
|
txr->no_desc_avail++;
|
2008-02-29 21:50:11 +00:00
|
|
|
bus_dmamap_unload(txr->txtag, map);
|
|
|
|
return (ENOBUFS);
|
2013-10-09 17:32:52 +00:00
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
m_head = *m_headp;
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/*
|
|
|
|
** Set up the appropriate offload context
|
|
|
|
** this will consume the first descriptor
|
|
|
|
*/
|
|
|
|
error = igb_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
|
|
|
|
if (__predict_false(error)) {
|
|
|
|
m_freem(*m_headp);
|
|
|
|
*m_headp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/* 82575 needs the queue index added */
|
|
|
|
if (adapter->hw.mac.type == e1000_82575)
|
|
|
|
olinfo_status |= txr->me << 4;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
i = txr->next_avail_desc;
|
2013-10-09 17:32:52 +00:00
|
|
|
for (j = 0; j < nsegs; j++) {
|
|
|
|
bus_size_t seglen;
|
|
|
|
bus_addr_t segaddr;
|
|
|
|
|
|
|
|
txbuf = &txr->tx_buffers[i];
|
|
|
|
txd = &txr->tx_base[i];
|
|
|
|
seglen = segs[j].ds_len;
|
|
|
|
segaddr = htole64(segs[j].ds_addr);
|
|
|
|
|
|
|
|
txd->read.buffer_addr = segaddr;
|
|
|
|
txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS |
|
|
|
|
cmd_type_len | seglen);
|
2008-02-29 21:50:11 +00:00
|
|
|
txd->read.olinfo_status = htole32(olinfo_status);
|
2013-10-09 17:32:52 +00:00
|
|
|
|
|
|
|
if (++i == txr->num_desc)
|
2008-02-29 21:50:11 +00:00
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
txd->read.cmd_type_len |=
|
|
|
|
htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
|
2008-02-29 21:50:11 +00:00
|
|
|
txr->tx_avail -= nsegs;
|
2013-10-09 17:32:52 +00:00
|
|
|
txr->next_avail_desc = i;
|
2011-12-10 07:08:52 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
txbuf->m_head = m_head;
|
2011-12-10 07:08:52 +00:00
|
|
|
/*
|
|
|
|
** Here we swap the map so the last descriptor,
|
|
|
|
** which gets the completion interrupt has the
|
|
|
|
** real map, and the first descriptor gets the
|
|
|
|
** unused map from this descriptor.
|
|
|
|
*/
|
2013-10-09 17:32:52 +00:00
|
|
|
txr->tx_buffers[first].map = txbuf->map;
|
|
|
|
txbuf->map = map;
|
|
|
|
bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/* Set the EOP descriptor that will be marked done */
|
|
|
|
txbuf = &txr->tx_buffers[first];
|
|
|
|
txbuf->eop = txd;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
2008-02-29 21:50:11 +00:00
|
|
|
/*
|
2013-10-09 17:32:52 +00:00
|
|
|
* Advance the Transmit Descriptor Tail (Tdt), this tells the
|
|
|
|
* hardware that this frame is available to transmit.
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2013-10-09 17:32:52 +00:00
|
|
|
++txr->total_packets;
|
2008-02-29 21:50:11 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
static void
|
|
|
|
igb_set_promisc(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2010-06-30 17:26:47 +00:00
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
u32 reg;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->vf_ifp) {
|
2010-06-30 17:26:47 +00:00
|
|
|
e1000_promisc_set_vf(hw, e1000_promisc_enabled);
|
|
|
|
return;
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
reg = E1000_READ_REG(hw, E1000_RCTL);
|
2008-02-29 21:50:11 +00:00
|
|
|
if (ifp->if_flags & IFF_PROMISC) {
|
2010-06-30 17:26:47 +00:00
|
|
|
reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
|
|
|
|
E1000_WRITE_REG(hw, E1000_RCTL, reg);
|
2008-02-29 21:50:11 +00:00
|
|
|
} else if (ifp->if_flags & IFF_ALLMULTI) {
|
2010-06-30 17:26:47 +00:00
|
|
|
reg |= E1000_RCTL_MPE;
|
|
|
|
reg &= ~E1000_RCTL_UPE;
|
|
|
|
E1000_WRITE_REG(hw, E1000_RCTL, reg);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Clear promiscuous reception on the adapter.
 *
 * Unicast promiscuous (UPE) is always cleared.  Multicast promiscuous
 * (MPE) is only cleared when the interface is not in ALLMULTI mode and
 * the number of subscribed link-level multicast groups is below the
 * hardware filter limit; otherwise MPE must stay set because the exact
 * filter table cannot hold every group.
 */
static void
igb_disable_promisc(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg;
	int		mcnt = 0;	/* count of AF_LINK multicast groups */

	/* VF interfaces delegate promiscuous control to the PF. */
	if (adapter->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= (~E1000_RCTL_UPE);
	if (ifp->if_flags & IFF_ALLMULTI)
		/* ALLMULTI: treat as "table full" so MPE stays on below. */
		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
	else {
		struct ifmultiaddr *ifma;
		/* Multicast list locking API changed in FreeBSD 8. */
#if __FreeBSD_version < 800000
		IF_ADDR_LOCK(ifp);
#else
		if_maddr_rlock(ifp);
#endif
		/* Count link-level groups, capped at the HW filter size. */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
				break;
			mcnt++;
		}
#if __FreeBSD_version < 800000
		IF_ADDR_UNLOCK(ifp);
#else
		if_maddr_runlock(ifp);
#endif
	}
	/* Don't disable if in MAX groups */
	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
		reg &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
* Multicast Update
|
|
|
|
*
|
|
|
|
* This routine is called whenever multicast address list is updated.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_set_multi(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
|
|
|
struct ifmultiaddr *ifma;
|
2008-08-28 22:28:28 +00:00
|
|
|
u32 reg_rctl = 0;
|
2010-08-28 00:34:22 +00:00
|
|
|
u8 *mta;
|
2008-08-28 22:28:28 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
int mcnt = 0;
|
|
|
|
|
|
|
|
IOCTL_DEBUGOUT("igb_set_multi: begin");
|
|
|
|
|
2010-08-28 00:34:22 +00:00
|
|
|
mta = adapter->mta;
|
|
|
|
bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN *
|
|
|
|
MAX_NUM_MULTICAST_ADDRESSES);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
#if __FreeBSD_version < 800000
|
|
|
|
IF_ADDR_LOCK(ifp);
|
|
|
|
#else
|
2009-06-26 11:45:06 +00:00
|
|
|
if_maddr_rlock(ifp);
|
2009-12-08 01:07:44 +00:00
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
|
|
|
|
break;
|
|
|
|
|
|
|
|
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
|
|
|
|
&mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
|
|
|
|
mcnt++;
|
|
|
|
}
|
2009-12-08 01:07:44 +00:00
|
|
|
#if __FreeBSD_version < 800000
|
|
|
|
IF_ADDR_UNLOCK(ifp);
|
|
|
|
#else
|
2009-06-26 11:45:06 +00:00
|
|
|
if_maddr_runlock(ifp);
|
2009-12-08 01:07:44 +00:00
|
|
|
#endif
|
2010-01-26 22:32:22 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
|
|
|
|
reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
|
|
|
|
reg_rctl |= E1000_RCTL_MPE;
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
|
|
|
|
} else
|
2009-04-10 00:05:46 +00:00
|
|
|
e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
2009-12-08 01:07:44 +00:00
|
|
|
* Timer routine:
|
|
|
|
* This routine checks for link status,
|
|
|
|
* updates statistics, and does the watchdog.
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_local_timer(void *arg)
|
|
|
|
{
|
2009-12-08 01:07:44 +00:00
|
|
|
struct adapter *adapter = arg;
|
|
|
|
device_t dev = adapter->dev;
|
2011-12-10 07:08:52 +00:00
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2009-12-08 01:07:44 +00:00
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
2011-12-10 07:08:52 +00:00
|
|
|
struct igb_queue *que = adapter->queues;
|
|
|
|
int hung = 0, busy = 0;
|
2009-12-08 01:07:44 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
IGB_CORE_LOCK_ASSERT(adapter);
|
|
|
|
|
|
|
|
igb_update_link_status(adapter);
|
|
|
|
igb_update_stats_counters(adapter);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/*
|
2011-12-10 07:08:52 +00:00
|
|
|
** Check the TX queues status
|
|
|
|
** - central locked handling of OACTIVE
|
|
|
|
** - watchdog only if all queues show hung
|
2009-12-08 01:07:44 +00:00
|
|
|
*/
|
2011-12-10 07:08:52 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
|
|
|
|
if ((txr->queue_status & IGB_QUEUE_HUNG) &&
|
|
|
|
(adapter->pause_frames == 0))
|
|
|
|
++hung;
|
|
|
|
if (txr->queue_status & IGB_QUEUE_DEPLETED)
|
|
|
|
++busy;
|
|
|
|
if ((txr->queue_status & IGB_QUEUE_IDLE) == 0)
|
|
|
|
taskqueue_enqueue(que->tq, &que->que_task);
|
|
|
|
}
|
|
|
|
if (hung == adapter->num_queues)
|
|
|
|
goto timeout;
|
|
|
|
if (busy == adapter->num_queues)
|
|
|
|
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
|
|
|
else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) &&
|
|
|
|
(busy < adapter->num_queues))
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
|
|
|
|
|
|
|
|
adapter->pause_frames = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
|
2011-02-11 19:49:07 +00:00
|
|
|
#ifndef DEVICE_POLLING
|
2011-03-18 18:54:00 +00:00
|
|
|
/* Schedule all queue interrupts - deadlock protection */
|
2011-02-11 19:49:07 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask);
|
|
|
|
#endif
|
2009-12-08 01:07:44 +00:00
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
timeout:
|
|
|
|
device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
|
|
|
|
device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
|
|
|
|
E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
|
|
|
|
E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
|
|
|
|
device_printf(dev,"TX(%d) desc avail = %d,"
|
|
|
|
"Next TX to Clean = %d\n",
|
|
|
|
txr->me, txr->tx_avail, txr->next_to_clean);
|
|
|
|
adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
|
|
|
|
adapter->watchdog_events++;
|
|
|
|
igb_init_locked(adapter);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Detect link-state transitions and propagate them to the stack.
 *
 * Reads (or re-checks) link per media type, samples I350 thermal
 * status, then on an up- or down-transition updates the adapter's
 * cached link fields and calls if_link_state_change().  Must be able
 * to sleep (msec_delay, if_link_state_change); ordering of the PHY
 * delay and media-reset steps before announcing link-up is
 * intentional — do not reorder.
 */
static void
igb_update_link_status(struct adapter *adapter)
{
	struct e1000_hw		*hw = &adapter->hw;
	struct e1000_fc_info	*fc = &hw->fc;
	struct ifnet		*ifp = adapter->ifp;
	device_t		dev = adapter->dev;
	struct tx_ring		*txr = adapter->tx_rings;
	u32			link_check, thstat, ctrl;
	char			*flowctl = NULL;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* get_link_status is cleared once link is known */
			link_check = !hw->mac.get_link_status;
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		/* Fiber reports link via the STATUS register LU bit */
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
				 E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* Fall thru */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown (I350 only) */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Get the flow control for display */
	switch (fc->current_mode) {
	case e1000_fc_rx_pause:
		flowctl = "RX";
		break;
	case e1000_fc_tx_pause:
		flowctl = "TX";
		break;
	case e1000_fc_full:
		flowctl = "Full";
		break;
	case e1000_fc_none:
	default:
		flowctl = "None";
		break;
	}

	/* Now we check if a transition has happened */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: cache speed/duplex for reporting */
		e1000_get_speed_and_duplex(&adapter->hw,
		    &adapter->link_speed, &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s,"
			    " Flow Control: %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"), flowctl);
		adapter->link_active = 1;
		/* link_speed is in Mbps; if_baudrate wants bits/sec */
		ifp->if_baudrate = adapter->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			device_printf(dev, "Link: thermal downshift\n");
		/* Delay Link Up for Phy update */
		if (((hw->mac.type == e1000_i210) ||
		    (hw->mac.type == e1000_i211)) &&
		    (hw->phy.id == I210_I_PHY_ID))
			msec_delay(I210_LINK_DELAY);
		/* Reset if the media type changed. */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = false;
			adapter->flags |= IGB_MEDIA_RESET;
			igb_reset(adapter);
		}
		/* This can sleep */
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear cached state before notifying */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			device_printf(dev, "Link: thermal shutdown\n");
		adapter->link_active = 0;
		/* This can sleep */
		if_link_state_change(ifp, LINK_STATE_DOWN);
		/* Reset queue state */
		for (int i = 0; i < adapter->num_queues; i++, txr++)
			txr->queue_status = IGB_QUEUE_IDLE;
	}
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* This routine disables all traffic on the adapter by issuing a
|
|
|
|
* global reset on the MAC and deallocates TX/RX buffers.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_stop(void *arg)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = arg;
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
IGB_CORE_LOCK_ASSERT(adapter);
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_stop: begin");
|
|
|
|
|
|
|
|
igb_disable_intr(adapter);
|
|
|
|
|
|
|
|
callout_stop(&adapter->timer);
|
|
|
|
|
|
|
|
/* Tell the stack that the interface is no longer active */
|
2011-12-10 07:08:52 +00:00
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
|
|
|
|
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
/* Disarm watchdog timer. */
|
2010-01-26 22:32:22 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, txr++) {
|
|
|
|
IGB_TX_LOCK(txr);
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
txr->queue_status = IGB_QUEUE_IDLE;
|
2010-01-26 22:32:22 +00:00
|
|
|
IGB_TX_UNLOCK(txr);
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
e1000_reset_hw(&adapter->hw);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
|
2010-03-31 20:43:24 +00:00
|
|
|
|
|
|
|
e1000_led_off(&adapter->hw);
|
|
|
|
e1000_cleanup_led(&adapter->hw);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Determine hardware revision.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_identify_hardware(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
device_t dev = adapter->dev;
|
|
|
|
|
|
|
|
/* Make sure our PCI config space has the necessary stuff set */
|
2013-08-12 23:30:01 +00:00
|
|
|
pci_enable_busmaster(dev);
|
2008-02-29 21:50:11 +00:00
|
|
|
adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
|
|
|
|
|
|
|
|
/* Save off the information about this board */
|
|
|
|
adapter->hw.vendor_id = pci_get_vendor(dev);
|
|
|
|
adapter->hw.device_id = pci_get_device(dev);
|
|
|
|
adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
|
|
|
|
adapter->hw.subsystem_vendor_id =
|
|
|
|
pci_read_config(dev, PCIR_SUBVEND_0, 2);
|
|
|
|
adapter->hw.subsystem_device_id =
|
|
|
|
pci_read_config(dev, PCIR_SUBDEV_0, 2);
|
2010-08-06 20:55:49 +00:00
|
|
|
|
|
|
|
/* Set MAC type early for PCI setup */
|
|
|
|
e1000_set_mac_type(&adapter->hw);
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
|
|
|
|
/* Are we a VF device? */
|
|
|
|
if ((adapter->hw.mac.type == e1000_vfadapt) ||
|
|
|
|
(adapter->hw.mac.type == e1000_vfadapt_i350))
|
|
|
|
adapter->vf_ifp = 1;
|
|
|
|
else
|
|
|
|
adapter->vf_ifp = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_allocate_pci_resources(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
device_t dev = adapter->dev;
|
2009-06-24 17:41:29 +00:00
|
|
|
int rid;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
rid = PCIR_BAR(0);
|
|
|
|
adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
|
|
|
|
&rid, RF_ACTIVE);
|
|
|
|
if (adapter->pci_mem == NULL) {
|
|
|
|
device_printf(dev, "Unable to allocate bus resource: memory\n");
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
adapter->osdep.mem_bus_space_tag =
|
|
|
|
rman_get_bustag(adapter->pci_mem);
|
|
|
|
adapter->osdep.mem_bus_space_handle =
|
|
|
|
rman_get_bushandle(adapter->pci_mem);
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->num_queues = 1; /* Defaults for Legacy or MSI */
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* This will setup either MSI/X or MSI */
|
|
|
|
adapter->msix = igb_setup_msix(adapter);
|
|
|
|
adapter->hw.back = &adapter->osdep;
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
return (0);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Setup the Legacy or MSI Interrupt handler
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
/*
 * Set up the interrupt path for Legacy (INTx) or single-vector MSI
 * operation: allocate the one IRQ resource, create the deferred-work
 * tasks and their fast taskqueue, and install igb_irq_fast as the
 * filter handler.
 *
 * Returns 0 on success or a bus error number on failure.
 */
static int
igb_allocate_legacy(struct adapter *adapter)
{
	device_t		dev = adapter->dev;
	struct igb_queue	*que = adapter->queues;
#ifndef IGB_LEGACY_TX
	struct tx_ring		*txr = adapter->tx_rings;
#endif
	int			error, rid = 0;

	/* Turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* MSI RID is 1 */
	if (adapter->msix == 1)
		rid = 1;

	/* We allocate a single interrupt resource */
	adapter->res = bus_alloc_resource_any(dev,
	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
	if (adapter->res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

#ifndef IGB_LEGACY_TX
	/* Deferred multiqueue-start task for the (single) TX ring. */
	TASK_INIT(&txr->txq_task, 0, igb_deferred_mq_start, txr);
#endif

	/*
	 * Try allocating a fast interrupt and the associated deferred
	 * processing contexts.
	 */
	TASK_INIT(&que->que_task, 0, igb_handle_que, que);
	/* Make tasklet for deferred link handling */
	TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
	que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &que->tq);
	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(adapter->dev));
	if ((error = bus_setup_intr(dev, adapter->res,
	    INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
	    adapter, &adapter->tag)) != 0) {
		device_printf(dev, "Failed to register fast interrupt "
		    "handler: %d\n", error);
		/*
		 * NOTE(review): adapter->res is not released here —
		 * presumably the attach failure path frees it via the
		 * driver's PCI-resource teardown; verify against the
		 * caller before changing.
		 */
		taskqueue_free(que->tq);
		que->tq = NULL;
		return (error);
	}

	return (0);
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
2010-01-26 22:32:22 +00:00
|
|
|
* Setup the MSIX Queue Interrupt handlers:
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_allocate_msix(struct adapter *adapter)
|
|
|
|
{
|
2010-01-26 22:32:22 +00:00
|
|
|
device_t dev = adapter->dev;
|
|
|
|
struct igb_queue *que = adapter->queues;
|
|
|
|
int error, rid, vector = 0;
|
2014-06-30 23:34:36 +00:00
|
|
|
int cpu_id = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
/* Be sure to start with all interrupts disabled */
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
|
|
|
|
E1000_WRITE_FLUSH(&adapter->hw);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2014-06-30 04:34:59 +00:00
|
|
|
#ifdef RSS
|
|
|
|
/*
|
|
|
|
* If we're doing RSS, the number of queues needs to
|
|
|
|
* match the number of RSS buckets that are configured.
|
|
|
|
*
|
|
|
|
* + If there's more queues than RSS buckets, we'll end
|
|
|
|
* up with queues that get no traffic.
|
|
|
|
*
|
|
|
|
* + If there's more RSS buckets than queues, we'll end
|
|
|
|
* up having multiple RSS buckets map to the same queue,
|
|
|
|
* so there'll be some contention.
|
|
|
|
*/
|
|
|
|
if (adapter->num_queues != rss_getnumbuckets()) {
|
|
|
|
device_printf(dev,
|
|
|
|
"%s: number of queues (%d) != number of RSS buckets (%d)"
|
|
|
|
"; performance will be impacted.\n",
|
|
|
|
__func__,
|
|
|
|
adapter->num_queues,
|
|
|
|
rss_getnumbuckets());
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
|
2009-06-24 17:41:29 +00:00
|
|
|
rid = vector +1;
|
2010-01-26 22:32:22 +00:00
|
|
|
que->res = bus_alloc_resource_any(dev,
|
2009-06-24 17:41:29 +00:00
|
|
|
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
|
2010-01-26 22:32:22 +00:00
|
|
|
if (que->res == NULL) {
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate bus resource: "
|
2010-01-26 22:32:22 +00:00
|
|
|
"MSIX Queue Interrupt\n");
|
2008-02-29 21:50:11 +00:00
|
|
|
return (ENXIO);
|
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
error = bus_setup_intr(dev, que->res,
|
2009-06-24 17:41:29 +00:00
|
|
|
INTR_TYPE_NET | INTR_MPSAFE, NULL,
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_msix_que, que, &que->tag);
|
2008-02-29 21:50:11 +00:00
|
|
|
if (error) {
|
2010-01-26 22:32:22 +00:00
|
|
|
que->res = NULL;
|
|
|
|
device_printf(dev, "Failed to register Queue handler");
|
2008-02-29 21:50:11 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2010-06-11 21:35:19 +00:00
|
|
|
#if __FreeBSD_version >= 800504
|
|
|
|
bus_describe_intr(dev, que->res, que->tag, "que %d", i);
|
|
|
|
#endif
|
2010-01-26 22:32:22 +00:00
|
|
|
que->msix = vector;
|
2009-06-24 17:41:29 +00:00
|
|
|
if (adapter->hw.mac.type == e1000_82575)
|
2010-01-26 22:32:22 +00:00
|
|
|
que->eims = E1000_EICR_TX_QUEUE0 << i;
|
2009-06-24 17:41:29 +00:00
|
|
|
else
|
2010-01-26 22:32:22 +00:00
|
|
|
que->eims = 1 << vector;
|
2014-06-30 04:34:59 +00:00
|
|
|
|
|
|
|
#ifdef RSS
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
2014-06-30 04:34:59 +00:00
|
|
|
* The queue ID is used as the RSS layer bucket ID.
|
|
|
|
* We look up the queue ID -> RSS CPU ID and select
|
|
|
|
* that.
|
|
|
|
*/
|
|
|
|
cpu_id = rss_getcpu(i % rss_getnumbuckets());
|
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* Bind the msix vector, and thus the
|
|
|
|
* rings to the corresponding cpu.
|
|
|
|
*
|
|
|
|
* This just happens to match the default RSS round-robin
|
|
|
|
* bucket -> queue -> CPU allocation.
|
|
|
|
*/
|
2012-05-10 00:00:28 +00:00
|
|
|
if (adapter->num_queues > 1) {
|
|
|
|
if (igb_last_bind_cpu < 0)
|
|
|
|
igb_last_bind_cpu = CPU_FIRST();
|
2014-06-30 04:34:59 +00:00
|
|
|
cpu_id = igb_last_bind_cpu;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (adapter->num_queues > 1) {
|
|
|
|
bus_bind_intr(dev, que->res, cpu_id);
|
|
|
|
#ifdef RSS
|
|
|
|
device_printf(dev,
|
|
|
|
"Bound queue %d to RSS bucket %d\n",
|
|
|
|
i, cpu_id);
|
|
|
|
#else
|
2012-05-10 00:00:28 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Bound queue %d to cpu %d\n",
|
2014-06-30 04:34:59 +00:00
|
|
|
i, cpu_id);
|
|
|
|
#endif
|
2012-05-10 00:00:28 +00:00
|
|
|
}
|
2014-06-30 04:34:59 +00:00
|
|
|
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2012-04-11 21:33:45 +00:00
|
|
|
TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start,
|
|
|
|
que->txr);
|
|
|
|
#endif
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Make tasklet for deferred handling */
|
|
|
|
TASK_INIT(&que->que_task, 0, igb_handle_que, que);
|
2012-04-11 21:33:45 +00:00
|
|
|
que->tq = taskqueue_create("igb_que", M_NOWAIT,
|
2010-01-26 22:32:22 +00:00
|
|
|
taskqueue_thread_enqueue, &que->tq);
|
2014-06-30 04:34:59 +00:00
|
|
|
if (adapter->num_queues > 1) {
|
|
|
|
/*
|
|
|
|
* Only pin the taskqueue thread to a CPU if
|
|
|
|
* RSS is in use.
|
|
|
|
*
|
|
|
|
* This again just happens to match the default RSS
|
|
|
|
* round-robin bucket -> queue -> CPU allocation.
|
|
|
|
*/
|
|
|
|
#ifdef RSS
|
|
|
|
taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
|
|
|
|
cpu_id,
|
|
|
|
"%s que (bucket %d)",
|
|
|
|
device_get_nameunit(adapter->dev),
|
|
|
|
cpu_id);
|
|
|
|
#else
|
|
|
|
taskqueue_start_threads(&que->tq, 1, PI_NET,
|
|
|
|
"%s que (qid %d)",
|
|
|
|
device_get_nameunit(adapter->dev),
|
|
|
|
cpu_id);
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
|
|
|
|
device_get_nameunit(adapter->dev));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Finally update the last bound CPU id */
|
|
|
|
if (adapter->num_queues > 1)
|
|
|
|
igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* And Link */
|
2010-01-26 22:32:22 +00:00
|
|
|
rid = vector + 1;
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->res = bus_alloc_resource_any(dev,
|
|
|
|
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
|
|
|
|
if (adapter->res == NULL) {
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate bus resource: "
|
|
|
|
"MSIX Link Interrupt\n");
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
2009-06-24 17:41:29 +00:00
|
|
|
if ((error = bus_setup_intr(dev, adapter->res,
|
|
|
|
INTR_TYPE_NET | INTR_MPSAFE, NULL,
|
|
|
|
igb_msix_link, adapter, &adapter->tag)) != 0) {
|
2010-01-26 22:32:22 +00:00
|
|
|
device_printf(dev, "Failed to register Link handler");
|
2008-02-29 21:50:11 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2010-06-11 21:35:19 +00:00
|
|
|
#if __FreeBSD_version >= 800504
|
|
|
|
bus_describe_intr(dev, adapter->res, adapter->tag, "link");
|
|
|
|
#endif
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->linkvec = vector;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
2009-04-10 00:05:46 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
igb_configure_queues(struct adapter *adapter)
|
|
|
|
{
|
2010-01-26 22:32:22 +00:00
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
struct igb_queue *que;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
u32 tmp, ivar = 0, newitr = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/* First turn on RSS capability */
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->hw.mac.type != e1000_82575)
|
2008-07-30 21:56:53 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_GPIE,
|
2009-12-08 01:07:44 +00:00
|
|
|
E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
|
2008-07-30 21:56:53 +00:00
|
|
|
E1000_GPIE_PBA | E1000_GPIE_NSICR);
|
2009-12-08 01:07:44 +00:00
|
|
|
|
|
|
|
/* Turn on MSIX */
|
|
|
|
switch (adapter->hw.mac.type) {
|
|
|
|
case e1000_82580:
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
case e1000_i350:
|
2013-10-09 17:32:52 +00:00
|
|
|
case e1000_i354:
|
2012-07-05 20:26:57 +00:00
|
|
|
case e1000_i210:
|
|
|
|
case e1000_i211:
|
2010-06-30 17:26:47 +00:00
|
|
|
case e1000_vfadapt:
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
case e1000_vfadapt_i350:
|
2010-01-26 22:32:22 +00:00
|
|
|
/* RX entries */
|
2009-12-08 01:07:44 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
|
|
|
u32 index = i >> 1;
|
|
|
|
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
|
2010-01-26 22:32:22 +00:00
|
|
|
que = &adapter->queues[i];
|
2009-12-08 01:07:44 +00:00
|
|
|
if (i & 1) {
|
|
|
|
ivar &= 0xFF00FFFF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 16;
|
2009-12-08 01:07:44 +00:00
|
|
|
} else {
|
|
|
|
ivar &= 0xFFFFFF00;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= que->msix | E1000_IVAR_VALID;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
|
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
|
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
/* TX entries */
|
2009-12-08 01:07:44 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
|
|
|
u32 index = i >> 1;
|
|
|
|
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
|
2010-01-26 22:32:22 +00:00
|
|
|
que = &adapter->queues[i];
|
2009-12-08 01:07:44 +00:00
|
|
|
if (i & 1) {
|
|
|
|
ivar &= 0x00FFFFFF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 24;
|
2009-12-08 01:07:44 +00:00
|
|
|
} else {
|
|
|
|
ivar &= 0xFFFF00FF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 8;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
|
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
adapter->que_mask |= que->eims;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* And for the link interrupt */
|
|
|
|
ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
|
|
|
|
adapter->link_mask = 1 << adapter->linkvec;
|
|
|
|
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
|
|
|
|
break;
|
|
|
|
case e1000_82576:
|
2010-01-26 22:32:22 +00:00
|
|
|
/* RX entries */
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
2008-07-30 21:56:53 +00:00
|
|
|
u32 index = i & 0x7; /* Each IVAR has two entries */
|
|
|
|
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
|
2010-01-26 22:32:22 +00:00
|
|
|
que = &adapter->queues[i];
|
2008-07-30 21:56:53 +00:00
|
|
|
if (i < 8) {
|
|
|
|
ivar &= 0xFFFFFF00;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= que->msix | E1000_IVAR_VALID;
|
2008-07-30 21:56:53 +00:00
|
|
|
} else {
|
|
|
|
ivar &= 0xFF00FFFF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 16;
|
2008-07-30 21:56:53 +00:00
|
|
|
}
|
|
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
adapter->que_mask |= que->eims;
|
2008-07-30 21:56:53 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
/* TX entries */
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
2008-07-30 21:56:53 +00:00
|
|
|
u32 index = i & 0x7; /* Each IVAR has two entries */
|
|
|
|
ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
|
2010-01-26 22:32:22 +00:00
|
|
|
que = &adapter->queues[i];
|
2008-07-30 21:56:53 +00:00
|
|
|
if (i < 8) {
|
|
|
|
ivar &= 0xFFFF00FF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 8;
|
2008-07-30 21:56:53 +00:00
|
|
|
} else {
|
|
|
|
ivar &= 0x00FFFFFF;
|
2010-01-26 22:32:22 +00:00
|
|
|
ivar |= (que->msix | E1000_IVAR_VALID) << 24;
|
2008-07-30 21:56:53 +00:00
|
|
|
}
|
|
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
adapter->que_mask |= que->eims;
|
2008-07-30 21:56:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* And for the link interrupt */
|
|
|
|
ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
|
|
|
|
adapter->link_mask = 1 << adapter->linkvec;
|
|
|
|
E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
|
2009-12-08 01:07:44 +00:00
|
|
|
break;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
case e1000_82575:
|
|
|
|
/* enable MSI-X support*/
|
2008-02-29 21:50:11 +00:00
|
|
|
tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
|
|
|
|
tmp |= E1000_CTRL_EXT_PBA_CLR;
|
|
|
|
/* Auto-Mask interrupts upon ICR read. */
|
|
|
|
tmp |= E1000_CTRL_EXT_EIAME;
|
|
|
|
tmp |= E1000_CTRL_EXT_IRCA;
|
|
|
|
E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Queues */
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
2010-01-26 22:32:22 +00:00
|
|
|
que = &adapter->queues[i];
|
|
|
|
tmp = E1000_EICR_RX_QUEUE0 << i;
|
|
|
|
tmp |= E1000_EICR_TX_QUEUE0 << i;
|
|
|
|
que->eims = tmp;
|
|
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
|
|
|
|
i, que->eims);
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
adapter->que_mask |= que->eims;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Link */
|
|
|
|
E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
|
|
|
|
E1000_EIMS_OTHER);
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
adapter->link_mask |= E1000_EIMS_OTHER;
|
2009-12-08 01:07:44 +00:00
|
|
|
default:
|
|
|
|
break;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2009-12-08 01:07:44 +00:00
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Set the starting interrupt rate */
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
if (igb_max_interrupt_rate > 0)
|
|
|
|
newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
if (hw->mac.type == e1000_82575)
|
|
|
|
newitr |= newitr << 16;
|
|
|
|
else
|
2010-04-09 21:18:46 +00:00
|
|
|
newitr |= E1000_EITR_CNT_IGNR;
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
|
|
|
que = &adapter->queues[i];
|
|
|
|
E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_free_pci_resources(struct adapter *adapter)
|
|
|
|
{
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_queue *que = adapter->queues;
|
2009-06-24 17:41:29 +00:00
|
|
|
device_t dev = adapter->dev;
|
|
|
|
int rid;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
|
|
|
** There is a slight possibility of a failure mode
|
|
|
|
** in attach that will result in entering this function
|
|
|
|
** before interrupt resources have been initialized, and
|
|
|
|
** in that case we do not want to execute the loops below
|
|
|
|
** We can detect this reliably by the state of the adapter
|
|
|
|
** res pointer.
|
|
|
|
*/
|
|
|
|
if (adapter->res == NULL)
|
|
|
|
goto mem;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*
|
2010-01-26 22:32:22 +00:00
|
|
|
* First release all the interrupt resources:
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, que++) {
|
|
|
|
rid = que->msix + 1;
|
|
|
|
if (que->tag != NULL) {
|
|
|
|
bus_teardown_intr(dev, que->res, que->tag);
|
|
|
|
que->tag = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
if (que->res != NULL)
|
|
|
|
bus_release_resource(dev,
|
|
|
|
SYS_RES_IRQ, rid, que->res);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/* Clean the Legacy or Link interrupt last */
|
|
|
|
if (adapter->linkvec) /* we are doing MSIX */
|
|
|
|
rid = adapter->linkvec + 1;
|
|
|
|
else
|
|
|
|
(adapter->msix != 0) ? (rid = 1):(rid = 0);
|
|
|
|
|
2012-04-11 21:33:45 +00:00
|
|
|
que = adapter->queues;
|
2009-06-24 17:41:29 +00:00
|
|
|
if (adapter->tag != NULL) {
|
2012-04-11 21:33:45 +00:00
|
|
|
taskqueue_drain(que->tq, &adapter->link_task);
|
2009-06-24 17:41:29 +00:00
|
|
|
bus_teardown_intr(dev, adapter->res, adapter->tag);
|
|
|
|
adapter->tag = NULL;
|
|
|
|
}
|
|
|
|
if (adapter->res != NULL)
|
|
|
|
bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
|
|
|
|
|
2012-04-11 21:33:45 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, que++) {
|
|
|
|
if (que->tq != NULL) {
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2012-04-11 21:33:45 +00:00
|
|
|
taskqueue_drain(que->tq, &que->txr->txq_task);
|
|
|
|
#endif
|
|
|
|
taskqueue_drain(que->tq, &que->que_task);
|
|
|
|
taskqueue_free(que->tq);
|
|
|
|
}
|
|
|
|
}
|
2009-06-24 17:41:29 +00:00
|
|
|
mem:
|
2008-02-29 21:50:11 +00:00
|
|
|
if (adapter->msix)
|
|
|
|
pci_release_msi(dev);
|
|
|
|
|
|
|
|
if (adapter->msix_mem != NULL)
|
|
|
|
bus_release_resource(dev, SYS_RES_MEMORY,
|
2013-10-09 17:32:52 +00:00
|
|
|
adapter->memrid, adapter->msix_mem);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (adapter->pci_mem != NULL)
|
|
|
|
bus_release_resource(dev, SYS_RES_MEMORY,
|
|
|
|
PCIR_BAR(0), adapter->pci_mem);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup Either MSI/X or MSI
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
igb_setup_msix(struct adapter *adapter)
|
|
|
|
{
|
2013-10-09 17:32:52 +00:00
|
|
|
device_t dev = adapter->dev;
|
|
|
|
int bar, want, queues, msgs, maxqueues;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/* tuneable override */
|
|
|
|
if (igb_enable_msix == 0)
|
|
|
|
goto msi;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* First try MSI/X */
|
2013-08-06 21:01:38 +00:00
|
|
|
msgs = pci_msix_count(dev);
|
|
|
|
if (msgs == 0)
|
|
|
|
goto msi;
|
2013-10-09 17:32:52 +00:00
|
|
|
/*
|
|
|
|
** Some new devices, as with ixgbe, now may
|
|
|
|
** use a different BAR, so we need to keep
|
|
|
|
** track of which is used.
|
|
|
|
*/
|
|
|
|
adapter->memrid = PCIR_BAR(IGB_MSIX_BAR);
|
|
|
|
bar = pci_read_config(dev, adapter->memrid, 4);
|
|
|
|
if (bar == 0) /* use next bar */
|
|
|
|
adapter->memrid += 4;
|
2008-02-29 21:50:11 +00:00
|
|
|
adapter->msix_mem = bus_alloc_resource_any(dev,
|
2013-10-09 17:32:52 +00:00
|
|
|
SYS_RES_MEMORY, &adapter->memrid, RF_ACTIVE);
|
2013-08-06 21:01:38 +00:00
|
|
|
if (adapter->msix_mem == NULL) {
|
2008-02-29 21:50:11 +00:00
|
|
|
/* May not be enabled */
|
|
|
|
device_printf(adapter->dev,
|
|
|
|
"Unable to map MSIX table \n");
|
|
|
|
goto msi;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Figure out a reasonable auto config value */
|
2010-01-26 22:32:22 +00:00
|
|
|
queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
|
|
|
|
|
2014-06-30 04:34:59 +00:00
|
|
|
#ifdef RSS
|
|
|
|
/* If we're doing RSS, clamp at the number of RSS buckets */
|
|
|
|
if (queues > rss_getnumbuckets())
|
|
|
|
queues = rss_getnumbuckets();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* Manual override */
|
|
|
|
if (igb_num_queues != 0)
|
|
|
|
queues = igb_num_queues;
|
2010-01-26 22:32:22 +00:00
|
|
|
|
2012-07-05 20:26:57 +00:00
|
|
|
/* Sanity check based on HW */
|
|
|
|
switch (adapter->hw.mac.type) {
|
|
|
|
case e1000_82575:
|
|
|
|
maxqueues = 4;
|
|
|
|
break;
|
|
|
|
case e1000_82576:
|
|
|
|
case e1000_82580:
|
|
|
|
case e1000_i350:
|
2013-10-09 17:32:52 +00:00
|
|
|
case e1000_i354:
|
2012-07-05 20:26:57 +00:00
|
|
|
maxqueues = 8;
|
|
|
|
break;
|
|
|
|
case e1000_i210:
|
|
|
|
maxqueues = 4;
|
|
|
|
break;
|
|
|
|
case e1000_i211:
|
|
|
|
maxqueues = 2;
|
|
|
|
break;
|
|
|
|
default: /* VF interfaces */
|
|
|
|
maxqueues = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (queues > maxqueues)
|
|
|
|
queues = maxqueues;
|
2010-06-30 17:26:47 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/* Manual override */
|
|
|
|
if (igb_num_queues != 0)
|
|
|
|
queues = igb_num_queues;
|
2013-10-05 19:17:56 +00:00
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
2010-01-26 22:32:22 +00:00
|
|
|
** One vector (RX/TX pair) per queue
|
2009-06-24 17:41:29 +00:00
|
|
|
** plus an additional for Link interrupt
|
|
|
|
*/
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
want = queues + 1;
|
2008-02-29 21:50:11 +00:00
|
|
|
if (msgs >= want)
|
|
|
|
msgs = want;
|
|
|
|
else {
|
|
|
|
device_printf(adapter->dev,
|
|
|
|
"MSIX Configuration Problem, "
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
"%d vectors configured, but %d queues wanted!\n",
|
2008-02-29 21:50:11 +00:00
|
|
|
msgs, want);
|
2013-08-06 21:01:38 +00:00
|
|
|
goto msi;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2013-08-12 22:54:38 +00:00
|
|
|
if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(adapter->dev,
|
|
|
|
"Using MSIX interrupts with %d vectors\n", msgs);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
adapter->num_queues = queues;
|
2008-02-29 21:50:11 +00:00
|
|
|
return (msgs);
|
|
|
|
}
|
2013-08-12 22:54:38 +00:00
|
|
|
/*
|
|
|
|
** If MSIX alloc failed or provided us with
|
|
|
|
** less than needed, free and fall through to MSI
|
|
|
|
*/
|
|
|
|
pci_release_msi(dev);
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
msi:
|
2013-08-06 21:01:38 +00:00
|
|
|
if (adapter->msix_mem != NULL) {
|
|
|
|
bus_release_resource(dev, SYS_RES_MEMORY,
|
|
|
|
PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
|
|
|
|
adapter->msix_mem = NULL;
|
|
|
|
}
|
|
|
|
msgs = 1;
|
|
|
|
if (pci_alloc_msi(dev, &msgs) == 0) {
|
2013-08-12 22:54:38 +00:00
|
|
|
device_printf(adapter->dev," Using an MSI interrupt\n");
|
2012-03-01 22:13:10 +00:00
|
|
|
return (msgs);
|
|
|
|
}
|
2013-08-12 22:54:38 +00:00
|
|
|
device_printf(adapter->dev," Using a Legacy interrupt\n");
|
2012-03-01 22:13:10 +00:00
|
|
|
return (0);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize the DMA Coalescing feature
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_init_dmac(struct adapter *adapter, u32 pba)
|
|
|
|
{
|
|
|
|
device_t dev = adapter->dev;
|
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
u32 dmac, reg = ~E1000_DMACR_DMAC_EN;
|
|
|
|
u16 hwm;
|
|
|
|
|
|
|
|
if (hw->mac.type == e1000_i211)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (hw->mac.type > e1000_82580) {
|
|
|
|
|
|
|
|
if (adapter->dmac == 0) { /* Disabling it */
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMACR, reg);
|
|
|
|
return;
|
|
|
|
} else
|
|
|
|
device_printf(dev, "DMA Coalescing enabled\n");
|
|
|
|
|
|
|
|
/* Set starting threshold */
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMCTXTH, 0);
|
|
|
|
|
|
|
|
hwm = 64 * pba - adapter->max_frame_size / 16;
|
|
|
|
if (hwm < 64 * (pba - 6))
|
|
|
|
hwm = 64 * (pba - 6);
|
|
|
|
reg = E1000_READ_REG(hw, E1000_FCRTC);
|
|
|
|
reg &= ~E1000_FCRTC_RTH_COAL_MASK;
|
|
|
|
reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
|
|
|
|
& E1000_FCRTC_RTH_COAL_MASK);
|
|
|
|
E1000_WRITE_REG(hw, E1000_FCRTC, reg);
|
|
|
|
|
|
|
|
|
|
|
|
dmac = pba - adapter->max_frame_size / 512;
|
|
|
|
if (dmac < pba - 10)
|
|
|
|
dmac = pba - 10;
|
|
|
|
reg = E1000_READ_REG(hw, E1000_DMACR);
|
|
|
|
reg &= ~E1000_DMACR_DMACTHR_MASK;
|
|
|
|
reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT)
|
|
|
|
& E1000_DMACR_DMACTHR_MASK);
|
|
|
|
|
|
|
|
/* transition to L0x or L1 if available..*/
|
|
|
|
reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
|
|
|
|
|
|
|
|
/* Check if status is 2.5Gb backplane connection
|
|
|
|
* before configuration of watchdog timer, which is
|
|
|
|
* in msec values in 12.8usec intervals
|
|
|
|
* watchdog timer= msec values in 32usec intervals
|
|
|
|
* for non 2.5Gb connection
|
|
|
|
*/
|
|
|
|
if (hw->mac.type == e1000_i354) {
|
|
|
|
int status = E1000_READ_REG(hw, E1000_STATUS);
|
|
|
|
if ((status & E1000_STATUS_2P5_SKU) &&
|
|
|
|
(!(status & E1000_STATUS_2P5_SKU_OVER)))
|
|
|
|
reg |= ((adapter->dmac * 5) >> 6);
|
|
|
|
else
|
|
|
|
reg |= (adapter->dmac >> 5);
|
|
|
|
} else {
|
|
|
|
reg |= (adapter->dmac >> 5);
|
|
|
|
}
|
|
|
|
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMACR, reg);
|
|
|
|
|
|
|
|
#ifdef I210_OBFF_SUPPORT
|
|
|
|
/*
|
|
|
|
* Set the OBFF Rx threshold to DMA Coalescing Rx
|
|
|
|
* threshold - 2KB and enable the feature in the
|
|
|
|
* hardware for I210.
|
|
|
|
*/
|
|
|
|
if (hw->mac.type == e1000_i210) {
|
|
|
|
int obff = dmac - 2;
|
|
|
|
reg = E1000_READ_REG(hw, E1000_DOBFFCTL);
|
|
|
|
reg &= ~E1000_DOBFFCTL_OBFFTHR_MASK;
|
|
|
|
reg |= (obff & E1000_DOBFFCTL_OBFFTHR_MASK)
|
|
|
|
| E1000_DOBFFCTL_EXIT_ACT_MASK;
|
|
|
|
E1000_WRITE_REG(hw, E1000_DOBFFCTL, reg);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
|
|
|
|
|
|
|
|
/* Set the interval before transition */
|
|
|
|
reg = E1000_READ_REG(hw, E1000_DMCTLX);
|
|
|
|
if (hw->mac.type == e1000_i350)
|
|
|
|
reg |= IGB_DMCTLX_DCFLUSH_DIS;
|
|
|
|
/*
|
|
|
|
** in 2.5Gb connection, TTLX unit is 0.4 usec
|
|
|
|
** which is 0x4*2 = 0xA. But delay is still 4 usec
|
|
|
|
*/
|
|
|
|
if (hw->mac.type == e1000_i354) {
|
|
|
|
int status = E1000_READ_REG(hw, E1000_STATUS);
|
|
|
|
if ((status & E1000_STATUS_2P5_SKU) &&
|
|
|
|
(!(status & E1000_STATUS_2P5_SKU_OVER)))
|
|
|
|
reg |= 0xA;
|
|
|
|
else
|
|
|
|
reg |= 0x4;
|
|
|
|
} else {
|
|
|
|
reg |= 0x4;
|
|
|
|
}
|
|
|
|
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
|
|
|
|
|
|
|
|
/* free space in tx packet buffer to wake from DMA coal */
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE -
|
|
|
|
(2 * adapter->max_frame_size)) >> 6);
|
|
|
|
|
|
|
|
/* make low power state decision controlled by DMA coal */
|
|
|
|
reg = E1000_READ_REG(hw, E1000_PCIEMISC);
|
|
|
|
reg &= ~E1000_PCIEMISC_LX_DECISION;
|
|
|
|
E1000_WRITE_REG(hw, E1000_PCIEMISC, reg);
|
|
|
|
|
|
|
|
} else if (hw->mac.type == e1000_82580) {
|
|
|
|
u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC);
|
|
|
|
E1000_WRITE_REG(hw, E1000_PCIEMISC,
|
|
|
|
reg & ~E1000_PCIEMISC_LX_DECISION);
|
|
|
|
E1000_WRITE_REG(hw, E1000_DMACR, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
2009-12-08 01:07:44 +00:00
|
|
|
* Set up an fresh starting state
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
2009-12-08 01:07:44 +00:00
|
|
|
static void
|
|
|
|
igb_reset(struct adapter *adapter)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
|
|
|
device_t dev = adapter->dev;
|
2009-12-08 01:07:44 +00:00
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
struct e1000_fc_info *fc = &hw->fc;
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
|
|
|
u32 pba = 0;
|
|
|
|
u16 hwm;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
INIT_DEBUGOUT("igb_reset: begin");
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Let the firmware know the OS is in control */
|
|
|
|
igb_get_hw_control(adapter);
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/*
|
|
|
|
* Packet Buffer Allocation (PBA)
|
|
|
|
* Writing PBA sets the receive portion of the buffer
|
|
|
|
* the remainder is used for the transmit buffer.
|
|
|
|
*/
|
|
|
|
switch (hw->mac.type) {
|
|
|
|
case e1000_82575:
|
|
|
|
pba = E1000_PBA_32K;
|
|
|
|
break;
|
|
|
|
case e1000_82576:
|
2010-06-30 17:26:47 +00:00
|
|
|
case e1000_vfadapt:
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
pba = E1000_READ_REG(hw, E1000_RXPBS);
|
|
|
|
pba &= E1000_RXPBS_SIZE_MASK_82576;
|
2009-12-08 01:07:44 +00:00
|
|
|
break;
|
|
|
|
case e1000_82580:
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
case e1000_i350:
|
2013-10-09 17:32:52 +00:00
|
|
|
case e1000_i354:
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
case e1000_vfadapt_i350:
|
|
|
|
pba = E1000_READ_REG(hw, E1000_RXPBS);
|
|
|
|
pba = e1000_rxpbs_adjust_82580(pba);
|
|
|
|
break;
|
2012-07-05 20:26:57 +00:00
|
|
|
case e1000_i210:
|
|
|
|
case e1000_i211:
|
|
|
|
pba = E1000_PBA_34K;
|
2009-12-08 01:07:44 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Special needs in case of Jumbo frames */
|
|
|
|
if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
|
|
|
|
u32 tx_space, min_tx, min_rx;
|
|
|
|
pba = E1000_READ_REG(hw, E1000_PBA);
|
|
|
|
tx_space = pba >> 16;
|
|
|
|
pba &= 0xffff;
|
|
|
|
min_tx = (adapter->max_frame_size +
|
|
|
|
sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
|
|
|
|
min_tx = roundup2(min_tx, 1024);
|
|
|
|
min_tx >>= 10;
|
|
|
|
min_rx = adapter->max_frame_size;
|
|
|
|
min_rx = roundup2(min_rx, 1024);
|
|
|
|
min_rx >>= 10;
|
|
|
|
if (tx_space < min_tx &&
|
|
|
|
((min_tx - tx_space) < pba)) {
|
|
|
|
pba = pba - (min_tx - tx_space);
|
|
|
|
/*
|
|
|
|
* if short on rx space, rx wins
|
|
|
|
* and must trump tx adjustment
|
|
|
|
*/
|
|
|
|
if (pba < min_rx)
|
|
|
|
pba = min_rx;
|
|
|
|
}
|
|
|
|
E1000_WRITE_REG(hw, E1000_PBA, pba);
|
|
|
|
}
|
|
|
|
|
|
|
|
INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*
|
|
|
|
* These parameters control the automatic generation (Tx) and
|
|
|
|
* response (Rx) to Ethernet PAUSE frames.
|
|
|
|
* - High water mark should allow for at least two frames to be
|
|
|
|
* received after sending an XOFF.
|
|
|
|
* - Low water mark works best when it is very near the high water mark.
|
|
|
|
* This allows the receiver to restart by sending XON when it has
|
2009-12-08 01:07:44 +00:00
|
|
|
* drained a bit.
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2009-12-08 01:07:44 +00:00
|
|
|
hwm = min(((pba << 10) * 9 / 10),
|
|
|
|
((pba << 10) - 2 * adapter->max_frame_size));
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
if (hw->mac.type < e1000_82576) {
|
|
|
|
fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
|
|
|
|
fc->low_water = fc->high_water - 8;
|
|
|
|
} else {
|
|
|
|
fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
|
|
|
|
fc->low_water = fc->high_water - 16;
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
fc->pause_time = IGB_FC_PAUSE_TIME;
|
|
|
|
fc->send_xon = TRUE;
|
2011-12-10 07:08:52 +00:00
|
|
|
if (adapter->fc)
|
|
|
|
fc->requested_mode = adapter->fc;
|
2011-06-20 22:59:29 +00:00
|
|
|
else
|
2011-12-10 07:08:52 +00:00
|
|
|
fc->requested_mode = e1000_fc_default;
|
2008-07-30 21:56:53 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
/* Issue a global reset */
|
|
|
|
e1000_reset_hw(hw);
|
|
|
|
E1000_WRITE_REG(hw, E1000_WUC, 0);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
/* Reset for AutoMediaDetect */
|
|
|
|
if (adapter->flags & IGB_MEDIA_RESET) {
|
|
|
|
e1000_setup_init_funcs(hw, TRUE);
|
|
|
|
e1000_get_bus_info(hw);
|
|
|
|
adapter->flags &= ~IGB_MEDIA_RESET;
|
|
|
|
}
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
if (e1000_init_hw(hw) < 0)
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(dev, "Hardware Initialization Failed\n");
|
|
|
|
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
/* Setup DMA Coalescing */
|
2013-10-09 17:32:52 +00:00
|
|
|
igb_init_dmac(adapter, pba);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
|
|
|
|
e1000_get_phy_info(hw);
|
|
|
|
e1000_check_for_link(hw);
|
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Setup networking device structure and register an interface.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
2010-08-28 00:09:19 +00:00
|
|
|
static int
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_setup_interface(device_t dev, struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_setup_interface: begin");
|
|
|
|
|
|
|
|
ifp = adapter->ifp = if_alloc(IFT_ETHER);
|
2010-08-28 00:09:19 +00:00
|
|
|
if (ifp == NULL) {
|
|
|
|
device_printf(dev, "can not allocate ifnet structure\n");
|
|
|
|
return (-1);
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
|
|
|
|
ifp->if_init = igb_init;
|
|
|
|
ifp->if_softc = adapter;
|
|
|
|
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
|
|
|
|
ifp->if_ioctl = igb_ioctl;
|
2014-09-19 11:49:41 +00:00
|
|
|
ifp->if_get_counter = igb_get_counter;
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2009-06-24 17:41:29 +00:00
|
|
|
ifp->if_transmit = igb_mq_start;
|
|
|
|
ifp->if_qflush = igb_qflush;
|
2012-06-01 15:52:41 +00:00
|
|
|
#else
|
|
|
|
ifp->if_start = igb_start;
|
2010-01-26 22:32:22 +00:00
|
|
|
IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
|
|
|
|
ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
|
2008-02-29 21:50:11 +00:00
|
|
|
IFQ_SET_READY(&ifp->if_snd);
|
2012-06-01 15:52:41 +00:00
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
ether_ifattach(ifp, adapter->hw.mac.addr);
|
|
|
|
|
|
|
|
ifp->if_capabilities = ifp->if_capenable = 0;
|
|
|
|
|
2010-07-09 17:11:29 +00:00
|
|
|
ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
|
2013-10-09 17:32:52 +00:00
|
|
|
ifp->if_capabilities |= IFCAP_TSO;
|
2009-04-10 00:05:46 +00:00
|
|
|
ifp->if_capabilities |= IFCAP_JUMBO_MTU;
|
2008-02-29 21:50:11 +00:00
|
|
|
ifp->if_capenable = ifp->if_capabilities;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
|
|
|
|
/* Don't enable LRO by default */
|
|
|
|
ifp->if_capabilities |= IFCAP_LRO;
|
|
|
|
|
2010-02-01 19:28:43 +00:00
|
|
|
#ifdef DEVICE_POLLING
|
|
|
|
ifp->if_capabilities |= IFCAP_POLLING;
|
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*
|
2010-04-08 00:50:43 +00:00
|
|
|
* Tell the upper layer(s) we
|
|
|
|
* support full VLAN capability.
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2014-08-30 19:55:54 +00:00
|
|
|
ifp->if_hdrlen = sizeof(struct ether_vlan_header);
|
2011-06-20 22:59:29 +00:00
|
|
|
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
|
|
|
|
| IFCAP_VLAN_HWTSO
|
|
|
|
| IFCAP_VLAN_MTU;
|
|
|
|
ifp->if_capenable |= IFCAP_VLAN_HWTAGGING
|
|
|
|
| IFCAP_VLAN_HWTSO
|
|
|
|
| IFCAP_VLAN_MTU;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-04-08 00:50:43 +00:00
|
|
|
/*
|
2011-06-20 22:59:29 +00:00
|
|
|
** Don't turn this on by default, if vlans are
|
2010-04-08 00:50:43 +00:00
|
|
|
** created on another pseudo device (eg. lagg)
|
|
|
|
** then vlan events are not passed thru, breaking
|
|
|
|
** operation, but with HW FILTER off it works. If
|
2011-06-20 22:59:29 +00:00
|
|
|
** using vlans directly on the igb driver you can
|
2010-04-08 00:50:43 +00:00
|
|
|
** enable this and get full hardware tag filtering.
|
|
|
|
*/
|
|
|
|
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*
|
|
|
|
* Specify the media types supported by this adapter and register
|
|
|
|
* callbacks to update media and link information
|
|
|
|
*/
|
|
|
|
ifmedia_init(&adapter->media, IFM_IMASK,
|
|
|
|
igb_media_change, igb_media_status);
|
|
|
|
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
|
|
|
|
(adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
|
|
|
|
0, NULL);
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
|
|
|
|
} else {
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
|
|
|
|
0, NULL);
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
|
|
|
|
0, NULL);
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
|
|
|
|
0, NULL);
|
|
|
|
if (adapter->hw.phy.type != e1000_phy_ife) {
|
|
|
|
ifmedia_add(&adapter->media,
|
|
|
|
IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
|
|
|
|
ifmedia_add(&adapter->media,
|
|
|
|
IFM_ETHER | IFM_1000_T, 0, NULL);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
|
|
|
|
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
|
2010-08-28 00:09:19 +00:00
|
|
|
return (0);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Manage DMA'able memory.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
|
|
|
|
{
|
|
|
|
if (error)
|
|
|
|
return;
|
|
|
|
*(bus_addr_t *) arg = segs[0].ds_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_dma_malloc(struct adapter *adapter, bus_size_t size,
|
|
|
|
struct igb_dma_alloc *dma, int mapflags)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
|
2010-01-26 22:32:22 +00:00
|
|
|
IGB_DBA_ALIGN, 0, /* alignment, bounds */
|
2008-02-29 21:50:11 +00:00
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
size, /* maxsize */
|
|
|
|
1, /* nsegments */
|
|
|
|
size, /* maxsegsize */
|
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockarg */
|
|
|
|
&dma->dma_tag);
|
|
|
|
if (error) {
|
|
|
|
device_printf(adapter->dev,
|
|
|
|
"%s: bus_dma_tag_create failed: %d\n",
|
|
|
|
__func__, error);
|
|
|
|
goto fail_0;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
|
2011-04-05 21:55:43 +00:00
|
|
|
BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
|
2008-02-29 21:50:11 +00:00
|
|
|
if (error) {
|
|
|
|
device_printf(adapter->dev,
|
|
|
|
"%s: bus_dmamem_alloc(%ju) failed: %d\n",
|
|
|
|
__func__, (uintmax_t)size, error);
|
|
|
|
goto fail_2;
|
|
|
|
}
|
|
|
|
|
|
|
|
dma->dma_paddr = 0;
|
|
|
|
error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
|
|
|
|
size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
|
|
|
|
if (error || dma->dma_paddr == 0) {
|
|
|
|
device_printf(adapter->dev,
|
|
|
|
"%s: bus_dmamap_load failed: %d\n",
|
|
|
|
__func__, error);
|
|
|
|
goto fail_3;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
fail_3:
|
|
|
|
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
|
|
|
|
fail_2:
|
|
|
|
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
|
|
|
|
bus_dma_tag_destroy(dma->dma_tag);
|
|
|
|
fail_0:
|
|
|
|
dma->dma_tag = NULL;
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma)
|
|
|
|
{
|
|
|
|
if (dma->dma_tag == NULL)
|
|
|
|
return;
|
2014-06-12 11:15:19 +00:00
|
|
|
if (dma->dma_paddr != 0) {
|
2008-02-29 21:50:11 +00:00
|
|
|
bus_dmamap_sync(dma->dma_tag, dma->dma_map,
|
|
|
|
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
|
|
|
|
bus_dmamap_unload(dma->dma_tag, dma->dma_map);
|
2014-06-12 11:15:19 +00:00
|
|
|
dma->dma_paddr = 0;
|
|
|
|
}
|
|
|
|
if (dma->dma_vaddr != NULL) {
|
2008-02-29 21:50:11 +00:00
|
|
|
bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
|
2014-06-12 11:15:19 +00:00
|
|
|
dma->dma_vaddr = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
bus_dma_tag_destroy(dma->dma_tag);
|
|
|
|
dma->dma_tag = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Allocate memory for the transmit and receive rings, and then
|
|
|
|
* the descriptors associated with each, called only once at attach.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_allocate_queues(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
device_t dev = adapter->dev;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_queue *que = NULL;
|
|
|
|
struct tx_ring *txr = NULL;
|
|
|
|
struct rx_ring *rxr = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
int rsize, tsize, error = E1000_SUCCESS;
|
|
|
|
int txconf = 0, rxconf = 0;
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* First allocate the top level queue structs */
|
|
|
|
if (!(adapter->queues =
|
|
|
|
(struct igb_queue *) malloc(sizeof(struct igb_queue) *
|
|
|
|
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
|
|
|
|
device_printf(dev, "Unable to allocate queue memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Next allocate the TX ring struct memory */
|
2008-02-29 21:50:11 +00:00
|
|
|
if (!(adapter->tx_rings =
|
|
|
|
(struct tx_ring *) malloc(sizeof(struct tx_ring) *
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(dev, "Unable to allocate TX ring memory\n");
|
|
|
|
error = ENOMEM;
|
2010-01-26 22:32:22 +00:00
|
|
|
goto tx_fail;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Now allocate the RX */
|
2008-02-29 21:50:11 +00:00
|
|
|
if (!(adapter->rx_rings =
|
|
|
|
(struct rx_ring *) malloc(sizeof(struct rx_ring) *
|
2009-06-24 17:41:29 +00:00
|
|
|
adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
|
2008-02-29 21:50:11 +00:00
|
|
|
device_printf(dev, "Unable to allocate RX ring memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto rx_fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
tsize = roundup2(adapter->num_tx_desc *
|
|
|
|
sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN);
|
|
|
|
/*
|
|
|
|
* Now set up the TX queues, txconf is needed to handle the
|
|
|
|
* possibility that things fail midcourse and we need to
|
|
|
|
* undo memory gracefully
|
|
|
|
*/
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, txconf++) {
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Set up some basics */
|
|
|
|
txr = &adapter->tx_rings[i];
|
|
|
|
txr->adapter = adapter;
|
|
|
|
txr->me = i;
|
2013-10-09 17:32:52 +00:00
|
|
|
txr->num_desc = adapter->num_tx_desc;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Initialize the TX lock */
|
2008-08-28 22:28:28 +00:00
|
|
|
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
|
2008-02-29 21:50:11 +00:00
|
|
|
device_get_nameunit(dev), txr->me);
|
2008-08-28 22:28:28 +00:00
|
|
|
mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (igb_dma_malloc(adapter, tsize,
|
|
|
|
&txr->txdma, BUS_DMA_NOWAIT)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate TX Descriptor memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto err_tx_desc;
|
|
|
|
}
|
2013-10-09 17:32:52 +00:00
|
|
|
txr->tx_base = (union e1000_adv_tx_desc *)txr->txdma.dma_vaddr;
|
2008-02-29 21:50:11 +00:00
|
|
|
bzero((void *)txr->tx_base, tsize);
|
|
|
|
|
|
|
|
/* Now allocate transmit buffers for the ring */
|
|
|
|
if (igb_allocate_transmit_buffers(txr)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Critical Failure setting up transmit buffers\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto err_tx_desc;
|
|
|
|
}
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2009-06-24 17:41:29 +00:00
|
|
|
/* Allocate a buf ring */
|
2013-02-07 15:20:54 +00:00
|
|
|
txr->br = buf_ring_alloc(igb_buf_ring_size, M_DEVBUF,
|
2009-06-24 17:41:29 +00:00
|
|
|
M_WAITOK, &txr->tx_mtx);
|
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Next the RX queues...
|
|
|
|
*/
|
|
|
|
rsize = roundup2(adapter->num_rx_desc *
|
|
|
|
sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
|
2008-02-29 21:50:11 +00:00
|
|
|
rxr = &adapter->rx_rings[i];
|
|
|
|
rxr->adapter = adapter;
|
|
|
|
rxr->me = i;
|
|
|
|
|
|
|
|
/* Initialize the RX lock */
|
2008-08-28 22:28:28 +00:00
|
|
|
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
|
2008-02-29 21:50:11 +00:00
|
|
|
device_get_nameunit(dev), txr->me);
|
2008-08-28 22:28:28 +00:00
|
|
|
mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
if (igb_dma_malloc(adapter, rsize,
|
|
|
|
&rxr->rxdma, BUS_DMA_NOWAIT)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to allocate RxDescriptor memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto err_rx_desc;
|
|
|
|
}
|
|
|
|
rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
|
|
|
|
bzero((void *)rxr->rx_base, rsize);
|
|
|
|
|
|
|
|
/* Allocate receive buffers for the ring*/
|
|
|
|
if (igb_allocate_receive_buffers(rxr)) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Critical Failure setting up receive buffers\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto err_rx_desc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/*
|
|
|
|
** Finally set up the queue holding structs
|
|
|
|
*/
|
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
|
|
|
que = &adapter->queues[i];
|
|
|
|
que->adapter = adapter;
|
|
|
|
que->txr = &adapter->tx_rings[i];
|
|
|
|
que->rxr = &adapter->rx_rings[i];
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
return (0);
|
|
|
|
|
|
|
|
err_rx_desc:
|
|
|
|
for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
|
|
|
|
igb_dma_free(adapter, &rxr->rxdma);
|
|
|
|
err_tx_desc:
|
|
|
|
for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
|
|
|
|
igb_dma_free(adapter, &txr->txdma);
|
|
|
|
free(adapter->rx_rings, M_DEVBUF);
|
|
|
|
rx_fail:
|
2013-03-29 18:25:45 +00:00
|
|
|
#ifndef IGB_LEGACY_TX
|
2010-01-26 22:32:22 +00:00
|
|
|
buf_ring_free(txr->br, M_DEVBUF);
|
2010-05-14 22:18:34 +00:00
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
free(adapter->tx_rings, M_DEVBUF);
|
2010-01-26 22:32:22 +00:00
|
|
|
tx_fail:
|
|
|
|
free(adapter->queues, M_DEVBUF);
|
2008-02-29 21:50:11 +00:00
|
|
|
fail:
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Allocate memory for tx_buffer structures. The tx_buffer stores all
|
|
|
|
* the information needed to transmit a packet on the wire. This is
|
|
|
|
* called only once at attach, setup is done every reset.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_allocate_transmit_buffers(struct tx_ring *txr)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = txr->adapter;
|
|
|
|
device_t dev = adapter->dev;
|
2013-10-09 17:32:52 +00:00
|
|
|
struct igb_tx_buf *txbuf;
|
2008-02-29 21:50:11 +00:00
|
|
|
int error, i;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup DMA descriptor areas.
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
|
2009-11-11 19:13:40 +00:00
|
|
|
1, 0, /* alignment, bounds */
|
2008-02-29 21:50:11 +00:00
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
|
|
|
IGB_TSO_SIZE, /* maxsize */
|
|
|
|
IGB_MAX_SCATTER, /* nsegments */
|
2010-01-26 22:32:22 +00:00
|
|
|
PAGE_SIZE, /* maxsegsize */
|
2008-02-29 21:50:11 +00:00
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockfuncarg */
|
|
|
|
&txr->txtag))) {
|
|
|
|
device_printf(dev,"Unable to allocate TX DMA tag\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(txr->tx_buffers =
|
2013-10-09 17:32:52 +00:00
|
|
|
(struct igb_tx_buf *) malloc(sizeof(struct igb_tx_buf) *
|
2008-02-29 21:50:11 +00:00
|
|
|
adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
|
|
|
|
device_printf(dev, "Unable to allocate tx_buffer memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Create the descriptor buffer dma maps */
|
|
|
|
txbuf = txr->tx_buffers;
|
|
|
|
for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
|
|
|
|
error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
|
|
|
|
if (error != 0) {
|
|
|
|
device_printf(dev, "Unable to create TX DMA map\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
|
|
/* We free all, it handles case where we are in the middle */
|
|
|
|
igb_free_transmit_structures(adapter);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize a transmit ring.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
igb_setup_transmit_ring(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct igb_tx_buf *txbuf;
	int i;
#ifdef DEV_NETMAP
	struct netmap_adapter *na = NA(adapter->ifp);
	struct netmap_slot *slot;
#endif /* DEV_NETMAP */

	/* Clear the old descriptor contents */
	IGB_TX_LOCK(txr);
#ifdef DEV_NETMAP
	/* Non-NULL slot means the ring is in netmap mode */
	slot = netmap_reset(na, NR_TX, txr->me, 0);
#endif /* DEV_NETMAP */
	bzero((void *)txr->tx_base,
	    (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;

	/* Free any existing tx buffers. */
	txbuf = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
		if (txbuf->m_head != NULL) {
			/* Sync before unload so the CPU sees final DMA state */
			bus_dmamap_sync(txr->txtag, txbuf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
#ifdef DEV_NETMAP
		if (slot) {
			/* Translate this kernel ring slot to netmap's view */
			int si = netmap_idx_n2k(&na->tx_rings[txr->me], i);
			/* no need to set the address */
			netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si));
		}
#endif /* DEV_NETMAP */
		/* clear the watch index */
		txbuf->eop = NULL;
	}

	/* Set number of descriptors available */
	txr->tx_avail = adapter->num_tx_desc;

	/* Push the zeroed descriptor ring out to the hardware */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	IGB_TX_UNLOCK(txr);
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize all transmit rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_setup_transmit_structures(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, txr++)
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_setup_transmit_ring(txr);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Enable transmit unit.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
igb_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, txdctl;

	INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
	tctl = txdctl = 0;

	/* Setup the Tx Descriptor Rings */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 bus_addr = txr->txdma.dma_paddr;

		/* Program ring length and base (high/low halves) */
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		HW_DEBUGOUT2("Base = %x, Length = %x\n",
		    E1000_READ_REG(hw, E1000_TDBAL(i)),
		    E1000_READ_REG(hw, E1000_TDLEN(i)));

		txr->queue_status = IGB_QUEUE_IDLE;

		/* Prefetch/host/writeback thresholds, then enable the queue */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	/* VF interfaces do not own the global TX control register */
	if (adapter->vf_ifp)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free all transmit rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
igb_free_transmit_structures(struct adapter *adapter)
{
	struct tx_ring *txr = adapter->tx_rings;

	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		/* Take the ring lock while tearing down its buffers/DMA,
		 * then destroy the lock itself once it is released. */
		IGB_TX_LOCK(txr);
		igb_free_transmit_buffers(txr);
		igb_dma_free(adapter, &txr->txdma);
		IGB_TX_UNLOCK(txr);
		IGB_TX_LOCK_DESTROY(txr);
	}
	/* Finally release the ring array itself */
	free(adapter->tx_rings, M_DEVBUF);
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free transmit ring related data structures.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
igb_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct igb_tx_buf *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	/* Nothing was ever allocated for this ring */
	if (txr->tx_buffers == NULL)
		return;

	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			/* Buffer still holds an mbuf: sync, unload,
			 * free the chain, then destroy the map. */
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			/* No mbuf, but a map exists: still unload
			 * and destroy it to release DMA resources. */
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#ifndef IGB_LEGACY_TX
	/* Release the multiqueue buf_ring used by mq_start */
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
2013-10-09 17:32:52 +00:00
|
|
|
* Setup work for hardware segmentation offload (TSO) on
|
|
|
|
* adapters using advanced tx descriptors
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
2013-10-09 17:32:52 +00:00
|
|
|
static int
igb_tso_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct adapter *adapter = txr->adapter;
	struct e1000_adv_tx_context_desc *TXD;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0, paylen;
	u16 vtag = 0, eh_type;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
#ifdef INET
	struct ip *ip;
#endif
	struct tcphdr *th;


	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		eh_type = eh->evl_proto;
	} else {
		ehdrlen = ETHER_HDR_LEN;
		eh_type = eh->evl_encap_proto;
	}

	switch (ntohs(eh_type)) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return (ENXIO);
		ip_hlen = sizeof(struct ip6_hdr);
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
		/* Seed the TCP checksum with the pseudo-header sum;
		 * the hardware computes the rest during segmentation. */
		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return (ENXIO);
		/* Zero the IP checksum; hardware recomputes it per segment */
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		/* Tell transmit desc to also do IPv4 checksum. */
		*olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
		break;
#endif
	default:
		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
		    __func__, ntohs(eh_type));
		break;
	}

	/* Claim the next descriptor slot for the context descriptor */
	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];

	tcp_hlen = th->th_off << 2;

	/* This is used in the transmit desc in encap */
	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);

	/* Advance the ring, wrapping at the end */
	if (++ctxd == txr->num_desc)
		ctxd = 0;

	/* The context descriptor consumed one slot */
	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	*cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
	*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	*olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT;
	++txr->tso_tx;
	return (0);
}
|
2009-06-24 17:41:29 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
2013-10-09 17:32:52 +00:00
|
|
|
* Advanced Context Descriptor setup for VLAN, CSUM or TSO
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
|
2013-10-09 17:32:52 +00:00
|
|
|
static int
igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
    u32 *cmd_type_len, u32 *olinfo_status)
{
	struct e1000_adv_tx_context_desc *TXD;
	struct adapter *adapter = txr->adapter;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0;
	int ehdrlen, ip_hlen = 0;
	u16 etype;
	u8 ipproto = 0;
	int offload = TRUE;
	int ctxd = txr->next_avail_desc;
	u16 vtag = 0;

	/* First check if TSO is to be used */
	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
		return (igb_tso_setup(txr, mp, cmd_type_len, olinfo_status));

	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	/* Indicate the whole packet as payload when not doing TSO */
	*olinfo_status |= mp->m_pkthdr.len << E1000_ADVTXD_PAYLEN_SHIFT;

	/* Now ready a context descriptor */
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor. Hence
	** we need to make one even if not doing offloads.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE) /* ... no offload to do */
		return (0);

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	switch (etype) {
	case ETHERTYPE_IP:
		ip = (struct ip *)(mp->m_data + ehdrlen);
		ip_hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
		break;
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		ip_hlen = sizeof(struct ip6_hdr);
		/* XXX-BZ this will go badly in case of ext hdrs. */
		ipproto = ip6->ip6_nxt;
		type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
		break;
	default:
		/* Unknown L3 protocol: still emit the descriptor for
		 * the VLAN tag, but request no checksum offload. */
		offload = FALSE;
		break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
		break;
	case IPPROTO_UDP:
		if (mp->m_pkthdr.csum_flags & CSUM_UDP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
		break;

#if __FreeBSD_version >= 800000
	case IPPROTO_SCTP:
		if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
		break;
#endif
	default:
		offload = FALSE;
		break;
	}

	if (offload) /* For the TX descriptor setup */
		*olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	return (0);
}
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Examine each tx_buffer in the used queue. If the hardware is done
|
|
|
|
* processing the packet then free associated resources. The
|
|
|
|
* tx_buffer is put back on the free queue.
|
|
|
|
*
|
|
|
|
* TRUE return means there's work in the ring to clean, FALSE its empty.
|
|
|
|
**********************************************************************/
|
|
|
|
static bool
igb_txeof(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct ifnet *ifp = adapter->ifp;
	u32 work, processed = 0;
	u16 limit = txr->process_limit;
	struct igb_tx_buf *buf;
	union e1000_adv_tx_desc *txd;

	mtx_assert(&txr->tx_mtx, MA_OWNED);

#ifdef DEV_NETMAP
	/* In netmap mode completions are handled by netmap itself */
	if (netmap_tx_irq(ifp, txr->me))
		return (FALSE);
#endif /* DEV_NETMAP */

	/* Ring already fully clean */
	if (txr->tx_avail == txr->num_desc) {
		txr->queue_status = IGB_QUEUE_IDLE;
		return FALSE;
	}

	/* Get work starting point */
	work = txr->next_to_clean;
	buf = &txr->tx_buffers[work];
	txd = &txr->tx_base[work];
	/* 'work' is kept as a negative offset from the ring end so a
	 * wrap is detected by it reaching zero (see !work tests below). */
	work -= txr->num_desc; /* The distance to ring end */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	do {
		union e1000_adv_tx_desc *eop = buf->eop;
		if (eop == NULL) /* No work */
			break;

		if ((eop->wb.status & E1000_TXD_STAT_DD) == 0)
			break;	/* I/O not complete */

		if (buf->m_head) {
			txr->bytes +=
			    buf->m_head->m_pkthdr.len;
			bus_dmamap_sync(txr->txtag,
			    buf->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    buf->map);
			m_freem(buf->m_head);
			buf->m_head = NULL;
		}
		buf->eop = NULL;
		++txr->tx_avail;

		/* We clean the range if multi segment */
		while (txd != eop) {
			++txd;
			++buf;
			++work;
			/* wrap the ring? */
			if (__predict_false(!work)) {
				work -= txr->num_desc;
				buf = txr->tx_buffers;
				txd = txr->tx_base;
			}
			if (buf->m_head) {
				txr->bytes +=
				    buf->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag,
				    buf->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    buf->map);
				m_freem(buf->m_head);
				buf->m_head = NULL;
			}
			++txr->tx_avail;
			buf->eop = NULL;

		}
		++txr->packets;
		++processed;
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		/* Progress was made: reset the watchdog timestamp */
		txr->watchdog_time = ticks;

		/* Try the next packet */
		++txd;
		++buf;
		++work;
		/* reset with a wrap */
		if (__predict_false(!work)) {
			work -= txr->num_desc;
			buf = txr->tx_buffers;
			txd = txr->tx_base;
		}
		prefetch(txd);
	} while (__predict_true(--limit));

	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Convert the offset-from-end back into a ring index */
	work += txr->num_desc;
	txr->next_to_clean = work;

	/*
	** Watchdog calculation, we know there's
	** work outstanding or the first return
	** would have been taken, so none processed
	** for too long indicates a hang.
	*/
	if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
		txr->queue_status |= IGB_QUEUE_HUNG;

	if (txr->tx_avail >= IGB_QUEUE_THRESHOLD)
		txr->queue_status &= ~IGB_QUEUE_DEPLETED;

	/* Everything reclaimed: ring is idle */
	if (txr->tx_avail == txr->num_desc) {
		txr->queue_status = IGB_QUEUE_IDLE;
		return (FALSE);
	}

	return (TRUE);
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
* Refresh mbuf buffers for RX descriptor rings
|
|
|
|
* - now keeps its own state so discards due to resource
|
|
|
|
* exhaustion are unnecessary, if an mbuf cannot be obtained
|
|
|
|
* it just returns, keeping its placeholder, thus it can simply
|
|
|
|
* be recalled to try again.
|
2008-02-29 21:50:11 +00:00
|
|
|
*
|
|
|
|
**********************************************************************/
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
static void
|
|
|
|
igb_refresh_mbufs(struct rx_ring *rxr, int limit)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
|
|
|
struct adapter *adapter = rxr->adapter;
|
2010-01-26 22:32:22 +00:00
|
|
|
bus_dma_segment_t hseg[1];
|
|
|
|
bus_dma_segment_t pseg[1];
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
struct igb_rx_buf *rxbuf;
|
|
|
|
struct mbuf *mh, *mp;
|
2011-03-18 18:54:00 +00:00
|
|
|
int i, j, nsegs, error;
|
|
|
|
bool refreshed = FALSE;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
i = j = rxr->next_to_refresh;
|
|
|
|
/*
|
|
|
|
** Get one descriptor beyond
|
|
|
|
** our work mark to control
|
|
|
|
** the loop.
|
|
|
|
*/
|
|
|
|
if (++j == adapter->num_rx_desc)
|
|
|
|
j = 0;
|
|
|
|
|
|
|
|
while (j != limit) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxbuf = &rxr->rx_buffers[i];
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/* No hdr mbuf used with header split off */
|
|
|
|
if (rxr->hdr_split == FALSE)
|
|
|
|
goto no_split;
|
|
|
|
if (rxbuf->m_head == NULL) {
|
2012-12-04 09:32:43 +00:00
|
|
|
mh = m_gethdr(M_NOWAIT, MT_DATA);
|
2011-03-18 18:54:00 +00:00
|
|
|
if (mh == NULL)
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
goto update;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
} else
|
|
|
|
mh = rxbuf->m_head;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
mh->m_pkthdr.len = mh->m_len = MHLEN;
|
|
|
|
mh->m_len = MHLEN;
|
|
|
|
mh->m_flags |= M_PKTHDR;
|
|
|
|
/* Get the memory mapping */
|
|
|
|
error = bus_dmamap_load_mbuf_sg(rxr->htag,
|
|
|
|
rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
|
|
|
|
if (error != 0) {
|
|
|
|
printf("Refresh mbufs: hdr dmamap load"
|
|
|
|
" failure - %d\n", error);
|
|
|
|
m_free(mh);
|
|
|
|
rxbuf->m_head = NULL;
|
|
|
|
goto update;
|
|
|
|
}
|
|
|
|
rxbuf->m_head = mh;
|
|
|
|
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
|
|
|
|
BUS_DMASYNC_PREREAD);
|
|
|
|
rxr->rx_base[i].read.hdr_addr =
|
|
|
|
htole64(hseg[0].ds_addr);
|
|
|
|
no_split:
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
if (rxbuf->m_pack == NULL) {
|
2012-12-04 09:32:43 +00:00
|
|
|
mp = m_getjcl(M_NOWAIT, MT_DATA,
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
M_PKTHDR, adapter->rx_mbuf_sz);
|
2011-03-18 18:54:00 +00:00
|
|
|
if (mp == NULL)
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
goto update;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
} else
|
|
|
|
mp = rxbuf->m_pack;
|
|
|
|
|
|
|
|
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
|
|
|
|
/* Get the memory mapping */
|
|
|
|
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
|
|
|
|
rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
|
|
|
|
if (error != 0) {
|
|
|
|
printf("Refresh mbufs: payload dmamap load"
|
|
|
|
" failure - %d\n", error);
|
|
|
|
m_free(mp);
|
|
|
|
rxbuf->m_pack = NULL;
|
|
|
|
goto update;
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
rxbuf->m_pack = mp;
|
|
|
|
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
|
|
|
|
BUS_DMASYNC_PREREAD);
|
|
|
|
rxr->rx_base[i].read.pkt_addr =
|
|
|
|
htole64(pseg[0].ds_addr);
|
2011-03-18 18:54:00 +00:00
|
|
|
refreshed = TRUE; /* I feel wefreshed :) */
|
2009-12-08 01:07:44 +00:00
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
i = j; /* our next is precalculated */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxr->next_to_refresh = i;
|
2011-03-18 18:54:00 +00:00
|
|
|
if (++j == adapter->num_rx_desc)
|
|
|
|
j = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
update:
|
2011-03-18 18:54:00 +00:00
|
|
|
if (refreshed) /* update tail */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw,
|
2011-03-18 18:54:00 +00:00
|
|
|
E1000_RDT(rxr->me), rxr->next_to_refresh);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Allocate memory for rx_buffer structures. Since we use one
|
|
|
|
* rx_buffer per received packet, the maximum number of rx_buffer's
|
|
|
|
* that we'll need is equal to the number of receive descriptors
|
|
|
|
* that we've allocated.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_allocate_receive_buffers(struct rx_ring *rxr)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = rxr->adapter;
|
|
|
|
device_t dev = adapter->dev;
|
2009-12-08 01:07:44 +00:00
|
|
|
struct igb_rx_buf *rxbuf;
|
2008-02-29 21:50:11 +00:00
|
|
|
int i, bsize, error;
|
|
|
|
|
2009-12-08 01:07:44 +00:00
|
|
|
bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
|
2008-02-29 21:50:11 +00:00
|
|
|
if (!(rxr->rx_buffers =
|
2009-12-08 01:07:44 +00:00
|
|
|
(struct igb_rx_buf *) malloc(bsize,
|
2008-02-29 21:50:11 +00:00
|
|
|
M_DEVBUF, M_NOWAIT | M_ZERO))) {
|
|
|
|
device_printf(dev, "Unable to allocate rx_buffer memory\n");
|
|
|
|
error = ENOMEM;
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
|
2009-11-11 19:13:40 +00:00
|
|
|
1, 0, /* alignment, bounds */
|
2008-02-29 21:50:11 +00:00
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
2010-01-26 22:32:22 +00:00
|
|
|
MSIZE, /* maxsize */
|
|
|
|
1, /* nsegments */
|
|
|
|
MSIZE, /* maxsegsize */
|
2008-02-29 21:50:11 +00:00
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockfuncarg */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
&rxr->htag))) {
|
2009-04-10 00:05:46 +00:00
|
|
|
device_printf(dev, "Unable to create RX DMA tag\n");
|
2008-02-29 21:50:11 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
|
|
|
|
1, 0, /* alignment, bounds */
|
|
|
|
BUS_SPACE_MAXADDR, /* lowaddr */
|
|
|
|
BUS_SPACE_MAXADDR, /* highaddr */
|
|
|
|
NULL, NULL, /* filter, filterarg */
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
MJUM9BYTES, /* maxsize */
|
2010-01-26 22:32:22 +00:00
|
|
|
1, /* nsegments */
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
MJUM9BYTES, /* maxsegsize */
|
2010-01-26 22:32:22 +00:00
|
|
|
0, /* flags */
|
|
|
|
NULL, /* lockfunc */
|
|
|
|
NULL, /* lockfuncarg */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
&rxr->ptag))) {
|
2010-01-26 22:32:22 +00:00
|
|
|
device_printf(dev, "Unable to create RX payload DMA tag\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < adapter->num_rx_desc; i++) {
|
2008-02-29 21:50:11 +00:00
|
|
|
rxbuf = &rxr->rx_buffers[i];
|
2013-11-02 09:16:11 +00:00
|
|
|
error = bus_dmamap_create(rxr->htag, 0, &rxbuf->hmap);
|
2008-02-29 21:50:11 +00:00
|
|
|
if (error) {
|
2010-01-26 22:32:22 +00:00
|
|
|
device_printf(dev,
|
|
|
|
"Unable to create RX head DMA maps\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
2013-11-02 09:16:11 +00:00
|
|
|
error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap);
|
2010-01-26 22:32:22 +00:00
|
|
|
if (error) {
|
|
|
|
device_printf(dev,
|
|
|
|
"Unable to create RX packet DMA maps\n");
|
2008-02-29 21:50:11 +00:00
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
/* Frees all, but can handle partial completion */
|
|
|
|
igb_free_receive_structures(adapter);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
igb_free_receive_ring(struct rx_ring *rxr)
|
|
|
|
{
|
2011-04-05 21:55:43 +00:00
|
|
|
struct adapter *adapter = rxr->adapter;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_rx_buf *rxbuf;
|
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
|
|
|
|
for (int i = 0; i < adapter->num_rx_desc; i++) {
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf = &rxr->rx_buffers[i];
|
|
|
|
if (rxbuf->m_head != NULL) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
|
2010-01-26 22:32:22 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_unload(rxr->htag, rxbuf->hmap);
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf->m_head->m_flags |= M_PKTHDR;
|
|
|
|
m_freem(rxbuf->m_head);
|
|
|
|
}
|
|
|
|
if (rxbuf->m_pack != NULL) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
|
2010-01-26 22:32:22 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf->m_pack->m_flags |= M_PKTHDR;
|
|
|
|
m_freem(rxbuf->m_pack);
|
|
|
|
}
|
|
|
|
rxbuf->m_head = NULL;
|
|
|
|
rxbuf->m_pack = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize a receive ring and its buffers.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_setup_receive_ring(struct rx_ring *rxr)
|
|
|
|
{
|
2008-07-30 21:56:53 +00:00
|
|
|
struct adapter *adapter;
|
2009-06-24 17:41:29 +00:00
|
|
|
struct ifnet *ifp;
|
2008-07-30 21:56:53 +00:00
|
|
|
device_t dev;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
struct igb_rx_buf *rxbuf;
|
|
|
|
bus_dma_segment_t pseg[1], hseg[1];
|
2008-07-30 21:56:53 +00:00
|
|
|
struct lro_ctrl *lro = &rxr->lro;
|
2011-04-05 21:55:43 +00:00
|
|
|
int rsize, nsegs, error = 0;
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
struct netmap_adapter *na = NA(rxr->adapter->ifp);
|
|
|
|
struct netmap_slot *slot;
|
|
|
|
#endif /* DEV_NETMAP */
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
adapter = rxr->adapter;
|
2008-07-30 21:56:53 +00:00
|
|
|
dev = adapter->dev;
|
2009-06-24 17:41:29 +00:00
|
|
|
ifp = adapter->ifp;
|
2009-04-10 00:05:46 +00:00
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
/* Clear the ring contents */
|
2010-01-26 22:32:22 +00:00
|
|
|
IGB_RX_LOCK(rxr);
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
slot = netmap_reset(na, NR_RX, rxr->me, 0);
|
|
|
|
#endif /* DEV_NETMAP */
|
2011-04-05 21:55:43 +00:00
|
|
|
rsize = roundup2(adapter->num_rx_desc *
|
|
|
|
sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
|
|
|
|
bzero((void *)rxr->rx_base, rsize);
|
|
|
|
|
|
|
|
/*
|
|
|
|
** Free current RX buffer structures and their mbufs
|
|
|
|
*/
|
|
|
|
igb_free_receive_ring(rxr);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-11 20:54:27 +00:00
|
|
|
/* Configure for header split? */
|
|
|
|
if (igb_header_split)
|
|
|
|
rxr->hdr_split = TRUE;
|
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* Now replenish the ring mbufs */
|
2011-04-05 21:55:43 +00:00
|
|
|
for (int j = 0; j < adapter->num_rx_desc; ++j) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
struct mbuf *mh, *mp;
|
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
rxbuf = &rxr->rx_buffers[j];
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
if (slot) {
|
2014-05-28 06:50:16 +00:00
|
|
|
/* slot sj is mapped to the j-th NIC-ring entry */
|
2012-02-27 19:05:01 +00:00
|
|
|
int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
|
2012-01-10 19:57:23 +00:00
|
|
|
uint64_t paddr;
|
2011-12-22 15:33:41 +00:00
|
|
|
void *addr;
|
|
|
|
|
Update to the current version of netmap.
Mostly bugfixes or features developed in the past 6 months,
so this is a 10.1 candidate.
Basically no user API changes (some bugfixes in sys/net/netmap_user.h).
In detail:
1. netmap support for virtio-net, including in netmap mode.
Under bhyve and with a netmap backend [2] we reach over 1Mpps
with standard APIs (e.g. libpcap), and 5-8 Mpps in netmap mode.
2. (kernel) add support for multiple memory allocators, so we can
better partition physical and virtual interfaces giving access
to separate users. The most visible effect is one additional
argument to the various kernel functions to compute buffer
addresses. All netmap-supported drivers are affected, but changes
are mechanical and trivial
3. (kernel) simplify the prototype for *txsync() and *rxsync()
driver methods. All netmap drivers affected, changes mostly mechanical.
4. add support for netmap-monitor ports. Think of it as a mirroring
port on a physical switch: a netmap monitor port replicates traffic
present on the main port. Restrictions apply. Drive carefully.
5. if_lem.c: support for various paravirtualization features,
experimental and disabled by default.
Most of these are described in our ANCS'13 paper [1].
Paravirtualized support in netmap mode is new, and beats the
numbers in the paper by a large factor (under qemu-kvm,
we measured gues-host throughput up to 10-12 Mpps).
A lot of refactoring and additional documentation in the files
in sys/dev/netmap, but apart from #2 and #3 above, almost nothing
of this stuff is visible to other kernel parts.
Example programs in tools/tools/netmap have been updated with bugfixes
and to support more of the existing features.
This is meant to go into 10.1 so we plan an MFC before the Aug.22 deadline.
A lot of this code has been contributed by my colleagues at UNIPI,
including Giuseppe Lettieri, Vincenzo Maffione, Stefano Garzarella.
MFC after: 3 days.
2014-08-16 15:00:01 +00:00
|
|
|
addr = PNMB(na, slot + sj, &paddr);
|
|
|
|
netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
|
2011-12-22 15:33:41 +00:00
|
|
|
/* Update descriptor */
|
2012-01-10 19:57:23 +00:00
|
|
|
rxr->rx_base[j].read.pkt_addr = htole64(paddr);
|
2011-12-22 15:33:41 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
#endif /* DEV_NETMAP */
|
2010-06-11 20:54:27 +00:00
|
|
|
if (rxr->hdr_split == FALSE)
|
|
|
|
goto skip_head;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
|
|
|
/* First the header */
|
2012-12-04 09:32:43 +00:00
|
|
|
rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
|
2010-06-11 20:54:27 +00:00
|
|
|
if (rxbuf->m_head == NULL) {
|
|
|
|
error = ENOBUFS;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
goto fail;
|
2010-06-11 20:54:27 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
m_adj(rxbuf->m_head, ETHER_ALIGN);
|
|
|
|
mh = rxbuf->m_head;
|
|
|
|
mh->m_len = mh->m_pkthdr.len = MHLEN;
|
|
|
|
mh->m_flags |= M_PKTHDR;
|
|
|
|
/* Get the memory mapping */
|
|
|
|
error = bus_dmamap_load_mbuf_sg(rxr->htag,
|
|
|
|
rxbuf->hmap, rxbuf->m_head, hseg,
|
|
|
|
&nsegs, BUS_DMA_NOWAIT);
|
|
|
|
if (error != 0) /* Nothing elegant to do here */
|
|
|
|
goto fail;
|
|
|
|
bus_dmamap_sync(rxr->htag,
|
|
|
|
rxbuf->hmap, BUS_DMASYNC_PREREAD);
|
|
|
|
/* Update descriptor */
|
2011-04-05 21:55:43 +00:00
|
|
|
rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
2010-06-11 20:54:27 +00:00
|
|
|
skip_head:
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* Now the payload cluster */
|
2012-12-04 09:32:43 +00:00
|
|
|
rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
M_PKTHDR, adapter->rx_mbuf_sz);
|
2010-06-11 20:54:27 +00:00
|
|
|
if (rxbuf->m_pack == NULL) {
|
|
|
|
error = ENOBUFS;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
goto fail;
|
2010-06-11 20:54:27 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
mp = rxbuf->m_pack;
|
|
|
|
mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
|
|
|
|
/* Get the memory mapping */
|
|
|
|
error = bus_dmamap_load_mbuf_sg(rxr->ptag,
|
|
|
|
rxbuf->pmap, mp, pseg,
|
|
|
|
&nsegs, BUS_DMA_NOWAIT);
|
|
|
|
if (error != 0)
|
|
|
|
goto fail;
|
|
|
|
bus_dmamap_sync(rxr->ptag,
|
|
|
|
rxbuf->pmap, BUS_DMASYNC_PREREAD);
|
|
|
|
/* Update descriptor */
|
2011-04-05 21:55:43 +00:00
|
|
|
rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
}
|
2010-04-08 00:50:43 +00:00
|
|
|
|
|
|
|
/* Setup our descriptor indices */
|
2011-04-05 21:55:43 +00:00
|
|
|
rxr->next_to_check = 0;
|
|
|
|
rxr->next_to_refresh = adapter->num_rx_desc - 1;
|
2009-12-08 01:07:44 +00:00
|
|
|
rxr->lro_enabled = FALSE;
|
2010-06-11 20:54:27 +00:00
|
|
|
rxr->rx_split_packets = 0;
|
|
|
|
rxr->rx_bytes = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
rxr->fmp = NULL;
|
|
|
|
rxr->lmp = NULL;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
/*
|
|
|
|
** Now set up the LRO interface, we
|
|
|
|
** also only do head split when LRO
|
|
|
|
** is enabled, since so often they
|
|
|
|
** are undesireable in similar setups.
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
if (ifp->if_capenable & IFCAP_LRO) {
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
error = tcp_lro_init(lro);
|
|
|
|
if (error) {
|
2010-01-26 22:32:22 +00:00
|
|
|
device_printf(dev, "LRO Initialization failed!\n");
|
|
|
|
goto fail;
|
|
|
|
}
|
2008-11-26 23:57:23 +00:00
|
|
|
INIT_DEBUGOUT("RX LRO Initialized\n");
|
2009-06-24 17:41:29 +00:00
|
|
|
rxr->lro_enabled = TRUE;
|
2008-07-30 21:56:53 +00:00
|
|
|
lro->ifp = adapter->ifp;
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
IGB_RX_UNLOCK(rxr);
|
2008-02-29 21:50:11 +00:00
|
|
|
return (0);
|
2010-01-26 22:32:22 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
fail:
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_free_receive_ring(rxr);
|
|
|
|
IGB_RX_UNLOCK(rxr);
|
|
|
|
return (error);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize all receive rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
|
|
|
igb_setup_receive_structures(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct rx_ring *rxr = adapter->rx_rings;
|
2010-06-11 20:54:27 +00:00
|
|
|
int i;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
for (i = 0; i < adapter->num_queues; i++, rxr++)
|
2008-02-29 21:50:11 +00:00
|
|
|
if (igb_setup_receive_ring(rxr))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
fail:
|
|
|
|
/*
|
|
|
|
* Free RX buffers allocated so far, we will only handle
|
|
|
|
* the rings that completed, the failing case will have
|
2010-06-11 20:54:27 +00:00
|
|
|
* cleaned up for itself. 'i' is the endpoint.
|
2008-02-29 21:50:11 +00:00
|
|
|
*/
|
2013-01-12 16:05:55 +00:00
|
|
|
for (int j = 0; j < i; ++j) {
|
|
|
|
rxr = &adapter->rx_rings[j];
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
IGB_RX_LOCK(rxr);
|
2010-06-11 20:54:27 +00:00
|
|
|
igb_free_receive_ring(rxr);
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
IGB_RX_UNLOCK(rxr);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (ENOBUFS);
|
|
|
|
}
|
|
|
|
|
2014-06-30 04:34:59 +00:00
|
|
|
/*
|
|
|
|
* Initialise the RSS mapping for NICs that support multiple transmit/
|
|
|
|
* receive rings.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_initialise_rss_mapping(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
int i;
|
|
|
|
int queue_id;
|
2014-07-23 05:40:28 +00:00
|
|
|
u32 reta;
|
2014-06-30 04:34:59 +00:00
|
|
|
u32 rss_key[10], mrqc, shift = 0;
|
|
|
|
|
|
|
|
/* XXX? */
|
|
|
|
if (adapter->hw.mac.type == e1000_82575)
|
|
|
|
shift = 6;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The redirection table controls which destination
|
|
|
|
* queue each bucket redirects traffic to.
|
|
|
|
* Each DWORD represents four queues, with the LSB
|
|
|
|
* being the first queue in the DWORD.
|
|
|
|
*
|
|
|
|
* This just allocates buckets to queues using round-robin
|
|
|
|
* allocation.
|
|
|
|
*
|
|
|
|
* NOTE: It Just Happens to line up with the default
|
|
|
|
* RSS allocation method.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Warning FM follows */
|
2014-07-23 05:40:28 +00:00
|
|
|
reta = 0;
|
2014-06-30 04:34:59 +00:00
|
|
|
for (i = 0; i < 128; i++) {
|
|
|
|
#ifdef RSS
|
|
|
|
queue_id = rss_get_indirection_to_bucket(i);
|
|
|
|
/*
|
|
|
|
* If we have more queues than buckets, we'll
|
|
|
|
* end up mapping buckets to a subset of the
|
|
|
|
* queues.
|
|
|
|
*
|
|
|
|
* If we have more buckets than queues, we'll
|
|
|
|
* end up instead assigning multiple buckets
|
|
|
|
* to queues.
|
|
|
|
*
|
|
|
|
* Both are suboptimal, but we need to handle
|
|
|
|
* the case so we don't go out of bounds
|
|
|
|
* indexing arrays and such.
|
|
|
|
*/
|
|
|
|
queue_id = queue_id % adapter->num_queues;
|
|
|
|
#else
|
|
|
|
queue_id = (i % adapter->num_queues);
|
|
|
|
#endif
|
2014-07-23 05:40:28 +00:00
|
|
|
/* Adjust if required */
|
|
|
|
queue_id = queue_id << shift;
|
2014-06-30 04:34:59 +00:00
|
|
|
|
2014-07-23 05:40:28 +00:00
|
|
|
/*
|
|
|
|
* The low 8 bits are for hash value (n+0);
|
|
|
|
* The next 8 bits are for hash value (n+1), etc.
|
|
|
|
*/
|
|
|
|
reta = reta >> 8;
|
|
|
|
reta = reta | ( ((uint32_t) queue_id) << 24);
|
|
|
|
if ((i & 3) == 3) {
|
|
|
|
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta);
|
|
|
|
reta = 0;
|
|
|
|
}
|
2014-06-30 04:34:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Now fill in hash table */
|
|
|
|
|
|
|
|
/* XXX This means RSS enable + 8 queues for my igb (82580.) */
|
|
|
|
mrqc = E1000_MRQC_ENABLE_RSS_4Q;
|
|
|
|
|
|
|
|
#ifdef RSS
|
|
|
|
/* XXX ew typecasting */
|
|
|
|
rss_getkey((uint8_t *) &rss_key);
|
|
|
|
#else
|
|
|
|
arc4rand(&rss_key, sizeof(rss_key), 0);
|
|
|
|
#endif
|
|
|
|
for (i = 0; i < 10; i++)
|
|
|
|
E1000_WRITE_REG_ARRAY(hw,
|
|
|
|
E1000_RSSRK(0), i, rss_key[i]);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure the RSS fields to hash upon.
|
|
|
|
*/
|
|
|
|
mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
|
|
|
|
E1000_MRQC_RSS_FIELD_IPV4_TCP);
|
|
|
|
mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
|
|
|
|
E1000_MRQC_RSS_FIELD_IPV6_TCP);
|
|
|
|
mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
|
|
|
|
E1000_MRQC_RSS_FIELD_IPV6_UDP);
|
|
|
|
mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
|
|
|
|
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
|
|
|
|
|
|
|
|
E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Enable receive unit.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_initialize_receive_units(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct rx_ring *rxr = adapter->rx_rings;
|
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
2009-04-10 00:05:46 +00:00
|
|
|
u32 rctl, rxcsum, psize, srrctl = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
INIT_DEBUGOUT("igb_initialize_receive_unit: begin");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make sure receives are disabled while setting
|
|
|
|
* up the descriptor ring
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
rctl = E1000_READ_REG(hw, E1000_RCTL);
|
|
|
|
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2009-04-10 00:05:46 +00:00
|
|
|
/*
|
|
|
|
** Set up for header split
|
|
|
|
*/
|
2011-03-18 18:54:00 +00:00
|
|
|
if (igb_header_split) {
|
2009-04-10 00:05:46 +00:00
|
|
|
/* Use a standard mbuf for the header */
|
|
|
|
srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
|
|
|
|
srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
|
|
|
|
} else
|
|
|
|
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
|
|
|
|
|
|
|
|
/*
|
|
|
|
** Set up for jumbo frames
|
|
|
|
*/
|
|
|
|
if (ifp->if_mtu > ETHERMTU) {
|
|
|
|
rctl |= E1000_RCTL_LPE;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
|
|
|
|
srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
|
|
|
|
rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
|
|
|
|
} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
|
|
|
|
srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
|
|
|
|
rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
|
|
|
|
}
|
2009-04-10 00:05:46 +00:00
|
|
|
/* Set maximum packet len */
|
|
|
|
psize = adapter->max_frame_size;
|
|
|
|
/* are we on a vlan? */
|
|
|
|
if (adapter->ifp->if_vlantrunk != NULL)
|
|
|
|
psize += VLAN_TAG_SIZE;
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
|
|
|
|
} else {
|
|
|
|
rctl &= ~E1000_RCTL_LPE;
|
|
|
|
srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
|
|
|
|
rctl |= E1000_RCTL_SZ_2048;
|
|
|
|
}
|
|
|
|
|
2014-09-15 19:53:49 +00:00
|
|
|
/*
|
|
|
|
* If TX flow control is disabled and there's >1 queue defined,
|
|
|
|
* enable DROP.
|
|
|
|
*
|
|
|
|
* This drops frames rather than hanging the RX MAC for all queues.
|
|
|
|
*/
|
|
|
|
if ((adapter->num_queues > 1) &&
|
|
|
|
(adapter->fc == e1000_fc_none ||
|
|
|
|
adapter->fc == e1000_fc_rx_pause)) {
|
|
|
|
srrctl |= E1000_SRRCTL_DROP_EN;
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Setup the Base and Length of the Rx Descriptor Rings */
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
|
2008-02-29 21:50:11 +00:00
|
|
|
u64 bus_addr = rxr->rxdma.dma_paddr;
|
2009-04-10 00:05:46 +00:00
|
|
|
u32 rxdctl;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RDLEN(i),
|
2008-02-29 21:50:11 +00:00
|
|
|
adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RDBAH(i),
|
2008-02-29 21:50:11 +00:00
|
|
|
(uint32_t)(bus_addr >> 32));
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RDBAL(i),
|
2008-02-29 21:50:11 +00:00
|
|
|
(uint32_t)bus_addr);
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Enable this Queue */
|
2010-01-26 22:32:22 +00:00
|
|
|
rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
|
2008-02-29 21:50:11 +00:00
|
|
|
rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
|
|
|
|
rxdctl &= 0xFFF00000;
|
|
|
|
rxdctl |= IGB_RX_PTHRESH;
|
|
|
|
rxdctl |= IGB_RX_HTHRESH << 8;
|
|
|
|
rxdctl |= IGB_RX_WTHRESH << 16;
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
** Setup for RX MultiQueue
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
|
2009-06-24 17:41:29 +00:00
|
|
|
if (adapter->num_queues >1) {
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2014-06-30 04:34:59 +00:00
|
|
|
/* rss setup */
|
|
|
|
igb_initialise_rss_mapping(adapter);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
** NOTE: Receive Full-Packet Checksum Offload
|
|
|
|
** is mutually exclusive with Multiqueue. However
|
|
|
|
** this is not the same as TCP/IP checksums which
|
|
|
|
** still work.
|
|
|
|
*/
|
|
|
|
rxcsum |= E1000_RXCSUM_PCSD;
|
2009-04-10 00:05:46 +00:00
|
|
|
#if __FreeBSD_version >= 800000
|
|
|
|
/* For SCTP Offload */
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((hw->mac.type == e1000_82576)
|
2009-04-10 00:05:46 +00:00
|
|
|
&& (ifp->if_capenable & IFCAP_RXCSUM))
|
|
|
|
rxcsum |= E1000_RXCSUM_CRCOFL;
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
/* Non RSS setup */
|
|
|
|
if (ifp->if_capenable & IFCAP_RXCSUM) {
|
|
|
|
rxcsum |= E1000_RXCSUM_IPPCSE;
|
|
|
|
#if __FreeBSD_version >= 800000
|
|
|
|
if (adapter->hw.mac.type == e1000_82576)
|
|
|
|
rxcsum |= E1000_RXCSUM_CRCOFL;
|
|
|
|
#endif
|
|
|
|
} else
|
|
|
|
rxcsum &= ~E1000_RXCSUM_TUOFL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Setup the Receive Control Register */
|
|
|
|
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
|
|
|
|
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
|
|
|
|
E1000_RCTL_RDMTS_HALF |
|
2010-01-26 22:32:22 +00:00
|
|
|
(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
|
|
|
|
/* Strip CRC bytes. */
|
|
|
|
rctl |= E1000_RCTL_SECRC;
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Make sure VLAN Filters are off */
|
|
|
|
rctl &= ~E1000_RCTL_VFE;
|
2009-04-10 00:05:46 +00:00
|
|
|
/* Don't store bad packets */
|
2008-02-29 21:50:11 +00:00
|
|
|
rctl &= ~E1000_RCTL_SBP;
|
|
|
|
|
|
|
|
/* Enable Receives */
|
2010-01-26 22:32:22 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup the HW Rx Head and Tail Descriptor Pointers
|
|
|
|
* - needs to be after enable
|
|
|
|
*/
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++) {
|
2011-03-18 18:54:00 +00:00
|
|
|
rxr = &adapter->rx_rings[i];
|
|
|
|
E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP
|
|
|
|
/*
|
|
|
|
* an init() while a netmap client is active must
|
|
|
|
* preserve the rx buffers passed to userspace.
|
|
|
|
* In this driver it means we adjust RDT to
|
It is 2014 and we have a new version of netmap.
Most relevant features:
- netmap emulation on any NIC, even those without native netmap support.
On the ixgbe we have measured about 4Mpps/core/queue in this mode,
which is still a lot more than with sockets/bpf.
- seamless interconnection of VALE switch, NICs and host stack.
If you disable accelerations on your NIC (say em0)
ifconfig em0 -txcsum -txcsum
you can use the VALE switch to connect the NIC and the host stack:
vale-ctl -h valeXX:em0
allowing sharing the NIC with other netmap clients.
- THE USER API HAS SLIGHTLY CHANGED (head/cur/tail pointers
instead of pointers/count as before). This was unavoidable to support,
in the future, multiple threads operating on the same rings.
Netmap clients require very small source code changes to compile again.
On the plus side, the new API should be easier to understand
and the internals are a lot simpler.
The manual page has been updated extensively to reflect the current
features and give some examples.
This is the result of work of several people including Giuseppe Lettieri,
Vincenzo Maffione, Michio Honda and myself, and has been financially
supported by EU projects CHANGE and OPENLAB, from NetApp University
Research Fund, NEC, and of course the Universita` di Pisa.
2014-01-06 12:53:15 +00:00
|
|
|
* something different from next_to_refresh
|
2011-12-22 15:33:41 +00:00
|
|
|
* (which is not used in netmap mode).
|
|
|
|
*/
|
|
|
|
if (ifp->if_capenable & IFCAP_NETMAP) {
|
|
|
|
struct netmap_adapter *na = NA(adapter->ifp);
|
|
|
|
struct netmap_kring *kring = &na->rx_rings[i];
|
It is 2014 and we have a new version of netmap.
Most relevant features:
- netmap emulation on any NIC, even those without native netmap support.
On the ixgbe we have measured about 4Mpps/core/queue in this mode,
which is still a lot more than with sockets/bpf.
- seamless interconnection of VALE switch, NICs and host stack.
If you disable accelerations on your NIC (say em0)
ifconfig em0 -txcsum -txcsum
you can use the VALE switch to connect the NIC and the host stack:
vale-ctl -h valeXX:em0
allowing sharing the NIC with other netmap clients.
- THE USER API HAS SLIGHTLY CHANGED (head/cur/tail pointers
instead of pointers/count as before). This was unavoidable to support,
in the future, multiple threads operating on the same rings.
Netmap clients require very small source code changes to compile again.
On the plus side, the new API should be easier to understand
and the internals are a lot simpler.
The manual page has been updated extensively to reflect the current
features and give some examples.
This is the result of work of several people including Giuseppe Lettieri,
Vincenzo Maffione, Michio Honda and myself, and has been financially
supported by EU projects CHANGE and OPENLAB, from NetApp University
Research Fund, NEC, and of course the Universita` di Pisa.
2014-01-06 12:53:15 +00:00
|
|
|
int t = rxr->next_to_refresh - nm_kr_rxspace(kring);
|
2011-12-22 15:33:41 +00:00
|
|
|
|
|
|
|
if (t >= adapter->num_rx_desc)
|
|
|
|
t -= adapter->num_rx_desc;
|
|
|
|
else if (t < 0)
|
|
|
|
t += adapter->num_rx_desc;
|
|
|
|
E1000_WRITE_REG(hw, E1000_RDT(i), t);
|
|
|
|
} else
|
|
|
|
#endif /* DEV_NETMAP */
|
2011-03-18 18:54:00 +00:00
|
|
|
E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free receive rings.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_free_receive_structures(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct rx_ring *rxr = adapter->rx_rings;
|
|
|
|
|
2009-06-24 17:41:29 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
|
2008-07-30 21:56:53 +00:00
|
|
|
struct lro_ctrl *lro = &rxr->lro;
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_free_receive_buffers(rxr);
|
2008-07-30 21:56:53 +00:00
|
|
|
tcp_lro_free(lro);
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_dma_free(adapter, &rxr->rxdma);
|
|
|
|
}
|
|
|
|
|
|
|
|
free(adapter->rx_rings, M_DEVBUF);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Free receive ring data structures.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_free_receive_buffers(struct rx_ring *rxr)
|
|
|
|
{
|
2009-12-08 01:07:44 +00:00
|
|
|
struct adapter *adapter = rxr->adapter;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct igb_rx_buf *rxbuf;
|
|
|
|
int i;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
INIT_DEBUGOUT("free_receive_structures: begin");
|
|
|
|
|
|
|
|
/* Cleanup any existing buffers */
|
|
|
|
if (rxr->rx_buffers != NULL) {
|
2010-01-26 22:32:22 +00:00
|
|
|
for (i = 0; i < adapter->num_rx_desc; i++) {
|
|
|
|
rxbuf = &rxr->rx_buffers[i];
|
|
|
|
if (rxbuf->m_head != NULL) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_sync(rxr->htag, rxbuf->hmap,
|
2008-02-29 21:50:11 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_unload(rxr->htag, rxbuf->hmap);
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf->m_head->m_flags |= M_PKTHDR;
|
|
|
|
m_freem(rxbuf->m_head);
|
|
|
|
}
|
|
|
|
if (rxbuf->m_pack != NULL) {
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
|
2010-01-26 22:32:22 +00:00
|
|
|
BUS_DMASYNC_POSTREAD);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf->m_pack->m_flags |= M_PKTHDR;
|
|
|
|
m_freem(rxbuf->m_pack);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
rxbuf->m_head = NULL;
|
|
|
|
rxbuf->m_pack = NULL;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
if (rxbuf->hmap != NULL) {
|
|
|
|
bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
|
|
|
|
rxbuf->hmap = NULL;
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
if (rxbuf->pmap != NULL) {
|
|
|
|
bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
|
|
|
|
rxbuf->pmap = NULL;
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rxr->rx_buffers != NULL) {
|
|
|
|
free(rxr->rx_buffers, M_DEVBUF);
|
|
|
|
rxr->rx_buffers = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
if (rxr->htag != NULL) {
|
|
|
|
bus_dma_tag_destroy(rxr->htag);
|
|
|
|
rxr->htag = NULL;
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
if (rxr->ptag != NULL) {
|
|
|
|
bus_dma_tag_destroy(rxr->ptag);
|
|
|
|
rxr->ptag = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
static __inline void
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
igb_rx_discard(struct rx_ring *rxr, int i)
|
2010-01-26 22:32:22 +00:00
|
|
|
{
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
struct igb_rx_buf *rbuf;
|
2010-01-26 22:32:22 +00:00
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rbuf = &rxr->rx_buffers[i];
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
|
|
|
|
/* Partially received? Free the chain */
|
2010-01-26 22:32:22 +00:00
|
|
|
if (rxr->fmp != NULL) {
|
|
|
|
rxr->fmp->m_flags |= M_PKTHDR;
|
|
|
|
m_freem(rxr->fmp);
|
|
|
|
rxr->fmp = NULL;
|
|
|
|
rxr->lmp = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/*
|
|
|
|
** With advanced descriptors the writeback
|
|
|
|
** clobbers the buffer addrs, so its easier
|
|
|
|
** to just free the existing mbufs and take
|
|
|
|
** the normal refresh path to get new buffers
|
|
|
|
** and mapping.
|
|
|
|
*/
|
|
|
|
if (rbuf->m_head) {
|
|
|
|
m_free(rbuf->m_head);
|
|
|
|
rbuf->m_head = NULL;
|
2013-11-02 09:16:11 +00:00
|
|
|
bus_dmamap_unload(rxr->htag, rbuf->hmap);
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
if (rbuf->m_pack) {
|
|
|
|
m_free(rbuf->m_pack);
|
|
|
|
rbuf->m_pack = NULL;
|
2013-11-02 09:16:11 +00:00
|
|
|
bus_dmamap_unload(rxr->ptag, rbuf->pmap);
|
2010-09-28 00:13:15 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
static __inline void
|
|
|
|
igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
|
|
|
|
* should be computed by hardware. Also it should not have VLAN tag in
|
|
|
|
* ethernet header.
|
|
|
|
*/
|
|
|
|
if (rxr->lro_enabled &&
|
|
|
|
(ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
|
|
|
|
(ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
|
|
|
|
(ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
|
|
|
|
(E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
|
|
|
|
(m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
|
|
|
|
(CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
|
|
|
|
/*
|
|
|
|
* Send to the stack if:
|
|
|
|
** - LRO not enabled, or
|
|
|
|
** - no LRO resources, or
|
|
|
|
** - lro enqueue fails
|
|
|
|
*/
|
|
|
|
if (rxr->lro.lro_cnt != 0)
|
|
|
|
if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
|
|
|
|
return;
|
|
|
|
}
|
2010-06-11 20:54:27 +00:00
|
|
|
IGB_RX_UNLOCK(rxr);
|
2010-01-26 22:32:22 +00:00
|
|
|
(*ifp->if_input)(ifp, m);
|
2010-06-11 20:54:27 +00:00
|
|
|
IGB_RX_LOCK(rxr);
|
2010-01-26 22:32:22 +00:00
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* This routine executes in interrupt context. It replenishes
|
|
|
|
* the mbufs in the descriptor and sends data which has been
|
|
|
|
* dma'ed into host memory to upper layer.
|
|
|
|
*
|
|
|
|
* We loop at most count times if count is > 0, or until done if
|
|
|
|
* count < 0.
|
|
|
|
*
|
2009-04-10 00:05:46 +00:00
|
|
|
* Return TRUE if more to clean, FALSE otherwise
|
2008-02-29 21:50:11 +00:00
|
|
|
*********************************************************************/
|
|
|
|
static bool
|
2010-06-16 16:37:36 +00:00
|
|
|
igb_rxeof(struct igb_queue *que, int count, int *done)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
struct adapter *adapter = que->adapter;
|
|
|
|
struct rx_ring *rxr = que->rxr;
|
2010-01-26 22:32:22 +00:00
|
|
|
struct ifnet *ifp = adapter->ifp;
|
2008-07-30 21:56:53 +00:00
|
|
|
struct lro_ctrl *lro = &rxr->lro;
|
|
|
|
struct lro_entry *queued;
|
2010-06-16 16:37:36 +00:00
|
|
|
int i, processed = 0, rxdone = 0;
|
2010-01-26 22:32:22 +00:00
|
|
|
u32 ptype, staterr = 0;
|
2008-07-30 21:56:53 +00:00
|
|
|
union e1000_adv_rx_desc *cur;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
IGB_RX_LOCK(rxr);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* Sync the ring. */
|
|
|
|
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
|
|
|
|
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
|
2009-12-08 01:07:44 +00:00
|
|
|
|
2011-12-22 15:33:41 +00:00
|
|
|
#ifdef DEV_NETMAP
|
2013-11-01 21:21:14 +00:00
|
|
|
if (netmap_rx_irq(ifp, rxr->me, &processed)) {
|
|
|
|
IGB_RX_UNLOCK(rxr);
|
2013-04-30 16:18:29 +00:00
|
|
|
return (FALSE);
|
2013-11-01 21:21:14 +00:00
|
|
|
}
|
2011-12-22 15:33:41 +00:00
|
|
|
#endif /* DEV_NETMAP */
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Main clean loop */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
for (i = rxr->next_to_check; count != 0;) {
|
|
|
|
struct mbuf *sendmp, *mh, *mp;
|
|
|
|
struct igb_rx_buf *rxbuf;
|
2014-06-30 04:34:59 +00:00
|
|
|
u16 hlen, plen, hdr, vtag, pkt_info;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
bool eop = FALSE;
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
cur = &rxr->rx_base[i];
|
|
|
|
staterr = le32toh(cur->wb.upper.status_error);
|
|
|
|
if ((staterr & E1000_RXD_STAT_DD) == 0)
|
|
|
|
break;
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
|
|
|
|
break;
|
|
|
|
count--;
|
|
|
|
sendmp = mh = mp = NULL;
|
|
|
|
cur->wb.upper.status_error = 0;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxbuf = &rxr->rx_buffers[i];
|
2010-01-26 22:32:22 +00:00
|
|
|
plen = le16toh(cur->wb.upper.length);
|
|
|
|
ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
|
2013-10-09 17:32:52 +00:00
|
|
|
if (((adapter->hw.mac.type == e1000_i350) ||
|
|
|
|
(adapter->hw.mac.type == e1000_i354)) &&
|
2011-03-18 18:54:00 +00:00
|
|
|
(staterr & E1000_RXDEXT_STATERR_LB))
|
|
|
|
vtag = be16toh(cur->wb.upper.vlan);
|
|
|
|
else
|
|
|
|
vtag = le16toh(cur->wb.upper.vlan);
|
2010-01-26 22:32:22 +00:00
|
|
|
hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
|
2014-06-30 04:34:59 +00:00
|
|
|
pkt_info = le16toh(cur->wb.lower.lo_dword.hs_rss.pkt_info);
|
2010-01-26 22:32:22 +00:00
|
|
|
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
|
|
|
|
|
2014-09-18 16:20:17 +00:00
|
|
|
/*
|
|
|
|
* Free the frame (all segments) if we're at EOP and
|
|
|
|
* it's an error.
|
|
|
|
*
|
|
|
|
* The datasheet states that EOP + status is only valid for
|
|
|
|
* the final segment in a multi-segment frame.
|
|
|
|
*/
|
|
|
|
if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
|
2012-09-23 22:53:39 +00:00
|
|
|
adapter->dropped_pkts++;
|
2010-01-26 22:32:22 +00:00
|
|
|
++rxr->rx_discarded;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
igb_rx_discard(rxr, i);
|
2010-01-26 22:32:22 +00:00
|
|
|
goto next_desc;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
2009-04-10 00:05:46 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
** The way the hardware is configured to
|
|
|
|
** split, it will ONLY use the header buffer
|
|
|
|
** when header split is enabled, otherwise we
|
2010-01-26 22:32:22 +00:00
|
|
|
** get normal behavior, ie, both header and
|
|
|
|
** payload are DMA'd into the payload buffer.
|
2009-04-10 00:05:46 +00:00
|
|
|
**
|
2010-01-26 22:32:22 +00:00
|
|
|
** The fmp test is to catch the case where a
|
|
|
|
** packet spans multiple descriptors, in that
|
|
|
|
** case only the first header is valid.
|
2009-04-10 00:05:46 +00:00
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
if (rxr->hdr_split && rxr->fmp == NULL) {
|
2013-11-02 09:16:11 +00:00
|
|
|
bus_dmamap_unload(rxr->htag, rxbuf->hmap);
|
2009-04-10 00:05:46 +00:00
|
|
|
hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
|
|
|
|
E1000_RXDADV_HDRBUFLEN_SHIFT;
|
|
|
|
if (hlen > IGB_HDR_BUF)
|
|
|
|
hlen = IGB_HDR_BUF;
|
2010-01-26 22:32:22 +00:00
|
|
|
mh = rxr->rx_buffers[i].m_head;
|
2009-04-10 00:05:46 +00:00
|
|
|
mh->m_len = hlen;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/* clear buf pointer for refresh */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxbuf->m_head = NULL;
|
2009-04-10 00:05:46 +00:00
|
|
|
/*
|
|
|
|
** Get the payload length, this
|
|
|
|
** could be zero if its a small
|
|
|
|
** packet.
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
if (plen > 0) {
|
|
|
|
mp = rxr->rx_buffers[i].m_pack;
|
2009-04-10 00:05:46 +00:00
|
|
|
mp->m_len = plen;
|
|
|
|
mh->m_next = mp;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/* clear buf pointer */
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxbuf->m_pack = NULL;
|
2009-04-10 00:05:46 +00:00
|
|
|
rxr->rx_split_packets++;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
2009-04-10 00:05:46 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
** Either no header split, or a
|
|
|
|
** secondary piece of a fragmented
|
2010-01-26 22:32:22 +00:00
|
|
|
** split packet.
|
2009-04-10 00:05:46 +00:00
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
mh = rxr->rx_buffers[i].m_pack;
|
|
|
|
mh->m_len = plen;
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* clear buf info for refresh */
|
|
|
|
rxbuf->m_pack = NULL;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
2013-11-02 09:16:11 +00:00
|
|
|
bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
++processed; /* So we know when to refresh */
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
/* Initial frame - setup */
|
|
|
|
if (rxr->fmp == NULL) {
|
|
|
|
mh->m_pkthdr.len = mh->m_len;
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/* Save the head of the chain */
|
2010-01-26 22:32:22 +00:00
|
|
|
rxr->fmp = mh;
|
|
|
|
rxr->lmp = mh;
|
|
|
|
if (mp != NULL) {
|
|
|
|
/* Add payload if split */
|
|
|
|
mh->m_pkthdr.len += mp->m_len;
|
|
|
|
rxr->lmp = mh->m_next;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
} else {
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Chain mbuf's together */
|
|
|
|
rxr->lmp->m_next = mh;
|
|
|
|
rxr->lmp = rxr->lmp->m_next;
|
|
|
|
rxr->fmp->m_pkthdr.len += mh->m_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (eop) {
|
|
|
|
rxr->fmp->m_pkthdr.rcvif = ifp;
|
2014-09-19 11:49:41 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
|
2010-01-26 22:32:22 +00:00
|
|
|
rxr->rx_packets++;
|
|
|
|
/* capture data for AIM */
|
|
|
|
rxr->packets++;
|
|
|
|
rxr->bytes += rxr->fmp->m_pkthdr.len;
|
|
|
|
rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
|
|
|
|
|
|
|
|
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
|
|
|
|
igb_rx_checksum(staterr, rxr->fmp, ptype);
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
|
|
|
|
(staterr & E1000_RXD_STAT_VP) != 0) {
|
|
|
|
rxr->fmp->m_pkthdr.ether_vtag = vtag;
|
|
|
|
rxr->fmp->m_flags |= M_VLANTAG;
|
2009-12-08 01:07:44 +00:00
|
|
|
}
|
2014-06-30 04:34:59 +00:00
|
|
|
#ifdef RSS
|
|
|
|
/* XXX set flowtype once this works right */
|
|
|
|
rxr->fmp->m_pkthdr.flowid =
|
|
|
|
le32toh(cur->wb.lower.hi_dword.rss);
|
|
|
|
rxr->fmp->m_flags |= M_FLOWID;
|
|
|
|
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV4);
|
|
|
|
break;
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV4:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV4);
|
|
|
|
break;
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6_TCP:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV6);
|
|
|
|
break;
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6_EX:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV6_EX);
|
|
|
|
break;
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_IPV6);
|
|
|
|
break;
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6_TCP_EX:
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV6_EX);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* XXX no UDP support in RSS just yet */
|
|
|
|
#ifdef notyet
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV4_UDP:
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6_UDP:
|
|
|
|
case E1000_RXDADV_RSSTYPE_IPV6_UDP_EX:
|
|
|
|
#endif
|
|
|
|
|
|
|
|
default:
|
|
|
|
/* XXX fallthrough */
|
|
|
|
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_NONE);
|
|
|
|
}
|
|
|
|
#elif !defined(IGB_LEGACY_TX)
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
rxr->fmp->m_pkthdr.flowid = que->msix;
|
2010-01-26 22:32:22 +00:00
|
|
|
rxr->fmp->m_flags |= M_FLOWID;
|
|
|
|
#endif
|
|
|
|
sendmp = rxr->fmp;
|
|
|
|
/* Make sure to set M_PKTHDR. */
|
|
|
|
sendmp->m_flags |= M_PKTHDR;
|
|
|
|
rxr->fmp = NULL;
|
|
|
|
rxr->lmp = NULL;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-01-26 22:32:22 +00:00
|
|
|
|
|
|
|
next_desc:
|
2008-02-29 21:50:11 +00:00
|
|
|
bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
|
|
|
|
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
/* Advance our pointers to the next descriptor. */
|
2008-02-29 21:50:11 +00:00
|
|
|
if (++i == adapter->num_rx_desc)
|
|
|
|
i = 0;
|
2009-04-10 00:05:46 +00:00
|
|
|
/*
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
** Send to the stack or LRO
|
2009-04-10 00:05:46 +00:00
|
|
|
*/
|
2010-06-11 20:54:27 +00:00
|
|
|
if (sendmp != NULL) {
|
|
|
|
rxr->next_to_check = i;
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_rx_input(rxr, ifp, sendmp, ptype);
|
2010-06-11 20:54:27 +00:00
|
|
|
i = rxr->next_to_check;
|
2010-06-16 16:37:36 +00:00
|
|
|
rxdone++;
|
2010-06-11 20:54:27 +00:00
|
|
|
}
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
|
|
|
|
/* Every 8 descriptors we go to refresh mbufs */
|
|
|
|
if (processed == 8) {
|
|
|
|
igb_refresh_mbufs(rxr, i);
|
|
|
|
processed = 0;
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
/* Catch any remainders */
|
2011-04-05 21:55:43 +00:00
|
|
|
if (igb_rx_unrefreshed(rxr))
|
Update to igb and em:
em revision 7.0.0:
- Using driver devclass, seperate legacy (pre-pcie) code
into a seperate source file. This will at least help
protect against regression issues. It compiles along
with em, and is transparent to end use, devices in each
appear to be 'emX'. When using em in a modular form this
also allows the legacy stuff to be defined out.
- Add tx and rx rings as in igb, in the 82574 this becomes
actual multiqueue for the first time (2 queues) while in
other PCIE adapters its just make code cleaner.
- Add RX mbuf handling logic that matches igb, this will
eliminate packet drops due to temporary mbuf shortage.
igb revision 1.9.3:
- Following the ixgbe code, use a new approach in what
was called 'get_buf', the routine now has been made
independent of rxeof, it now does the update to the
engine TDT register, this design allows temporary
mbuf resources to become non-critical, not requiring
a packet to be discarded, instead it just returns and
does not increment the tail pointer.
- With the above change it was also unnecessary to keep
'spare' maps around, since we do not have the discard
issue.
- Performance tweaks and improvements to the code also.
MFC in a week
2010-03-29 23:36:34 +00:00
|
|
|
igb_refresh_mbufs(rxr, i);
|
2009-12-08 01:07:44 +00:00
|
|
|
|
|
|
|
rxr->next_to_check = i;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2008-07-30 21:56:53 +00:00
|
|
|
/*
|
2008-08-28 22:28:28 +00:00
|
|
|
* Flush any outstanding LRO work
|
|
|
|
*/
|
2010-01-26 22:32:22 +00:00
|
|
|
while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
|
2008-07-30 21:56:53 +00:00
|
|
|
SLIST_REMOVE_HEAD(&lro->lro_active, next);
|
|
|
|
tcp_lro_flush(lro, queued);
|
|
|
|
}
|
|
|
|
|
2010-06-16 16:37:36 +00:00
|
|
|
if (done != NULL)
|
2012-08-06 22:43:49 +00:00
|
|
|
*done += rxdone;
|
2010-06-16 16:37:36 +00:00
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
IGB_RX_UNLOCK(rxr);
|
|
|
|
return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2009-04-10 00:05:46 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/*********************************************************************
|
|
|
|
*
|
|
|
|
* Verify that the hardware indicated that the checksum is valid.
|
|
|
|
* Inform the stack about the status of checksum so that stack
|
|
|
|
* doesn't spend time verifying the checksum.
|
|
|
|
*
|
|
|
|
*********************************************************************/
|
|
|
|
static void
|
2010-01-26 22:32:22 +00:00
|
|
|
igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
|
|
|
u16 status = (u16)staterr;
|
|
|
|
u8 errors = (u8) (staterr >> 24);
|
2010-01-26 22:32:22 +00:00
|
|
|
int sctp;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* Ignore Checksum bit is set */
|
|
|
|
if (status & E1000_RXD_STAT_IXSM) {
|
|
|
|
mp->m_pkthdr.csum_flags = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-01-26 22:32:22 +00:00
|
|
|
if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
|
|
|
|
(ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
|
|
|
|
sctp = 1;
|
|
|
|
else
|
|
|
|
sctp = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
if (status & E1000_RXD_STAT_IPCS) {
|
|
|
|
/* Did it pass? */
|
|
|
|
if (!(errors & E1000_RXD_ERR_IPE)) {
|
|
|
|
/* IP Checksum Good */
|
|
|
|
mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
|
|
|
|
mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
|
|
|
|
} else
|
|
|
|
mp->m_pkthdr.csum_flags = 0;
|
|
|
|
}
|
|
|
|
|
2009-04-10 00:05:46 +00:00
|
|
|
if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
|
2013-08-24 19:51:18 +00:00
|
|
|
u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
|
2009-04-10 00:05:46 +00:00
|
|
|
#if __FreeBSD_version >= 800000
|
|
|
|
if (sctp) /* reassign */
|
|
|
|
type = CSUM_SCTP_VALID;
|
|
|
|
#endif
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Did it pass? */
|
|
|
|
if (!(errors & E1000_RXD_ERR_TCPE)) {
|
2009-04-14 17:14:35 +00:00
|
|
|
mp->m_pkthdr.csum_flags |= type;
|
2010-01-26 22:32:22 +00:00
|
|
|
if (sctp == 0)
|
2009-04-10 00:05:46 +00:00
|
|
|
mp->m_pkthdr.csum_data = htons(0xffff);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * VLAN registration callback, invoked via the vlan_config EVENT
 * when a vlan id is configured on this interface.  Records the tag
 * in the per-interface shadow VFTA and, if hardware filtering is
 * enabled, pushes the updated table to the adapter.
 */
static void
igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IGB_CORE_LOCK(adapter);
	/* VFTA is an array of 32-bit words: word index, then bit within. */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Change hw filter setting */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		igb_setup_vlan_hw_support(adapter);
	IGB_CORE_UNLOCK(adapter);
}
|
|
|
|
|
2008-07-30 21:56:53 +00:00
|
|
|
/*
 * VLAN deregistration callback, invoked via the vlan_unconfig EVENT
 * when a vlan id is removed from this interface.  Clears the tag
 * from the shadow VFTA and, if hardware filtering is enabled,
 * pushes the updated table to the adapter.
 */
static void
igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg)	/* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
		return;

	IGB_CORE_LOCK(adapter);
	/* Same word/bit decomposition as igb_register_vlan(). */
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Change hw filter setting */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		igb_setup_vlan_hw_support(adapter);
	IGB_CORE_UNLOCK(adapter);
}
|
|
|
|
|
|
|
|
/*
 * Program the adapter's VLAN hardware support: tag stripping,
 * optional hardware filtering, max frame size, and repopulation
 * of the VLAN filter table from the shadow copy.  Called with the
 * core lock held from the register/unregister vlan events and init.
 */
static void
igb_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32             reg;

	/*
	 * A VF interface cannot touch the PF-owned registers directly;
	 * it only requests a larger receive packet length via mailbox.
	 */
	if (adapter->vf_ifp) {
		e1000_rlpml_set_vf(hw,
		    adapter->max_frame_size + VLAN_TAG_SIZE);
		return;
	}

	/* Enable VLAN tag stripping/insertion (VME). */
	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);

	/* Don't bother with table if no vlans */
	if ((adapter->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;
	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IGB_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0) {
			if (adapter->vf_ifp)
				e1000_vfta_set_vf(hw,
				    adapter->shadow_vfta[i], TRUE);
			else
				e1000_write_vfta(hw,
				    i, adapter->shadow_vfta[i]);
		}
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
static void
|
|
|
|
igb_enable_intr(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
/* With RSS set up what to auto clear */
|
|
|
|
if (adapter->msix_mem) {
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
u32 mask = (adapter->que_mask | adapter->link_mask);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask);
|
2008-02-29 21:50:11 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
|
|
|
|
E1000_IMS_LSC);
|
|
|
|
} else {
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
|
|
|
|
IMS_ENABLE_MASK);
|
|
|
|
}
|
|
|
|
E1000_WRITE_FLUSH(&adapter->hw);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
igb_disable_intr(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
if (adapter->msix_mem) {
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0);
|
|
|
|
}
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
|
2008-02-29 21:50:11 +00:00
|
|
|
E1000_WRITE_FLUSH(&adapter->hw);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bit of a misnomer, what this really means is
|
|
|
|
* to enable OS management of the system... aka
|
|
|
|
* to disable special hardware management features
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_init_manageability(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
if (adapter->has_manage) {
|
|
|
|
int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
|
|
|
|
int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
|
|
|
|
|
|
|
|
/* disable hardware interception of ARP */
|
|
|
|
manc &= ~(E1000_MANC_ARP_EN);
|
|
|
|
|
|
|
|
/* enable receiving management packets to the host */
|
|
|
|
manc |= E1000_MANC_EN_MNG2HOST;
|
2009-04-10 00:05:46 +00:00
|
|
|
manc2h |= 1 << 5; /* Mng Port 623 */
|
|
|
|
manc2h |= 1 << 6; /* Mng Port 664 */
|
2008-02-29 21:50:11 +00:00
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Give control back to hardware management
|
|
|
|
* controller if there is one.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_release_manageability(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
if (adapter->has_manage) {
|
|
|
|
int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
|
|
|
|
|
|
|
|
/* re-enable hardware interception of ARP */
|
|
|
|
manc |= E1000_MANC_ARP_EN;
|
|
|
|
manc &= ~E1000_MANC_EN_MNG2HOST;
|
|
|
|
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
|
|
|
|
* For ASF and Pass Through versions of f/w this means that
|
|
|
|
* the driver is loaded.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_get_hw_control(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 ctrl_ext;
|
|
|
|
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->vf_ifp)
|
2010-06-30 17:26:47 +00:00
|
|
|
return;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Let firmware know the driver has taken over */
|
|
|
|
ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
|
|
|
|
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
|
|
|
|
* For ASF and Pass Through versions of f/w this means that the
|
|
|
|
* driver is no longer loaded.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
igb_release_hw_control(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
u32 ctrl_ext;
|
|
|
|
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->vf_ifp)
|
2010-06-30 17:26:47 +00:00
|
|
|
return;
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/* Let firmware taken over control of h/w */
|
|
|
|
ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
|
|
|
|
E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
|
|
|
|
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
igb_is_valid_ether_addr(uint8_t *addr)
|
|
|
|
{
|
|
|
|
char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
|
|
|
|
|
|
|
|
if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
|
|
|
|
return (FALSE);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (TRUE);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enable PCI Wake On Lan capability
|
|
|
|
*/
|
2010-03-31 20:43:24 +00:00
|
|
|
static void
|
2008-02-29 21:50:11 +00:00
|
|
|
igb_enable_wakeup(device_t dev)
|
|
|
|
{
|
|
|
|
u16 cap, status;
|
|
|
|
u8 id;
|
|
|
|
|
|
|
|
/* First find the capabilities pointer*/
|
|
|
|
cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
|
|
|
|
/* Read the PM Capabilities */
|
|
|
|
id = pci_read_config(dev, cap, 1);
|
|
|
|
if (id != PCIY_PMG) /* Something wrong */
|
|
|
|
return;
|
|
|
|
/* OK, we have the power capabilities, so
|
|
|
|
now get the status register */
|
|
|
|
cap += PCIR_POWER_STATUS;
|
|
|
|
status = pci_read_config(dev, cap, 2);
|
|
|
|
status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
|
|
|
|
pci_write_config(dev, cap, status, 2);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-03-31 20:43:24 +00:00
|
|
|
static void
|
|
|
|
igb_led_func(void *arg, int onoff)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = arg;
|
|
|
|
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
if (onoff) {
|
|
|
|
e1000_setup_led(&adapter->hw);
|
|
|
|
e1000_led_on(&adapter->hw);
|
|
|
|
} else {
|
|
|
|
e1000_led_off(&adapter->hw);
|
|
|
|
e1000_cleanup_led(&adapter->hw);
|
|
|
|
}
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2014-09-19 11:49:41 +00:00
|
|
|
static uint64_t
|
|
|
|
igb_get_counter(if_t ifp, ift_counter cnt)
|
|
|
|
{
|
|
|
|
struct adapter *adapter;
|
|
|
|
struct e1000_hw_stats *stats;
|
|
|
|
|
|
|
|
adapter = if_getsoftc(ifp);
|
|
|
|
stats = (struct e1000_hw_stats *)adapter->stats;
|
|
|
|
|
|
|
|
switch (cnt) {
|
|
|
|
case IFCOUNTER_IERRORS:
|
|
|
|
return (adapter->dropped_pkts + stats->rxerrc +
|
|
|
|
stats->crcerrs + stats->algnerrc +
|
|
|
|
stats->ruc + stats->roc + stats->mpc + stats->cexterr);
|
|
|
|
case IFCOUNTER_OERRORS:
|
|
|
|
return (stats->ecol + stats->latecol +
|
|
|
|
adapter->watchdog_events);
|
|
|
|
case IFCOUNTER_COLLISIONS:
|
|
|
|
return (stats->colc);
|
|
|
|
default:
|
|
|
|
return (if_get_counter_default(ifp, cnt));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Update the board statistics counters.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_update_stats_counters(struct adapter *adapter)
|
|
|
|
{
|
2010-06-30 17:26:47 +00:00
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
struct e1000_hw_stats *stats;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
/*
|
|
|
|
** The virtual function adapter has only a
|
|
|
|
** small controlled set of stats, do only
|
|
|
|
** those and return.
|
|
|
|
*/
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->vf_ifp) {
|
2010-06-30 17:26:47 +00:00
|
|
|
igb_update_vf_stats_counters(adapter);
|
|
|
|
return;
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-06-30 17:26:47 +00:00
|
|
|
|
|
|
|
stats = (struct e1000_hw_stats *)adapter->stats;
|
|
|
|
|
|
|
|
if(adapter->hw.phy.media_type == e1000_media_type_copper ||
|
|
|
|
(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
|
|
|
|
stats->symerrs +=
|
|
|
|
E1000_READ_REG(hw,E1000_SYMERRS);
|
|
|
|
stats->sec += E1000_READ_REG(hw, E1000_SEC);
|
|
|
|
}
|
|
|
|
|
|
|
|
stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
|
|
|
|
stats->mpc += E1000_READ_REG(hw, E1000_MPC);
|
|
|
|
stats->scc += E1000_READ_REG(hw, E1000_SCC);
|
|
|
|
stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
|
|
|
|
|
|
|
|
stats->mcc += E1000_READ_REG(hw, E1000_MCC);
|
|
|
|
stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
|
|
|
|
stats->colc += E1000_READ_REG(hw, E1000_COLC);
|
|
|
|
stats->dc += E1000_READ_REG(hw, E1000_DC);
|
|
|
|
stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
|
|
|
|
stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
|
|
|
|
stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
|
2010-09-28 00:13:15 +00:00
|
|
|
/*
|
|
|
|
** For watchdog management we need to know if we have been
|
|
|
|
** paused during the last interval, so capture that here.
|
|
|
|
*/
|
|
|
|
adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
|
|
|
|
stats->xoffrxc += adapter->pause_frames;
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
|
|
|
|
stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
|
|
|
|
stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
|
|
|
|
stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
|
|
|
|
stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
|
|
|
|
stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
|
|
|
|
stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
|
|
|
|
stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
|
|
|
|
stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
|
|
|
|
stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
|
|
|
|
stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
|
|
|
|
stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/* For the 64-bit byte counters the low dword must be read first. */
|
|
|
|
/* Both registers clear on the read of the high dword */
|
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
|
2010-09-20 16:04:44 +00:00
|
|
|
((u64)E1000_READ_REG(hw, E1000_GORCH) << 32);
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
|
2010-09-20 16:04:44 +00:00
|
|
|
((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32);
|
2010-06-30 17:26:47 +00:00
|
|
|
|
|
|
|
stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
|
|
|
|
stats->ruc += E1000_READ_REG(hw, E1000_RUC);
|
|
|
|
stats->rfc += E1000_READ_REG(hw, E1000_RFC);
|
|
|
|
stats->roc += E1000_READ_REG(hw, E1000_ROC);
|
|
|
|
stats->rjc += E1000_READ_REG(hw, E1000_RJC);
|
|
|
|
|
2014-10-10 16:36:25 +00:00
|
|
|
stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC);
|
|
|
|
stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC);
|
|
|
|
stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC);
|
|
|
|
|
|
|
|
stats->tor += E1000_READ_REG(hw, E1000_TORL) +
|
|
|
|
((u64)E1000_READ_REG(hw, E1000_TORH) << 32);
|
|
|
|
stats->tot += E1000_READ_REG(hw, E1000_TOTL) +
|
|
|
|
((u64)E1000_READ_REG(hw, E1000_TOTH) << 32);
|
2010-06-30 17:26:47 +00:00
|
|
|
|
|
|
|
stats->tpr += E1000_READ_REG(hw, E1000_TPR);
|
|
|
|
stats->tpt += E1000_READ_REG(hw, E1000_TPT);
|
|
|
|
stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
|
|
|
|
stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
|
|
|
|
stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
|
|
|
|
stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
|
|
|
|
stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
|
|
|
|
stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
|
|
|
|
stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
|
|
|
|
stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
/* Interrupt Counts */
|
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->iac += E1000_READ_REG(hw, E1000_IAC);
|
|
|
|
stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
|
|
|
|
stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
|
|
|
|
stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
|
|
|
|
stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
|
|
|
|
stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
|
|
|
|
stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
|
|
|
|
stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
|
|
|
|
stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
|
2010-06-16 17:36:53 +00:00
|
|
|
|
|
|
|
/* Host to Card Statistics */
|
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
|
|
|
|
stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
|
|
|
|
stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
|
|
|
|
stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
|
|
|
|
stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
|
|
|
|
stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
|
|
|
|
stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
|
|
|
|
stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
|
|
|
|
((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32));
|
|
|
|
stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
|
|
|
|
((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
|
|
|
|
stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
|
|
|
|
stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
|
|
|
|
stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
|
|
|
|
|
|
|
|
stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
|
|
|
|
stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
|
|
|
|
stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
|
|
|
|
stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
|
|
|
|
stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
|
|
|
|
stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-24 21:17:58 +00:00
|
|
|
/* Driver specific counters */
|
2010-06-30 17:26:47 +00:00
|
|
|
adapter->device_control = E1000_READ_REG(hw, E1000_CTRL);
|
|
|
|
adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL);
|
|
|
|
adapter->int_mask = E1000_READ_REG(hw, E1000_IMS);
|
|
|
|
adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
|
|
|
|
adapter->packet_buf_alloc_tx =
|
|
|
|
((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
|
|
|
|
adapter->packet_buf_alloc_rx =
|
2010-06-30 21:05:51 +00:00
|
|
|
(E1000_READ_REG(hw, E1000_PBA) & 0xffff);
|
2010-06-30 17:26:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Initialize the VF board statistics counters.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_vf_init_stats(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
struct e1000_vf_stats *stats;
|
2010-06-24 21:17:58 +00:00
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
stats = (struct e1000_vf_stats *)adapter->stats;
|
2010-08-27 23:50:13 +00:00
|
|
|
if (stats == NULL)
|
|
|
|
return;
|
2010-06-30 17:26:47 +00:00
|
|
|
stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
|
|
|
|
stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
|
|
|
|
stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
|
|
|
|
stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
|
|
|
|
stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* Update the VF board statistics counters.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static void
|
|
|
|
igb_update_vf_stats_counters(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
struct e1000_hw *hw = &adapter->hw;
|
|
|
|
struct e1000_vf_stats *stats;
|
|
|
|
|
|
|
|
if (adapter->link_speed == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
stats = (struct e1000_vf_stats *)adapter->stats;
|
|
|
|
|
|
|
|
UPDATE_VF_REG(E1000_VFGPRC,
|
|
|
|
stats->last_gprc, stats->gprc);
|
|
|
|
UPDATE_VF_REG(E1000_VFGORC,
|
|
|
|
stats->last_gorc, stats->gorc);
|
|
|
|
UPDATE_VF_REG(E1000_VFGPTC,
|
|
|
|
stats->last_gptc, stats->gptc);
|
|
|
|
UPDATE_VF_REG(E1000_VFGOTC,
|
|
|
|
stats->last_gotc, stats->gotc);
|
|
|
|
UPDATE_VF_REG(E1000_VFMPRC,
|
|
|
|
stats->last_mprc, stats->mprc);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
2010-09-20 16:04:44 +00:00
|
|
|
/* Export a single 32-bit register via a read-only sysctl. */
|
|
|
|
static int
|
|
|
|
igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
|
2010-07-23 17:53:39 +00:00
|
|
|
{
|
2010-09-20 16:04:44 +00:00
|
|
|
struct adapter *adapter;
|
|
|
|
u_int val;
|
2010-07-23 17:53:39 +00:00
|
|
|
|
2010-09-20 16:04:44 +00:00
|
|
|
adapter = oidp->oid_arg1;
|
|
|
|
val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
|
|
|
|
return (sysctl_handle_int(oidp, &val, 0, req));
|
2010-07-23 17:53:39 +00:00
|
|
|
}
|
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
/*
|
|
|
|
** Tuneable interrupt rate handler
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1);
|
|
|
|
int error;
|
|
|
|
u32 reg, usec, rate;
|
|
|
|
|
|
|
|
reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
|
|
|
|
usec = ((reg & 0x7FFC) >> 2);
|
|
|
|
if (usec > 0)
|
|
|
|
rate = 1000000 / usec;
|
|
|
|
else
|
|
|
|
rate = 0;
|
|
|
|
error = sysctl_handle_int(oidp, &rate, 0, req);
|
|
|
|
if (error || !req->newptr)
|
|
|
|
return error;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
/*
|
|
|
|
* Add sysctl variables, one per statistic, to the system.
|
|
|
|
*/
|
2008-02-29 21:50:11 +00:00
|
|
|
static void
|
2010-06-16 17:36:53 +00:00
|
|
|
igb_add_hw_stats(struct adapter *adapter)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
2010-06-16 17:36:53 +00:00
|
|
|
device_t dev = adapter->dev;
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
struct tx_ring *txr = adapter->tx_rings;
|
|
|
|
struct rx_ring *rxr = adapter->rx_rings;
|
This delta has a few important items:
PR 122839 is fixed in both em and in igb
Second, the issue on building modules since the static kernel
build changes is now resolved. I was not able to get the fancier
directory hierarchy working, but this works, both em and igb
build as modules now.
Third, there is now support in em for two new NICs, Hartwell
(or 82574) is a low cost PCIE dual port adapter that has MSIX,
for this release it uses 3 vectors only, RX, TX, and LINK. In
the next release I will add a second TX and RX queue. Also, there
is support here for ICH10, the followon to ICH9. Both of these are
early releases, general availability will follow soon.
Fourth: On Hartwell and ICH10 we now have IEEE 1588 PTP support,
I have implemented this in a provisional way so that early adopters
may try and comment on the functionality. The IOCTL structure may
change. This feature is off by default, you need to edit the Makefile
and add the EM_TIMESYNC define to get the code.
Enjoy all!!
2008-04-25 21:19:41 +00:00
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
|
|
|
|
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
|
|
|
|
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
|
2010-06-30 21:05:51 +00:00
|
|
|
struct e1000_hw_stats *stats = adapter->stats;
|
2010-06-16 17:36:53 +00:00
|
|
|
|
|
|
|
struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node;
|
|
|
|
struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list;
|
|
|
|
|
|
|
|
#define QUEUE_NAME_LEN 32
|
|
|
|
char namebuf[QUEUE_NAME_LEN];
|
|
|
|
|
|
|
|
/* Driver Statistics */
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &adapter->link_irq, 0,
|
|
|
|
"Link MSIX IRQ Handled");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
|
|
|
|
CTLFLAG_RD, &adapter->dropped_pkts,
|
|
|
|
"Driver dropped packets");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
|
|
|
|
CTLFLAG_RD, &adapter->no_tx_dma_setup,
|
|
|
|
"Driver tx dma failure in xmit");
|
2010-09-20 16:04:44 +00:00
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
|
|
|
|
CTLFLAG_RD, &adapter->rx_overruns,
|
|
|
|
"RX overruns");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
|
|
|
|
CTLFLAG_RD, &adapter->watchdog_events,
|
|
|
|
"Watchdog timeouts");
|
2010-06-16 17:36:53 +00:00
|
|
|
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control",
|
|
|
|
CTLFLAG_RD, &adapter->device_control,
|
|
|
|
"Device Control Register");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control",
|
|
|
|
CTLFLAG_RD, &adapter->rx_control,
|
|
|
|
"Receiver Control Register");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask",
|
|
|
|
CTLFLAG_RD, &adapter->int_mask,
|
|
|
|
"Interrupt Mask");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask",
|
|
|
|
CTLFLAG_RD, &adapter->eint_mask,
|
|
|
|
"Extended Interrupt Mask");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc",
|
|
|
|
CTLFLAG_RD, &adapter->packet_buf_alloc_tx,
|
|
|
|
"Transmit Buffer Packet Allocation");
|
|
|
|
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc",
|
|
|
|
CTLFLAG_RD, &adapter->packet_buf_alloc_rx,
|
|
|
|
"Receive Buffer Packet Allocation");
|
|
|
|
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
|
|
|
|
CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
|
|
|
|
"Flow Control High Watermark");
|
|
|
|
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
|
|
|
|
CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
|
|
|
|
"Flow Control Low Watermark");
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-09-20 16:04:44 +00:00
|
|
|
for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
|
|
|
|
struct lro_ctrl *lro = &rxr->lro;
|
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
|
|
|
|
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
|
|
|
|
CTLFLAG_RD, NULL, "Queue Name");
|
|
|
|
queue_list = SYSCTL_CHILDREN(queue_node);
|
|
|
|
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
|
2011-03-18 18:54:00 +00:00
|
|
|
CTLFLAG_RD, &adapter->queues[i],
|
- New 82580 devices supported
- Fixes from John Baldwin: vlan shadow tables made per/interface,
make vlan hw setup only happen when capability enabled, and
finally, make a tuneable interrupt rate. Thanks John!
- Tweaked watchdog handling to avoid any false positives, now
detection is in the TX clean path, with only the final check
and init happening in the local timer.
- limit queues to 8 for all devices, with 82576 or 82580 on
larger machines it can get greater than this, and it seems
mostly a resource waste to do so. Even 8 might be high but
it can be manually reduced.
- use 2k, 4k and now 9k clusters based on the MTU size.
- rework the igb_refresh_mbuf() code, its important to
make sure the descriptor is rewritten even when reusing
mbufs since writeback clobbers things.
MFC: in a few days, this delta needs to get to 8.2
2010-11-23 22:12:02 +00:00
|
|
|
sizeof(&adapter->queues[i]),
|
|
|
|
igb_sysctl_interrupt_rate_handler,
|
|
|
|
"IU", "Interrupt Rate");
|
|
|
|
|
2010-07-23 17:53:39 +00:00
|
|
|
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
|
2011-03-18 18:54:00 +00:00
|
|
|
CTLFLAG_RD, adapter, E1000_TDH(txr->me),
|
2010-09-20 16:04:44 +00:00
|
|
|
igb_sysctl_reg_handler, "IU",
|
2010-07-23 17:53:39 +00:00
|
|
|
"Transmit Descriptor Head");
|
|
|
|
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
|
2011-03-18 18:54:00 +00:00
|
|
|
CTLFLAG_RD, adapter, E1000_TDT(txr->me),
|
2010-09-20 16:04:44 +00:00
|
|
|
igb_sysctl_reg_handler, "IU",
|
2010-07-23 17:53:39 +00:00
|
|
|
"Transmit Descriptor Tail");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &txr->no_desc_avail,
|
2014-07-18 16:25:35 +00:00
|
|
|
"Queue Descriptors Unavailable");
|
2013-10-09 17:32:52 +00:00
|
|
|
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
|
|
|
|
CTLFLAG_RD, &txr->total_packets,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Queue Packets Transmitted");
|
|
|
|
|
2010-07-23 17:53:39 +00:00
|
|
|
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
|
2011-03-18 18:54:00 +00:00
|
|
|
CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
|
2010-09-20 16:04:44 +00:00
|
|
|
igb_sysctl_reg_handler, "IU",
|
2010-06-16 17:36:53 +00:00
|
|
|
"Receive Descriptor Head");
|
2010-07-23 17:53:39 +00:00
|
|
|
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
|
2011-03-18 18:54:00 +00:00
|
|
|
CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
|
2010-09-20 16:04:44 +00:00
|
|
|
igb_sysctl_reg_handler, "IU",
|
2010-06-16 17:36:53 +00:00
|
|
|
"Receive Descriptor Tail");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &rxr->rx_packets,
|
|
|
|
"Queue Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &rxr->rx_bytes,
|
|
|
|
"Queue Bytes Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &lro->lro_queued, 0,
|
|
|
|
"LRO Queued");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &lro->lro_flushed, 0,
|
|
|
|
"LRO Flushed");
|
|
|
|
}
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-09-20 16:04:44 +00:00
|
|
|
/* MAC stats get their own sub node */
|
2010-06-16 17:36:53 +00:00
|
|
|
|
|
|
|
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
|
|
|
|
CTLFLAG_RD, NULL, "MAC Statistics");
|
|
|
|
stat_list = SYSCTL_CHILDREN(stat_node);
|
|
|
|
|
2010-06-30 17:26:47 +00:00
|
|
|
/*
|
|
|
|
** VF adapter has a very limited set of stats
|
|
|
|
** since its not managing the metal, so to speak.
|
|
|
|
*/
|
Add support for the new I350 family of 1G interfaces.
- this also includes virtualization support on these devices
Correct some vlan issues we were seeing in test, jumbo frames on vlans
did not work correctly, this was all due to confused logic around HW
filters, the new code should now work for all uses.
Important fix: when mbuf resources are depeleted, it was possible to
completely empty the RX ring, and then the RX engine would stall
forever. This is fixed by a flag being set whenever the refresh code
fails due to an mbuf shortage, also the local timer now makes sure
that all queues get an interrupt when it runs, the interrupt code
will then always call rxeof, and in that routine the first thing done
is now to check the refresh flag and call refresh_mbufs. This has been
verified to fix this type 'hang'. Similar code will follow in the other
drivers.
Finally, sync up shared code for the I350 support.
Thanks to everyone that has been reporting issues, and helping in the
debug/test process!!
2011-02-11 01:00:26 +00:00
|
|
|
if (adapter->vf_ifp) {
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gprc,
|
2010-06-30 17:26:47 +00:00
|
|
|
"Good Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gptc,
|
2010-06-30 17:26:47 +00:00
|
|
|
"Good Packets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gorc,
|
2010-06-30 17:26:47 +00:00
|
|
|
"Good Octets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gotc,
|
2010-09-20 16:04:44 +00:00
|
|
|
"Good Octets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->mprc,
|
2010-06-30 17:26:47 +00:00
|
|
|
"Multicast Packets Received");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &stats->ecol,
|
|
|
|
"Excessive collisions");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &stats->scc,
|
|
|
|
"Single collisions");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &stats->mcc,
|
|
|
|
"Multiple collisions");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &stats->latecol,
|
|
|
|
"Late collisions");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count",
|
2010-06-16 17:36:53 +00:00
|
|
|
CTLFLAG_RD, &stats->colc,
|
|
|
|
"Collision Count");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->symerrs,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Symbol Errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->sec,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Sequence Errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->dc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Defer Count");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->mpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Missed Packets");
|
2014-10-10 16:36:25 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
|
|
|
|
CTLFLAG_RD, &stats->rlec,
|
|
|
|
"Receive Length Errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->rnbc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Receive No Buffers");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ruc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Receive Undersize");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->rfc,
|
2014-10-10 16:36:25 +00:00
|
|
|
"Fragmented Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->roc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Oversized Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->rjc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Recevied Jabber");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->rxerrc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Receive Errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->crcerrs,
|
2010-06-16 17:36:53 +00:00
|
|
|
"CRC errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->algnerrc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Alignment Errors");
|
2014-10-10 16:36:25 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_no_crs",
|
|
|
|
CTLFLAG_RD, &stats->tncrs,
|
|
|
|
"Transmit with No CRS");
|
2008-02-29 21:50:11 +00:00
|
|
|
/* On 82575 these are collision counts */
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->cexterr,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Collision/Carrier extension errors");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->xonrxc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"XON Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->xontxc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"XON Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->xoffrxc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"XOFF Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->xofftxc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"XOFF Transmitted");
|
2014-10-10 16:36:25 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
|
|
|
|
CTLFLAG_RD, &stats->fcruc,
|
|
|
|
"Unsupported Flow Control Received");
|
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
|
|
|
|
CTLFLAG_RD, &stats->mgprc,
|
|
|
|
"Management Packets Received");
|
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
|
|
|
|
CTLFLAG_RD, &stats->mgpdc,
|
|
|
|
"Management Packets Dropped");
|
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
|
|
|
|
CTLFLAG_RD, &stats->mgptc,
|
|
|
|
"Management Packets Transmitted");
|
2010-06-16 17:36:53 +00:00
|
|
|
/* Packet Reception Stats */
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->tpr,
|
2014-10-10 16:36:25 +00:00
|
|
|
"Total Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gprc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Good Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->bprc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Broadcast Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->mprc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Multicast Packets Received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc64,
|
2014-10-10 16:36:25 +00:00
|
|
|
"64 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc127,
|
2010-06-16 17:36:53 +00:00
|
|
|
"65-127 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc255,
|
2010-06-16 17:36:53 +00:00
|
|
|
"128-255 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc511,
|
2010-06-16 17:36:53 +00:00
|
|
|
"256-511 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc1023,
|
2010-06-16 17:36:53 +00:00
|
|
|
"512-1023 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->prc1522,
|
2010-06-16 17:36:53 +00:00
|
|
|
"1023-1522 byte frames received");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gorc,
|
2014-10-10 16:36:25 +00:00
|
|
|
"Good Octets Received");
|
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_recvd",
|
|
|
|
CTLFLAG_RD, &stats->tor,
|
|
|
|
"Total Octets Received");
|
2010-06-16 17:36:53 +00:00
|
|
|
|
|
|
|
/* Packet Transmission Stats */
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gotc,
|
2010-09-20 16:04:44 +00:00
|
|
|
"Good Octets Transmitted");
|
2014-10-10 16:36:25 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_txd",
|
|
|
|
CTLFLAG_RD, &stats->tot,
|
|
|
|
"Total Octets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->tpt,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Total Packets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->gptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Good Packets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->bptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Broadcast Packets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->mptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Multicast Packets Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc64,
|
2014-10-10 16:36:25 +00:00
|
|
|
"64 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc127,
|
2010-06-16 17:36:53 +00:00
|
|
|
"65-127 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc255,
|
2010-06-16 17:36:53 +00:00
|
|
|
"128-255 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc511,
|
2010-06-16 17:36:53 +00:00
|
|
|
"256-511 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc1023,
|
2010-06-16 17:36:53 +00:00
|
|
|
"512-1023 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ptc1522,
|
2010-06-16 17:36:53 +00:00
|
|
|
"1024-1522 byte frames transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->tsctc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"TSO Contexts Transmitted");
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->tsctfc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"TSO Contexts Failed");
|
|
|
|
|
|
|
|
|
|
|
|
/* Interrupt Stats */
|
|
|
|
|
|
|
|
int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
|
|
|
|
CTLFLAG_RD, NULL, "Interrupt Statistics");
|
|
|
|
int_list = SYSCTL_CHILDREN(int_node);
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->iac,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Assertion Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->icrxptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Rx Pkt Timer Expire Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->icrxatc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Rx Abs Timer Expire Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ictxptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Tx Pkt Timer Expire Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ictxatc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Tx Abs Timer Expire Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ictxqec,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Tx Queue Empty Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->ictxqmtc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Tx Queue Min Thresh Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->icrxdmtc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Rx Desc Min Thresh Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->icrxoc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Interrupt Cause Receiver Overrun Count");
|
|
|
|
|
|
|
|
/* Host to Card Stats */
|
|
|
|
|
|
|
|
host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host",
|
|
|
|
CTLFLAG_RD, NULL,
|
|
|
|
"Host to Card Statistics");
|
|
|
|
|
|
|
|
host_list = SYSCTL_CHILDREN(host_node);
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->cbtmpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Circuit Breaker Tx Packet Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->htdpmc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Host Transmit Discarded Packets");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->rpthc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Rx Packets To Host");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->cbrmpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Circuit Breaker Rx Packet Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->cbrdpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Circuit Breaker Rx Dropped Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->hgptc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Host Good Packets Tx Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->htcbdpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Host Tx Circuit Breaker Dropped Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->hgorc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Host Good Octets Received Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->hgotc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Host Good Octets Transmit Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->lenerrs,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Length Errors");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->scvpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"SerDes/SGMII Code Violation Pkt Count");
|
|
|
|
|
2011-03-18 18:54:00 +00:00
|
|
|
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
|
2010-06-30 21:05:51 +00:00
|
|
|
CTLFLAG_RD, &stats->hrmpc,
|
2010-06-16 17:36:53 +00:00
|
|
|
"Header Redirection Missed Packet Count");
|
2010-06-30 17:26:47 +00:00
|
|
|
}
|
2010-06-16 17:36:53 +00:00
|
|
|
|
2008-02-29 21:50:11 +00:00
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
*
|
|
|
|
* This routine provides a way to dump out the adapter eeprom,
|
|
|
|
* often a useful debug/service tool. This only dumps the first
|
|
|
|
* 32 words, stuff that matters is in that extent.
|
|
|
|
*
|
|
|
|
**********************************************************************/
|
|
|
|
static int
|
2010-06-16 17:36:53 +00:00
|
|
|
igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
|
|
|
struct adapter *adapter;
|
|
|
|
int error;
|
|
|
|
int result;
|
|
|
|
|
|
|
|
result = -1;
|
|
|
|
error = sysctl_handle_int(oidp, &result, 0, req);
|
|
|
|
|
|
|
|
if (error || !req->newptr)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This value will cause a hex dump of the
|
|
|
|
* first 32 16-bit words of the EEPROM to
|
|
|
|
* the screen.
|
|
|
|
*/
|
2010-06-16 17:36:53 +00:00
|
|
|
if (result == 1) {
|
2008-02-29 21:50:11 +00:00
|
|
|
adapter = (struct adapter *)arg1;
|
|
|
|
igb_print_nvm_info(adapter);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
static void
|
|
|
|
igb_print_nvm_info(struct adapter *adapter)
|
2008-02-29 21:50:11 +00:00
|
|
|
{
|
2010-06-16 17:36:53 +00:00
|
|
|
u16 eeprom_data;
|
|
|
|
int i, j, row = 0;
|
2008-02-29 21:50:11 +00:00
|
|
|
|
2010-06-16 17:36:53 +00:00
|
|
|
/* Its a bit crude, but it gets the job done */
|
|
|
|
printf("\nInterface EEPROM Dump:\n");
|
|
|
|
printf("Offset\n0x0000 ");
|
|
|
|
for (i = 0, j = 0; i < 32; i++, j++) {
|
|
|
|
if (j == 8) { /* Make the offset block */
|
|
|
|
j = 0; ++row;
|
|
|
|
printf("\n0x00%x0 ",row);
|
|
|
|
}
|
|
|
|
e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
|
|
|
|
printf("%04x ", eeprom_data);
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
2010-06-16 17:36:53 +00:00
|
|
|
printf("\n");
|
2008-02-29 21:50:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2011-03-18 18:54:00 +00:00
|
|
|
igb_set_sysctl_value(struct adapter *adapter, const char *name,
|
2008-02-29 21:50:11 +00:00
|
|
|
const char *description, int *limit, int value)
|
|
|
|
{
|
|
|
|
*limit = value;
|
|
|
|
SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
|
|
|
|
SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
|
|
|
|
OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
|
|
|
|
}
|
2011-03-18 18:54:00 +00:00
|
|
|
|
2011-04-05 21:55:43 +00:00
|
|
|
/*
|
|
|
|
** Set flow control using sysctl:
|
|
|
|
** Flow control values:
|
|
|
|
** 0 - off
|
|
|
|
** 1 - rx pause
|
|
|
|
** 2 - tx pause
|
|
|
|
** 3 - full
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
2011-12-10 07:08:52 +00:00
|
|
|
int error;
|
|
|
|
static int input = 3; /* default is full */
|
|
|
|
struct adapter *adapter = (struct adapter *) arg1;
|
2011-04-05 21:55:43 +00:00
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
error = sysctl_handle_int(oidp, &input, 0, req);
|
2011-04-05 21:55:43 +00:00
|
|
|
|
2011-06-20 22:59:29 +00:00
|
|
|
if ((error) || (req->newptr == NULL))
|
2011-04-05 21:55:43 +00:00
|
|
|
return (error);
|
|
|
|
|
2011-12-10 07:08:52 +00:00
|
|
|
switch (input) {
|
2011-04-05 21:55:43 +00:00
|
|
|
case e1000_fc_rx_pause:
|
|
|
|
case e1000_fc_tx_pause:
|
|
|
|
case e1000_fc_full:
|
|
|
|
case e1000_fc_none:
|
2011-12-10 07:08:52 +00:00
|
|
|
adapter->hw.fc.requested_mode = input;
|
|
|
|
adapter->fc = input;
|
|
|
|
break;
|
2011-04-05 21:55:43 +00:00
|
|
|
default:
|
2011-12-10 07:08:52 +00:00
|
|
|
/* Do nothing */
|
|
|
|
return (error);
|
2011-04-05 21:55:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
|
|
|
|
e1000_force_mac_fc(&adapter->hw);
|
2014-09-15 19:53:49 +00:00
|
|
|
/* XXX TODO: update DROP_EN on each RX queue if appropriate */
|
2011-06-20 22:59:29 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
** Manage DMA Coalesce:
|
|
|
|
** Control values:
|
|
|
|
** 0/1 - off/on
|
|
|
|
** Legal timer values are:
|
|
|
|
** 250,500,1000-10000 in thousands
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
igb_sysctl_dmac(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = (struct adapter *) arg1;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = sysctl_handle_int(oidp, &adapter->dmac, 0, req);
|
|
|
|
|
|
|
|
if ((error) || (req->newptr == NULL))
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
switch (adapter->dmac) {
|
|
|
|
case 0:
|
|
|
|
/*Disabling */
|
|
|
|
break;
|
|
|
|
case 1: /* Just enable and use default */
|
|
|
|
adapter->dmac = 1000;
|
|
|
|
break;
|
|
|
|
case 250:
|
|
|
|
case 500:
|
|
|
|
case 1000:
|
|
|
|
case 2000:
|
|
|
|
case 3000:
|
|
|
|
case 4000:
|
|
|
|
case 5000:
|
|
|
|
case 6000:
|
|
|
|
case 7000:
|
|
|
|
case 8000:
|
|
|
|
case 9000:
|
|
|
|
case 10000:
|
|
|
|
/* Legal values - allow */
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Do nothing, illegal value */
|
|
|
|
adapter->dmac = 0;
|
2013-10-09 17:32:52 +00:00
|
|
|
return (EINVAL);
|
2011-06-20 22:59:29 +00:00
|
|
|
}
|
|
|
|
/* Reinit the interface */
|
|
|
|
igb_init(adapter);
|
|
|
|
return (error);
|
2011-04-05 21:55:43 +00:00
|
|
|
}
|
2012-07-07 20:21:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
** Manage Energy Efficient Ethernet:
|
|
|
|
** Control values:
|
|
|
|
** 0/1 - enabled/disabled
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
igb_sysctl_eee(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = (struct adapter *) arg1;
|
|
|
|
int error, value;
|
|
|
|
|
|
|
|
value = adapter->hw.dev_spec._82575.eee_disable;
|
|
|
|
error = sysctl_handle_int(oidp, &value, 0, req);
|
|
|
|
if (error || req->newptr == NULL)
|
|
|
|
return (error);
|
|
|
|
IGB_CORE_LOCK(adapter);
|
|
|
|
adapter->hw.dev_spec._82575.eee_disable = (value != 0);
|
|
|
|
igb_init_locked(adapter);
|
|
|
|
IGB_CORE_UNLOCK(adapter);
|
|
|
|
return (0);
|
|
|
|
}
|