diff --git a/sys/conf/files b/sys/conf/files
index 6203bc4ccd39..6e9b67ab6c9f 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -850,6 +850,8 @@ dev/ixgb/ixgb_ee.c optional ixgb
 dev/ixgb/ixgb_hw.c optional ixgb
 dev/ixgbe/ixgbe.c optional ixgbe \
 	compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/tcp_lro.c optional ixgbe \
+	compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_phy.c optional ixgbe \
 	compile-with "${NORMAL_C} -I$S/dev/ixgbe"
 dev/ixgbe/ixgbe_api.c optional ixgbe \
 	compile-with "${NORMAL_C} -I$S/dev/ixgbe"
diff --git a/sys/dev/ixgbe/LICENSE b/sys/dev/ixgbe/LICENSE
index ab2250974614..b6fd3b77edb3 100644
--- a/sys/dev/ixgbe/LICENSE
+++ b/sys/dev/ixgbe/LICENSE
@@ -1,6 +1,6 @@
-$FreeBSD$
+/******************************************************************************
 
-  Copyright (c) 2001-2007, Intel Corporation
+  Copyright (c) 2001-2008, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
@@ -29,3 +29,5 @@ $FreeBSD$
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
diff --git a/sys/dev/ixgbe/README b/sys/dev/ixgbe/README
new file mode 100644
index 000000000000..40bbe5fede58
--- /dev/null
+++ b/sys/dev/ixgbe/README
@@ -0,0 +1,278 @@
+FreeBSD Driver for 10 Gigabit PCI Express Server Adapters
+==========================================================
+$FreeBSD$
+
+
+Contents
+========
+
+- Overview
+- Supported Adapters
+- Building and Installation
+- Additional Configurations
+- Known Limitations
+
+
+Overview
+========
+
+This file describes the FreeBSD* driver for the 10 Gigabit PCIe Family of
+Adapters. The driver has been developed for use with FreeBSD 7 or later.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel 10GbE adapter. All hardware requirements listed
+apply to use with FreeBSD.
+
+
+Supported Adapters
+==================
+
+The following Intel network adapters are compatible with the drivers in this
+release:
+
+Controller  Adapter Name                      Physical Layer
+----------  ------------                      --------------
+82598EB     Intel(R) 10 Gigabit XF SR/AF      10G Base-SR (850 nm optical fiber)
+            Dual Port Server Adapter          10G Base-LR (1310 nm optical fiber)
+82598EB     Intel(R) 10 Gigabit XF SR/LR
+            Server Adapter
+            Intel(R) 82598EB 10 Gigabit AF
+            Network Connection
+            Intel(R) 82598EB 10 Gigabit AT
+            CX4 Network Connection
+
+
+Building and Installation
+=========================
+
+NOTE: You must have kernel sources installed in order to compile the driver
+      module.
+
+      In the instructions below, x.x.x is the driver version as indicated in
+      the name of the driver tar.
+
+1. Move the base driver tar file to the directory of your choice. For
+   example, use /home/username/ixgbe or /usr/local/src/ixgbe.
+
+2. Untar/unzip the archive:
+       tar xfz ixgbe-x.x.x.tar.gz
+
+3. To install the man page:
+       cd ixgbe-x.x.x
+       gzip -c ixgbe.4 > /usr/share/man/man4/ixgbe.4.gz
+
+4. To load the driver onto a running system:
+       cd ixgbe-x.x.x/src
+       make load
+
+5. To assign an IP address to the interface, enter the following:
+       ifconfig ix<interface_num> <IP_address>
+
+6. Verify that the interface works. Enter the following, where <IP_address>
+   is the IP address for another machine on the same subnet as the interface
+   that is being tested:
+       ping <IP_address>
+
+7. If you want the driver to load automatically when the system is booted:
+
+       cd ixgbe-x.x.x/src
+       make
+       make install
+
+   Edit /boot/loader.conf, and add the following line:
+       ixgbe_load="YES"
+
+   OR
+
+   compile the driver into the kernel (see item 8).
+
+
+   Edit /etc/rc.conf, and create the appropriate ifconfig_ix<interface_num>
+   entry:
+
+       ifconfig_ix<interface_num>="<ifconfig_settings>"
+
+   Example usage:
+
+       ifconfig_ix0="inet 192.168.10.1 netmask 255.255.255.0"
+
+   NOTE: For assistance, see the ifconfig man page.
+
+8. If you want to compile the driver into the kernel, enter:
+
+   FreeBSD 7 or later:
+
+       cd ixgbe-x.x.x/src
+
+       cp *.[ch] /usr/src/sys/dev/ixgbe
+
+       cp Makefile.kernel /usr/src/sys/modules/ixgbe/Makefile
+
+   Edit the kernel configuration file (i.e., GENERIC or MYKERNEL) in
+   /usr/src/sys/i386/conf (replace "i386" with the appropriate system
+   architecture if necessary), and ensure the following line is present:
+
+       device ixgbe
+
+   Compile and install the kernel. The system must be rebooted for the
+   kernel updates to take effect. For additional information on compiling
+   the kernel, consult the FreeBSD operating system documentation.
+
+
+Configuration and Tuning
+========================
+
+The driver supports Transmit/Receive Checksum Offload and Jumbo Frames on
+all 10 Gigabit adapters.
+
+  Jumbo Frames
+  ------------
+  To enable Jumbo Frames, use the ifconfig utility to increase the MTU
+  beyond 1500 bytes.
+
+  NOTES:
+
+  - The Jumbo Frames setting on the switch must be set to at least
+    22 bytes larger than that of the adapter.
+
+  - There are known performance issues with this driver when running
+    UDP traffic with Jumbo Frames.
+
+  The Jumbo Frames MTU range for Intel Adapters is 1500 to 16114. The
+  default MTU is 1500. To modify the setting, enter the following:
+
+      ifconfig ix<interface_num> mtu 9000
+
+  To confirm an interface's MTU value, use the ifconfig command. To confirm
+  the MTU used between two specific devices, use:
+
+      route get <destination_IP_address>
+
+  VLANs
+  -----
+  To create a new VLAN pseudo-interface:
+
+      ifconfig <vlan_name> create
+
+  To associate the VLAN pseudo-interface with a physical interface and
+  assign a VLAN ID, IP address, and netmask:
+
+      ifconfig <vlan_name> <IP_address> netmask <subnet_mask> vlan
+      <vlan_id> vlandev <physical_interface>
+
+  Example:
+
+      ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ix0
+
+  In this example, all packets will be marked on egress with 802.1Q VLAN
+  tags, specifying a VLAN ID of 10.
+
+  To remove a VLAN pseudo-interface:
+
+      ifconfig <vlan_name> destroy
+
+
+  Checksum Offload
+  ----------------
+
+  Checksum offloading is supported for both TCP and UDP packets, on both
+  transmit and receive.
+
+  Checksum offloading can be enabled or disabled using ifconfig.
+  Both transmit and receive offloading will be either enabled or
+  disabled together. You cannot enable/disable one without the other.
+
+  To enable checksum offloading:
+
+      ifconfig ix<interface_num> rxcsum
+
+  To disable checksum offloading:
+
+      ifconfig ix<interface_num> -rxcsum
+
+  To confirm the current setting:
+
+      ifconfig ix<interface_num>
+
+
+  TSO
+  ---
+
+  To disable:
+
+      ifconfig ix<interface_num> -tso
+
+  To re-enable:
+
+      ifconfig ix<interface_num> tso
+
+  LRO
+  ---
+
+  Large Receive Offload is available as of version 1.4.4; it is on
+  by default. It can be toggled off and on by using:
+      sysctl dev.ix.X.enable_lro=[0,1]
+
+  NOTE: when changing this feature you MUST be sure the interface
+  is reinitialized; it is easy to do this with ifconfig down/up.
+  The LRO code will ultimately move into the kernel stack code,
+  but for this first release it was included with the driver.
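  For example, assuming the first port attached as ix0 (the unit number
  below is only illustrative), LRO can be turned off and the interface
  reinitialized with a sequence along these lines:

      sysctl dev.ix.0.enable_lro=0
      ifconfig ix0 down
      ifconfig ix0 up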
+
+  Important system configuration changes:
+  ---------------------------------------
+
+  When there is a choice, run on a 64-bit OS rather than a 32-bit one; it
+  makes a significant difference in performance.
+
+  The default scheduler SCHED_4BSD is not smart about SMP locality issues.
+  Significant improvement can be achieved by switching to the ULE scheduler.
+
+  This is done by changing the entry in the config file from SCHED_4BSD to
+  SCHED_ULE. Note that this is only advisable on FreeBSD 7; on 6.X there
+  have been stability problems with ULE.
+
+  In the file /etc/sysctl.conf, add the line:
+
+      hw.intr_storm_threshold=8000 (the default is 1000)
+
+  Best throughput results are seen with a large MTU; use 9000 if possible.
+
+  The default number of descriptors is 256; increasing this to 1024 or even
+  2048 may improve performance.
+
+
+Known Limitations
+=================
+  Under a small-packet UDP stress test with the 10GbE driver, the FreeBSD
+  system will drop UDP packets because the socket buffers fill up. You may
+  want to change the driver's Flow Control variables to the minimum value
+  for controlling packet reception.
+
+
+Support
+=======
+
+For general information and support, go to the Intel support website at:
+
+    http://support.intel.com
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related to
+the issue to freebsdnic@mailbox.intel.com.
+
+
+
+License
+=======
+
+This software program is released under the terms of a license agreement
+between you ('Licensee') and Intel. Do not use or load this software or any
+associated materials (collectively, the 'Software') until you have carefully
+read the full terms and conditions of the LICENSE located in this software
+package. By loading or using the Software, you agree to the terms of this
+Agreement. If you do not agree with the terms of this Agreement, do not
+install or use the Software.
+
+* Other names and brands may be claimed as the property of others.
+
+
diff --git a/sys/dev/ixgbe/ixgbe.c b/sys/dev/ixgbe/ixgbe.c
index fbb24d7ecb53..9055fc29f641 100644
--- a/sys/dev/ixgbe/ixgbe.c
+++ b/sys/dev/ixgbe/ixgbe.c
@@ -1,36 +1,36 @@
-/*******************************************************************************
+/******************************************************************************
-Copyright (c) 2001-2007, Intel Corporation
-All rights reserved.
+  Copyright (c) 2001-2008, Intel Corporation
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+  1. Redistributions of source code must retain the above copyright notice,
+     this list of conditions and the following disclaimer.
+
+  2. Redistributions in binary form must reproduce the above copyright
+     notice, this list of conditions and the following disclaimer in the
+     documentation and/or other materials provided with the distribution.
+
+  3. Neither the name of the Intel Corporation nor the names of its
+     contributors may be used to endorse or promote products derived from
+     this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
- -***************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" @@ -46,7 +46,7 @@ int ixgbe_display_debug_stats = 0; /********************************************************************* * Driver version *********************************************************************/ -char ixgbe_driver_version[] = "1.2.6"; +char ixgbe_driver_version[] = "1.4.4"; /********************************************************************* * PCI Device ID Table @@ -62,7 +62,11 @@ static ixgbe_vendor_info_t ixgbe_vendor_info_array[] = { {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT_DUAL_PORT, 0, 0, 0}, {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; @@ -83,7 +87,7 @@ static int ixgbe_attach(device_t); static int ixgbe_detach(device_t); static int ixgbe_shutdown(device_t); static void ixgbe_start(struct ifnet *); -static void ixgbe_start_locked(struct ifnet *); +static void ixgbe_start_locked(struct tx_ring *, struct ifnet *); static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t); static void ixgbe_watchdog(struct adapter *); static void ixgbe_init(void *); @@ -93,15 +97,15 @@ static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); static int ixgbe_media_change(struct ifnet *); static void ixgbe_identify_hardware(struct adapter *); static int ixgbe_allocate_pci_resources(struct adapter *); -static void ixgbe_free_pci_resources(struct adapter *); +static int ixgbe_allocate_msix(struct adapter *); +static int ixgbe_allocate_legacy(struct adapter *); +static int ixgbe_allocate_queues(struct adapter *); +static int ixgbe_setup_msix(struct adapter *); +static void ixgbe_free_pci_resources(struct adapter *); static void ixgbe_local_timer(void *); static int ixgbe_hardware_init(struct adapter *); static void ixgbe_setup_interface(device_t, struct adapter *); -static int ixgbe_allocate_queues(struct adapter *); -static int ixgbe_allocate_msix_resources(struct adapter *); -#if __FreeBSD_version >= 700000 -static int ixgbe_setup_msix(struct adapter *); -#endif + static int ixgbe_allocate_transmit_buffers(struct tx_ring *); static int ixgbe_setup_transmit_structures(struct adapter *); static void ixgbe_setup_transmit_ring(struct tx_ring *); @@ -120,8 +124,8 @@ static void ixgbe_enable_intr(struct adapter *); static void ixgbe_disable_intr(struct adapter *); static void ixgbe_update_stats_counters(struct adapter *); static bool ixgbe_txeof(struct tx_ring *); -static int ixgbe_rxeof(struct rx_ring *, int); -static void ixgbe_rx_checksum(struct adapter *, uint32_t, struct mbuf *); +static bool ixgbe_rxeof(struct rx_ring *, int); +static void ixgbe_rx_checksum(struct adapter *, u32, struct mbuf *); static void ixgbe_set_promisc(struct adapter *); static void ixgbe_disable_promisc(struct adapter *); static void ixgbe_set_multi(struct adapter *); @@ -129,8 +133,8 @@ static void ixgbe_print_hw_stats(struct adapter *); static void ixgbe_print_debug_info(struct adapter *); static void ixgbe_update_link_status(struct adapter *); static int 
ixgbe_get_buf(struct rx_ring *, int); -static void ixgbe_enable_vlans(struct adapter * adapter); -static int ixgbe_encap(struct adapter *, struct mbuf **); +static void ixgbe_enable_hw_vlans(struct adapter * adapter); +static int ixgbe_xmit(struct tx_ring *, struct mbuf **); static int ixgbe_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_debug(SYSCTL_HANDLER_ARGS); static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS); @@ -139,24 +143,23 @@ static int ixgbe_dma_malloc(struct adapter *, bus_size_t, static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); static void ixgbe_add_rx_process_limit(struct adapter *, const char *, const char *, int *, int); -static boolean_t ixgbe_tx_csum_setup(struct tx_ring *, struct mbuf *); +static boolean_t ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *); static boolean_t ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *); static void ixgbe_set_ivar(struct adapter *, u16, u8); static void ixgbe_configure_ivars(struct adapter *); +static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); + +/* Legacy (single vector interrupt handler */ +static void ixgbe_legacy_irq(void *); -/* Legacy Fast Interrupt routine and handlers */ -#if __FreeBSD_version >= 700000 -static int ixgbe_fast_irq(void *); /* The MSI/X Interrupt handlers */ static void ixgbe_msix_tx(void *); static void ixgbe_msix_rx(void *); static void ixgbe_msix_link(void *); -#else -static void ixgbe_fast_irq(void *); -#endif -static void ixgbe_rxtx(void *context, int pending); -static void ixgbe_link(void *context, int pending); +static void ixgbe_handle_tx(void *context, int pending); +static void ixgbe_handle_rx(void *context, int pending); +static void ixgbe_handle_link(void *context, int pending); #ifndef NO_82598_A0_SUPPORT static void desc_flip(void *); @@ -197,18 +200,31 @@ TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit); static int ixgbe_flow_control = 3; TUNABLE_INT("hw.ixgbe.flow_control", &ixgbe_flow_control); -/* Number of TX Queues, note multi tx is not working */ +/* + * Should the driver do LRO on the RX end + * this can be toggled on the fly, but the + * interface must be reset (down/up) for it + * to take effect. + */ +static int ixgbe_enable_lro = 1; +TUNABLE_INT("hw.ixgbe.enable_lro", &ixgbe_enable_lro); + +/* + * MSIX should be the default for best performance, + * but this allows it to be forced off for testing. + */ +static int ixgbe_enable_msix = 1; +TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix); + +/* + * Number of TX/RX Queues, with 0 setting + * it autoconfigures to the number of cpus. 
+ */ static int ixgbe_tx_queues = 1; TUNABLE_INT("hw.ixgbe.tx_queues", &ixgbe_tx_queues); - -/* Number of RX Queues */ -static int ixgbe_rx_queues = 8; +static int ixgbe_rx_queues = 1; TUNABLE_INT("hw.ixgbe.rx_queues", &ixgbe_rx_queues); -/* Number of Other Queues, this is used for link interrupts */ -static int ixgbe_other_queues = 1; -TUNABLE_INT("hw.ixgbe.other_queues", &ixgbe_other_queues); - /* Number of TX descriptors per ring */ static int ixgbe_txd = DEFAULT_TXD; TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd); @@ -220,6 +236,9 @@ TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd); /* Total number of Interfaces - need for config sanity check */ static int ixgbe_total_ports; +/* Optics type of this interface */ +static int ixgbe_optics; + /********************************************************************* * Device identification routine * @@ -264,10 +283,30 @@ ixgbe_probe(device_t dev) ixgbe_strings[ent->index], ixgbe_driver_version); switch (pci_device_id) { + case IXGBE_DEV_ID_82598AT_DUAL_PORT : + ixgbe_total_ports += 2; + break; + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT : + ixgbe_optics = IFM_10G_CX4; + ixgbe_total_ports += 2; + break; case IXGBE_DEV_ID_82598AF_DUAL_PORT : + ixgbe_optics = IFM_10G_SR; ixgbe_total_ports += 2; break; case IXGBE_DEV_ID_82598AF_SINGLE_PORT : + ixgbe_optics = IFM_10G_SR; + ixgbe_total_ports += 1; + break; + case IXGBE_DEV_ID_82598EB_XF_LR : + ixgbe_optics = IFM_10G_LR; + ixgbe_total_ports += 1; + break; + case IXGBE_DEV_ID_82598EB_CX4 : + ixgbe_optics = IFM_10G_CX4; + ixgbe_total_ports += 1; + break; + case IXGBE_DEV_ID_82598AT : ixgbe_total_ports += 1; default: break; @@ -296,18 +335,16 @@ ixgbe_attach(device_t dev) { struct adapter *adapter; int error = 0; - uint32_t ctrl_ext; - char name_string[16]; + u32 ctrl_ext; INIT_DEBUGOUT("ixgbe_attach: begin"); /* Allocate, clear, and link in our adapter structure */ adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; - /* General Lock Init*/ - snprintf(name_string, sizeof(name_string), "%s:core", - device_get_nameunit(dev)); - mtx_init(&adapter->core_mtx, name_string, NULL, MTX_DEF); + + /* Core Lock Init*/ + IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), @@ -325,6 +362,11 @@ ixgbe_attach(device_t dev) OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW, adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control"); + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_lro", CTLTYPE_INT|CTLFLAG_RW, + &ixgbe_enable_lro, 1, "Large Receive Offload"); + /* Set up the timer callout */ callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); @@ -378,28 +420,27 @@ ixgbe_attach(device_t dev) goto err_out; } -#if __FreeBSD_version >= 700000 - if (adapter->msix) { - error = ixgbe_setup_msix(adapter); - if (error) - goto err_out; - } -#endif - /* Initialize the shared code */ if (ixgbe_init_shared_code(&adapter->hw)) { device_printf(dev,"Unable to initialize the shared code\n"); error = EIO; - goto err_out; + goto err_late; } /* Initialize the hardware */ if (ixgbe_hardware_init(adapter)) { device_printf(dev,"Unable to initialize the hardware\n"); error = EIO; - goto err_out; + goto err_late; } + if ((adapter->msix > 1) && (ixgbe_enable_msix)) + error = ixgbe_allocate_msix(adapter); + else + error = ixgbe_allocate_legacy(adapter); + if (error) + goto err_late; + /* Setup OS specific network interface */ ixgbe_setup_interface(dev, adapter); @@ -418,7 +459,9 @@ ixgbe_attach(device_t dev) 
INIT_DEBUGOUT("ixgbe_attach: end"); return (0); - +err_late: + ixgbe_free_transmit_structures(adapter); + ixgbe_free_receive_structures(adapter); err_out: ixgbe_free_pci_resources(adapter); return (error); @@ -439,6 +482,8 @@ static int ixgbe_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; u32 ctrl_ext; INIT_DEBUGOUT("ixgbe_detach: begin"); @@ -453,17 +498,23 @@ ixgbe_detach(device_t dev) return (EBUSY); } - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ixgbe_stop(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); - if (adapter->tq != NULL) { - taskqueue_drain(adapter->tq, &adapter->rxtx_task); - taskqueue_drain(taskqueue_fast, &adapter->link_task); - taskqueue_free(adapter->tq); - adapter->tq = NULL; + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + taskqueue_drain(txr->tq, &txr->tx_task); + taskqueue_free(txr->tq); + txr->tq = NULL; } + for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) { + taskqueue_drain(rxr->tq, &rxr->rx_task); + taskqueue_free(rxr->tq); + rxr->tq = NULL; + } + taskqueue_drain(taskqueue_fast, &adapter->link_task); + /* let hardware know driver is unloading */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; @@ -478,7 +529,7 @@ ixgbe_detach(device_t dev) ixgbe_free_transmit_structures(adapter); ixgbe_free_receive_structures(adapter); - mtx_destroy(&adapter->core_mtx); + IXGBE_CORE_LOCK_DESTROY(adapter); return (0); } @@ -492,9 +543,9 @@ static int ixgbe_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ixgbe_stop(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); return (0); } @@ -510,12 +561,12 @@ ixgbe_shutdown(device_t dev) **********************************************************************/ static void -ixgbe_start_locked(struct ifnet * ifp) +ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp) { struct mbuf *m_head; - struct adapter *adapter = ifp->if_softc; + struct adapter *adapter = txr->adapter; - mtx_assert(&adapter->tx_mtx, MA_OWNED); + IXGBE_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) @@ -529,7 +580,7 @@ ixgbe_start_locked(struct ifnet * ifp) if (m_head == NULL) break; - if (ixgbe_encap(adapter, &m_head)) { + if (ixgbe_xmit(txr, &m_head)) { if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; @@ -540,21 +591,37 @@ ixgbe_start_locked(struct ifnet * ifp) ETHER_BPF_MTAP(ifp, m_head); /* Set timeout in case hardware has problems transmitting */ - adapter->watchdog_timer = IXGBE_TX_TIMEOUT; + txr->watchdog_timer = IXGBE_TX_TIMEOUT; } return; } + static void ixgbe_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + u32 queue = 0; - mtx_lock(&adapter->tx_mtx); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ixgbe_start_locked(ifp); - mtx_unlock(&adapter->tx_mtx); + /* + ** This is really just here for testing + ** TX multiqueue, ultimately what is + ** needed is the flow support in the stack + ** and appropriate logic here to deal with + ** it. 
-jfv + */ + if (adapter->num_tx_queues > 1) + queue = (curcpu % adapter->num_tx_queues); + + txr = &adapter->tx_rings[queue]; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_TX_LOCK(txr); + ixgbe_start_locked(txr, ifp); + IXGBE_TX_UNLOCK(txr); + } return; } @@ -581,9 +648,9 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) if (ifa->ifa_addr->sa_family == AF_INET) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ixgbe_init_locked(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); } arp_ifinit(ifp, ifa); } else @@ -594,21 +661,21 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) { error = EINVAL; } else { - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ixgbe_init_locked(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & - IFF_PROMISC) { + (IFF_PROMISC | IFF_ALLMULTI)) { ixgbe_disable_promisc(adapter); ixgbe_set_promisc(adapter); } @@ -618,17 +685,17 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixgbe_stop(adapter); adapter->if_flags = ifp->if_flags; - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ixgbe_disable_intr(adapter); ixgbe_set_multi(adapter); ixgbe_enable_intr(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: @@ -665,13 +732,18 @@ ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) /********************************************************************* * Watchdog entry point * - * This routine is called whenever hardware quits transmitting. + * This routine is called by the local timer + * to detect hardware hangs . * **********************************************************************/ static void ixgbe_watchdog(struct adapter *adapter) { + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + struct ixgbe_hw *hw = &adapter->hw; + bool tx_hang = FALSE; mtx_assert(&adapter->core_mtx, MA_OWNED); @@ -682,22 +754,36 @@ ixgbe_watchdog(struct adapter *adapter) * Finally, anytime all descriptors are clean the timer is * set to 0. */ - if (adapter->watchdog_timer == 0 || --adapter->watchdog_timer) - return; + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + if (txr->watchdog_timer == 0 || --txr->watchdog_timer) + continue; + else { + tx_hang = TRUE; + break; + } + } + if (tx_hang == FALSE) + return; /* * If we are in this routine because of pause frames, then don't * reset the hardware. 
*/ - if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) { - adapter->watchdog_timer = IXGBE_TX_TIMEOUT; + if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) { + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) + txr->watchdog_timer = IXGBE_TX_TIMEOUT; return; } device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); - ixgbe_print_debug_info(adapter); - + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i, + IXGBE_READ_REG(hw, IXGBE_TDH(i)), + IXGBE_READ_REG(hw, IXGBE_TDT(i))); + device_printf(dev,"TX(%d) desc avail = %d, Next TX to Clean = %d\n", + i, txr->tx_avail, txr->next_tx_to_clean); + } adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; @@ -722,10 +808,12 @@ ixgbe_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; - u32 txdctl, rxdctl, mhadd; + struct ixgbe_hw *hw; + u32 txdctl, rxdctl, mhadd, gpie; INIT_DEBUGOUT("ixgbe_init: begin"); + hw = &adapter->hw; mtx_assert(&adapter->core_mtx, MA_OWNED); ixgbe_stop(adapter); @@ -743,7 +831,7 @@ ixgbe_init_locked(struct adapter *adapter) } if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) - ixgbe_enable_vlans(adapter); + ixgbe_enable_hw_vlans(adapter); /* Prepare transmit descriptors and buffers */ if (ixgbe_setup_transmit_structures(adapter)) { @@ -774,16 +862,17 @@ ixgbe_init_locked(struct adapter *adapter) /* Configure RX settings */ ixgbe_initialize_receive_units(adapter); - /* Enable Enhanced MSIX mode */ + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + /* Enable Fan Failure Interrupt */ + if (adapter->hw.phy.media_type == ixgbe_media_type_copper) + gpie |= IXGBE_SDP1_GPIEN; if (adapter->msix) { - u32 gpie; - gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + /* Enable Enhanced MSIX mode */ gpie |= IXGBE_GPIE_MSIX_MODE; gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); - gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); } + IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); /* Set the various hardware offload abilities */ ifp->if_hwassist = 0; @@ -805,11 +894,15 @@ ixgbe_init_locked(struct adapter *adapter) for (int i = 0; i < adapter->num_tx_queues; i++) { txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i)); txdctl |= IXGBE_TXDCTL_ENABLE; + /* Set WTHRESH to 8, burst writeback */ + txdctl |= (8 << 16); IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl); } for (int i = 0; i < adapter->num_rx_queues; i++) { rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i)); + /* PTHRESH set to 32 */ + rxdctl |= 0x0020; rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl); } @@ -833,56 +926,64 @@ ixgbe_init(void *arg) { struct adapter *adapter = arg; - mtx_lock(&adapter->core_mtx); + IXGBE_CORE_LOCK(adapter); ixgbe_init_locked(adapter); - mtx_unlock(&adapter->core_mtx); + IXGBE_CORE_UNLOCK(adapter); return; } +/* +** Deferred Interrupt Handlers +*/ + static void -ixgbe_link(void *context, int pending) +ixgbe_handle_link(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; - mtx_lock(&adapter->core_mtx); - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { - mtx_unlock(&adapter->core_mtx); - return; - } + IXGBE_CORE_LOCK(adapter); + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + goto out; callout_stop(&adapter->timer); ixgbe_update_link_status(adapter); callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); - 
mtx_unlock(&adapter->core_mtx); +out: + IXGBE_CORE_UNLOCK(adapter); + return; } -/* -** MSI and Legacy Deferred Handler -** - note this runs without the general lock -*/ static void -ixgbe_rxtx(void *context, int pending) +ixgbe_handle_rx(void *context, int pending) { - struct adapter *adapter = context; - struct ifnet *ifp = adapter->ifp; - /* For legacy there is only one of each */ - struct rx_ring *rxr = adapter->rx_rings; - struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = context; + struct adapter *adapter = rxr->adapter; + struct ifnet *ifp = adapter->ifp; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0) - taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); - mtx_lock(&adapter->tx_mtx); - ixgbe_txeof(txr); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0) + /* More to clean, schedule another task */ + taskqueue_enqueue(rxr->tq, &rxr->rx_task); - if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) - ixgbe_start_locked(ifp); - mtx_unlock(&adapter->tx_mtx); - } +} - ixgbe_enable_intr(adapter); +static void +ixgbe_handle_tx(void *context, int pending) +{ + struct tx_ring *txr = context; + struct adapter *adapter = txr->adapter; + struct ifnet *ifp = adapter->ifp; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_TX_LOCK(txr); + if (ixgbe_txeof(txr) != 0) + taskqueue_enqueue(txr->tq, &txr->tx_task); + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + ixgbe_start_locked(txr, ifp); + IXGBE_TX_UNLOCK(txr); + } } @@ -892,32 +993,41 @@ ixgbe_rxtx(void *context, int pending) * **********************************************************************/ -#if __FreeBSD_version >= 700000 -static int -#else static void -#endif -ixgbe_fast_irq(void *arg) +ixgbe_legacy_irq(void *arg) { - u32 reg_eicr; - struct adapter *adapter = arg; + u32 reg_eicr; + struct adapter *adapter = arg; + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; + struct ixgbe_hw *hw; + hw = &adapter->hw; reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); if (reg_eicr == 0) - return FILTER_STRAY; + return; - ixgbe_disable_intr(adapter); - taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); + if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0) + taskqueue_enqueue(rxr->tq, &rxr->rx_task); + if (ixgbe_txeof(txr) != 0) + taskqueue_enqueue(txr->tq, &txr->tx_task); + /* Check for fan failure */ + if ((hw->phy.media_type == ixgbe_media_type_copper) && + (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! 
" + "REPLACE IMMEDIATELY!!\n"); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, + IXGBE_EICR_GPI_SDP1); + } /* Link status change */ if (reg_eicr & IXGBE_EICR_LSC) taskqueue_enqueue(taskqueue_fast, &adapter->link_task); - return FILTER_HANDLED; + return; } -#if __FreeBSD_version >= 700000 /********************************************************************* * * MSI TX Interrupt Service routine @@ -930,21 +1040,17 @@ ixgbe_msix_tx(void *arg) struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; - uint32_t loop_cnt = MAX_INTR; - mtx_lock(&adapter->tx_mtx); - - while (loop_cnt > 0) { - if (__predict_false(!ixgbe_txeof(txr))) - break; - loop_cnt--; + ++txr->tx_irq; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_TX_LOCK(txr); + if (ixgbe_txeof(txr) != 0) + taskqueue_enqueue(txr->tq, &txr->tx_task); + IXGBE_TX_UNLOCK(txr); } + /* Reenable this interrupt */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, txr->eims); - if (ifp->if_drv_flags & IFF_DRV_RUNNING && - ifp->if_snd.ifq_head != NULL) - ixgbe_start_locked(ifp); - ixgbe_enable_intr(adapter); - mtx_unlock(&adapter->tx_mtx); return; } @@ -960,37 +1066,41 @@ ixgbe_msix_rx(void *arg) struct rx_ring *rxr = arg; struct adapter *adapter = rxr->adapter; struct ifnet *ifp = adapter->ifp; - uint32_t loop = MAX_INTR; - - while ((loop-- > 0) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) - ixgbe_rxeof(rxr, adapter->rx_process_limit); - - ixgbe_enable_intr(adapter); + ++rxr->rx_irq; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + if (ixgbe_rxeof(rxr, adapter->rx_process_limit) != 0) + taskqueue_enqueue(rxr->tq, &rxr->rx_task); + /* Reenable this interrupt */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->eims); + return; } static void ixgbe_msix_link(void *arg) { struct adapter *adapter = arg; - uint32_t reg_eicr; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_eicr; - mtx_lock(&adapter->core_mtx); + ++adapter->link_irq; - reg_eicr = IXGBE_READ_REG(&adapter->hw, IXGBE_EICR); + reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR); - if (reg_eicr & IXGBE_EICR_LSC) { - callout_stop(&adapter->timer); - ixgbe_update_link_status(adapter); - callout_reset(&adapter->timer, hz, - ixgbe_local_timer, adapter); + if (reg_eicr & IXGBE_EICR_LSC) + taskqueue_enqueue(taskqueue_fast, &adapter->link_task); + + /* Check for fan failure */ + if ((hw->phy.media_type == ixgbe_media_type_copper) && + (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! 
" + "REPLACE IMMEDIATELY!!\n"); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1); } - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); - ixgbe_enable_intr(adapter); - mtx_unlock(&adapter->core_mtx); + return; } -#endif /* __FreeBSD_version >= 700000 */ + /********************************************************************* * @@ -1006,16 +1116,29 @@ ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("ixgbe_media_status: begin"); + IXGBE_CORE_LOCK(adapter); ixgbe_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; - if (!adapter->link_active) + if (!adapter->link_active) { + IXGBE_CORE_UNLOCK(adapter); return; + } ifmr->ifm_status |= IFM_ACTIVE; - ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_T | IFM_FDX; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= ixgbe_optics | IFM_FDX; + break; + } + + IXGBE_CORE_UNLOCK(adapter); return; } @@ -1039,6 +1162,17 @@ ixgbe_media_change(struct ifnet * ifp) if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_AUTO: + adapter->hw.mac.autoneg = TRUE; + adapter->hw.phy.autoneg_advertised = + IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL; + break; + default: + device_printf(adapter->dev, "Only auto media type\n"); + return (EINVAL); + } + return (0); } @@ -1052,8 +1186,9 @@ ixgbe_media_change(struct ifnet * ifp) **********************************************************************/ static int -ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) +ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp) { + struct adapter *adapter = txr->adapter; u32 olinfo_status = 0, cmd_type_len = 0; u32 paylen; int i, j, error, nsegs; @@ -1061,7 +1196,6 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) struct mbuf *m_head; bus_dma_segment_t segs[IXGBE_MAX_SCATTER]; bus_dmamap_t map; - struct tx_ring *txr = adapter->tx_rings; struct ixgbe_tx_buf *txbuf, *txbuf_mapped; union ixgbe_adv_tx_desc *txd = NULL; @@ -1084,7 +1218,7 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) ixgbe_txeof(txr); /* Make sure things have improved */ if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) { - adapter->no_tx_desc_avail1++; + txr->no_tx_desc_avail++; return (ENOBUFS); } } @@ -1142,9 +1276,9 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) /* Make certain there are enough descriptors */ if (nsegs > txr->tx_avail - 2) { - adapter->no_tx_desc_avail2++; + txr->no_tx_desc_avail++; error = ENOBUFS; - goto encap_fail; + goto xmit_fail; } m_head = *m_headp; @@ -1159,10 +1293,8 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; ++adapter->tso_tx; - } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { - if (ixgbe_tx_csum_setup(txr, m_head)) + } else if (ixgbe_tx_ctx_setup(txr, m_head)) olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; - } i = txr->next_avail_tx_desc; for (j = 0; j < nsegs; j++) { @@ -1184,7 +1316,6 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) i = 0; txbuf->m_head = NULL; - txbuf->next_eop = -1; /* ** we have to do this inside the loop right now ** because of the hardware workaround. 
@@ -1207,7 +1338,6 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) /* Set the index of the descriptor that will be marked done */ txbuf = &txr->tx_buffers[first]; - txbuf->next_eop = last; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); @@ -1216,9 +1346,10 @@ ixgbe_encap(struct adapter *adapter, struct mbuf **m_headp) * hardware that this frame is available to transmit. */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i); + ++txr->tx_packets; return (0); -encap_fail: +xmit_fail: bus_dmamap_unload(txr->txtag, txbuf->map); return (error); @@ -1270,10 +1401,11 @@ ixgbe_disable_promisc(struct adapter * adapter) static void ixgbe_set_multi(struct adapter *adapter) { - uint32_t fctrl; - uint8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; - struct ifmultiaddr *ifma; - int mcnt = 0; + u32 fctrl; + u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 *update_ptr; + struct ifmultiaddr *ifma; + int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("ixgbe_set_multi: begin"); @@ -1301,11 +1433,30 @@ ixgbe_set_multi(struct adapter *adapter) } IF_ADDR_UNLOCK(ifp); - ixgbe_update_mc_addr_list(&adapter->hw, mta, mcnt, 0); + update_ptr = mta; + ixgbe_update_mc_addr_list(&adapter->hw, + update_ptr, mcnt, ixgbe_mc_array_itr); return; } +/* + * This is an iterator function now needed by the multicast + * shared code. It simply feeds the shared code routine the + * addresses in the array of ixgbe_set_multi() one by one. + */ +static u8 * +ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) +{ + u8 *addr = *update_ptr; + u8 *newptr; + *vmdq = 0; + + newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; + *update_ptr = newptr; + return addr; +} + /********************************************************************* * Timer routine @@ -1340,18 +1491,19 @@ ixgbe_local_timer(void *arg) static void ixgbe_update_link_status(struct adapter *adapter) { - uint32_t link_speed; boolean_t link_up = FALSE; struct ifnet *ifp = adapter->ifp; + struct tx_ring *txr = adapter->tx_rings; device_t dev = adapter->dev; - ixgbe_check_link(&adapter->hw, &link_speed, &link_up); + ixgbe_check_link(&adapter->hw, &adapter->link_speed, &link_up, 0); if (link_up){ if (adapter->link_active == FALSE) { if (bootverbose) - device_printf(dev,"Link is up %d Mbps %s \n", - 10000, "Full Duplex"); + device_printf(dev,"Link is up %d Gbps %s \n", + ((adapter->link_speed == 128)? 10:1), + "Full Duplex"); adapter->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } @@ -1361,6 +1513,9 @@ ixgbe_update_link_status(struct adapter *adapter) device_printf(dev,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); adapter->link_active = FALSE; + for (int i = 0; i < adapter->num_tx_queues; + i++, txr++) + txr->watchdog_timer = FALSE; } } @@ -1425,14 +1580,70 @@ ixgbe_identify_hardware(struct adapter *adapter) return; } -#if __FreeBSD_version >= 700000 /********************************************************************* * - * Setup MSIX: this is a prereq for doing Multiqueue/RSS. 
+ * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ static int -ixgbe_setup_msix(struct adapter *adapter) +ixgbe_allocate_legacy(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; + int error; + + /* Legacy RID at 0 */ + if (adapter->msix == 0) + adapter->rid[0] = 0; + + /* We allocate a single interrupt resource */ + adapter->res[0] = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE); + if (adapter->res[0] == NULL) { + device_printf(dev, "Unable to allocate bus resource: " + "interrupt\n"); + return (ENXIO); + } + + /* + * Try allocating a fast interrupt and the associated deferred + * processing contexts. + */ + TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr); + TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr); + TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); + txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT, + taskqueue_thread_enqueue, &txr->tq); + rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT, + taskqueue_thread_enqueue, &rxr->tq); + taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", + device_get_nameunit(adapter->dev)); + taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq", + device_get_nameunit(adapter->dev)); + if ((error = bus_setup_intr(dev, adapter->res[0], + INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, + adapter, &adapter->tag[0])) != 0) { + device_printf(dev, "Failed to register fast interrupt " + "handler: %d\n", error); + taskqueue_free(txr->tq); + taskqueue_free(rxr->tq); + txr->tq = NULL; + rxr->tq = NULL; + return (error); + } + + return (0); +} + + +/********************************************************************* + * + * Setup MSIX Interrupt resources and handlers + * + **********************************************************************/ +static int +ixgbe_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; @@ -1459,7 +1670,14 @@ ixgbe_setup_msix(struct adapter *adapter) device_printf(dev, "Failed to register TX handler"); return (error); } - adapter->msix++; + txr->msix = vector; + txr->eims = IXGBE_IVAR_TX_QUEUE(vector); + /* Make tasklet for deferred handling - one per queue */ + TASK_INIT(&txr->tx_task, 0, ixgbe_handle_tx, txr); + txr->tq = taskqueue_create_fast("ixgbe_txq", M_NOWAIT, + taskqueue_thread_enqueue, &txr->tq); + taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s rxq", + device_get_nameunit(adapter->dev)); } /* RX setup */ @@ -1482,7 +1700,13 @@ ixgbe_setup_msix(struct adapter *adapter) device_printf(dev, "Failed to register RX handler"); return (error); } - adapter->msix++; + rxr->msix = vector; + rxr->eims = IXGBE_IVAR_RX_QUEUE(vector); + TASK_INIT(&rxr->rx_task, 0, ixgbe_handle_rx, rxr); + rxr->tq = taskqueue_create_fast("ixgbe_rxq", M_NOWAIT, + taskqueue_thread_enqueue, &rxr->tq); + taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq", + device_get_nameunit(adapter->dev)); } /* Now for Link changes */ @@ -1502,177 +1726,132 @@ ixgbe_setup_msix(struct adapter *adapter) device_printf(dev, "Failed to register LINK handler"); return (error); } - adapter->msix++; + adapter->linkvec = vector; + TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); return (0); } -#endif + + +/* + * Setup Either MSI/X or MSI + */ +static int +ixgbe_setup_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid, want, queues, msgs; + + /* First 
try MSI/X */ + rid = PCIR_BAR(IXGBE_MSIX_BAR); + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (!adapter->msix_mem) { + /* May not be enabled */ + device_printf(adapter->dev, + "Unable to map MSIX table \n"); + goto msi; + } + + msgs = pci_msix_count(dev); + if (msgs == 0) { /* system has msix disabled */ + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem); + adapter->msix_mem = NULL; + goto msi; + } + + /* Figure out a reasonable auto config value */ + queues = (mp_ncpus > ((msgs-1)/2)) ? (msgs-1)/2 : mp_ncpus; + + if (ixgbe_tx_queues == 0) + ixgbe_tx_queues = queues; + if (ixgbe_rx_queues == 0) + ixgbe_rx_queues = queues; + want = ixgbe_tx_queues + ixgbe_rx_queues + 1; + if (msgs >= want) + msgs = want; + else { + device_printf(adapter->dev, + "MSIX Configuration Problem, " + "%d vectors but %d queues wanted!\n", + msgs, want); + return (ENXIO); + } + if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) { + device_printf(adapter->dev, + "Using MSIX interrupts with %d vectors\n", msgs); + adapter->num_tx_queues = ixgbe_tx_queues; + adapter->num_rx_queues = ixgbe_rx_queues; + return (msgs); + } +msi: + msgs = pci_msi_count(dev); + if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0) + device_printf(adapter->dev,"Using MSI interrupt\n"); + return (msgs); +} static int ixgbe_allocate_pci_resources(struct adapter *adapter) { - int error, rid; + int rid; device_t dev = adapter->dev; rid = PCIR_BAR(0); - adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (!(adapter->res_memory)) { + if (!(adapter->pci_mem)) { device_printf(dev,"Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = - rman_get_bustag(adapter->res_memory); + rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = - rman_get_bushandle(adapter->res_memory); - adapter->hw.hw_addr = (uint8_t *) &adapter->osdep.mem_bus_space_handle; + rman_get_bushandle(adapter->pci_mem); + adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; /* - * First try to setup MSI/X interrupts, - * if that fails fall back to Legacy. 
+ * Init the resource arrays */ - if (ixgbe_allocate_msix_resources(adapter)) { - int val; - - adapter->num_tx_queues = 1; - adapter->num_rx_queues = 1; - val = 0; - -#if __FreeBSD_version >= 700000 - /* Attempt to use MSI */ - val = pci_msi_count(dev); - if ((val) && pci_alloc_msi(dev, &val) == 0) { - adapter->rid[0] = 1; - device_printf(dev, "MSI Interrupts enabled\n"); - } else -#endif - { - adapter->rid[0] = 0; - device_printf(dev, "Legacy Interrupts enabled\n"); - } - adapter->res[0] = bus_alloc_resource_any(dev, - SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE); - if (adapter->res[0] == NULL) { - device_printf(dev, "Unable to allocate bus " - "resource: interrupt\n"); - return (ENXIO); - } - /* Set the handler contexts */ - TASK_INIT(&adapter->rxtx_task, 0, ixgbe_rxtx, adapter); - TASK_INIT(&adapter->link_task, 0, ixgbe_link, adapter); - adapter->tq = taskqueue_create_fast("ix_taskq", M_NOWAIT, - taskqueue_thread_enqueue, &adapter->tq); - taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", - device_get_nameunit(adapter->dev)); -#if __FreeBSD_version < 700000 - error = bus_setup_intr(dev, adapter->res[0], - INTR_TYPE_NET | INTR_FAST, ixgbe_fast_irq, -#else - error = bus_setup_intr(dev, adapter->res[0], - INTR_TYPE_NET, ixgbe_fast_irq, NULL, -#endif - adapter, &adapter->tag[0]); - if (error) { - adapter->res[0] = NULL; - device_printf(dev, "Failed to register" - " Fast Legacy handler"); - return (error); - } + for (int i = 0; i < IXGBE_MSGS; i++) { + adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */ + adapter->tag[i] = NULL; + adapter->res[i] = NULL; } + /* Legacy defaults */ + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + /* Now setup MSI or MSI/X */ + adapter->msix = ixgbe_setup_msix(adapter); + adapter->hw.back = &adapter->osdep; return (0); } -#if __FreeBSD_version >= 700000 -/* - * Attempt to configure MSI/X, the prefered - * interrupt option. - */ -static int -ixgbe_allocate_msix_resources(struct adapter *adapter) -{ - int error, val, want, rid; - device_t dev = adapter->dev; - int vector = 1; - - - /* First map the MSIX table */ - rid = PCIR_BAR(3); - adapter->res_msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &rid, RF_ACTIVE); - if (!adapter->res_msix) { - device_printf(dev,"Unable to map MSIX table \n"); - return (ENXIO); - } - - /* Now figure out now many vectors we need to use */ - val = pci_msix_count(dev); - - /* check configured values */ - want = ixgbe_tx_queues + ixgbe_rx_queues + ixgbe_other_queues; - /* - * We arent going to do anything fancy for now, - * we either can meet desired config or we fail. 
- */ - if (val >= want) - val = want; - else - return (ENXIO); - - /* Initialize the resource arrays */ - for (int i = 0; i < IXGBE_MSGS; i++, vector++) { - adapter->rid[i] = vector; - adapter->tag[i] = NULL; - adapter->res[i] = NULL; - } - - adapter->num_tx_queues = ixgbe_tx_queues; - adapter->num_rx_queues = ixgbe_rx_queues; - - /* Now allocate the vectors */ - if ((error = pci_alloc_msix(dev, &val)) == 0) { - adapter->msix = 1; - device_printf(dev, - "MSI/X enabled with %d vectors\n", val); - } else { - device_printf(dev, - "FAIL pci_alloc_msix() %d\n", error); - return (error); - } - return (0); -} -#else /* FreeBSD 6.2 */ -static int -ixgbe_allocate_msix_resources(struct adapter *adapter) -{ - return (1); /* Force Legacy behavior for 6.2 */ -} -#endif - static void ixgbe_free_pci_resources(struct adapter * adapter) { device_t dev = adapter->dev; - int i, loop; /* * Legacy has this set to 0, but we need * to run this once, so reset it. */ - if (adapter->msix) - loop = adapter->msix; - else - loop = 1; + if (adapter->msix == 0) + adapter->msix = 1; + /* * First release all the interrupt resources: * notice that since these are just kept * in an array we can do the same logic * whether its MSIX or just legacy. */ - for (i = 0; i < loop; i++) { + for (int i = 0; i < adapter->msix; i++) { if (adapter->tag[i] != NULL) { bus_teardown_intr(dev, adapter->res[i], adapter->tag[i]); @@ -1684,12 +1863,16 @@ ixgbe_free_pci_resources(struct adapter * adapter) } } -#if __FreeBSD_version >= 700000 - pci_release_msi(dev); -#endif - if (adapter->res_memory != NULL) + if (adapter->msix) + pci_release_msi(dev); + + if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, - IXGBE_MMBA, adapter->res_memory); + PCIR_BAR(IXGBE_MSIX_BAR), adapter->msix_mem); + + if (adapter->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), adapter->pci_mem); return; } @@ -1706,7 +1889,7 @@ static int ixgbe_hardware_init(struct adapter *adapter) { device_t dev = adapter->dev; - uint16_t csum; + u16 csum; csum = 0; /* Issue a global reset */ @@ -1720,7 +1903,7 @@ ixgbe_hardware_init(struct adapter *adapter) } /* Get Hardware Flow Control setting */ - adapter->hw.fc.original_type = ixgbe_fc_full; + adapter->hw.fc.type = ixgbe_fc_full; adapter->hw.fc.pause_time = IXGBE_FC_PAUSE; adapter->hw.fc.low_water = IXGBE_FC_LO; adapter->hw.fc.high_water = IXGBE_FC_HI; @@ -1743,6 +1926,7 @@ static void ixgbe_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; + struct ixgbe_hw *hw = &adapter->hw; INIT_DEBUGOUT("ixgbe_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); @@ -1770,23 +1954,35 @@ ixgbe_setup_interface(device_t dev, struct adapter *adapter) */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); - if (adapter->msix) /* RSS and HWCSUM not compatible */ - ifp->if_capabilities |= IFCAP_TSO4; - else - ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4); + ifp->if_capabilities |= (IFCAP_HWCSUM | IFCAP_TSO4); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; + if ((hw->device_id == IXGBE_DEV_ID_82598AT) || + (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) + ixgbe_setup_link_speed(hw, (IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL), TRUE, TRUE); + else + ixgbe_setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, + TRUE, FALSE); + /* * Specify the media types supported by this adapter and register * callbacks to update media and link information 
*/ ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, ixgbe_media_status); - ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_SR | - IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | ixgbe_optics | + IFM_FDX, 0, NULL); + if ((hw->device_id == IXGBE_DEV_ID_82598AT) || + (hw->device_id == IXGBE_DEV_ID_82598AT_DUAL_PORT)) { + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T, 0, NULL); + } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); @@ -1882,6 +2078,7 @@ ixgbe_allocate_queues(struct adapter *adapter) struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize, error = IXGBE_SUCCESS; + char name_string[16]; int txconf = 0, rxconf = 0; /* First allocate the TX ring struct memory */ @@ -1904,26 +2101,25 @@ ixgbe_allocate_queues(struct adapter *adapter) } rxr = adapter->rx_rings; + /* For the ring itself */ tsize = roundup2(adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc), 4096); + /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_tx_queues; i++, txconf++) { - char name_string[16]; /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; - /* - * Initialize the TX side lock - * -this has to change for multi tx - */ - snprintf(name_string, sizeof(name_string), "%s:tx", - device_get_nameunit(dev)); - mtx_init(&adapter->tx_mtx, name_string, NULL, MTX_DEF); + + /* Initialize the TX side lock */ + snprintf(name_string, sizeof(name_string), "%s:tx(%d)", + device_get_nameunit(dev), txr->me); + mtx_init(&txr->tx_mtx, name_string, NULL, MTX_DEF); if (ixgbe_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { @@ -1956,6 +2152,11 @@ ixgbe_allocate_queues(struct adapter *adapter) rxr->adapter = adapter; rxr->me = i; + /* Initialize the TX side lock */ + snprintf(name_string, sizeof(name_string), "%s:rx(%d)", + device_get_nameunit(dev), rxr->me); + mtx_init(&rxr->rx_mtx, name_string, NULL, MTX_DEF); + if (ixgbe_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, @@ -2078,8 +2279,6 @@ ixgbe_setup_transmit_ring(struct tx_ring *txr) m_freem(txbuf->m_head); txbuf->m_head = NULL; } - /* clear the watch index */ - txbuf->next_eop = -1; } /* Set number of descriptors available */ @@ -2109,32 +2308,47 @@ ixgbe_setup_transmit_structures(struct adapter *adapter) /********************************************************************* * * Enable transmit unit. - * NOTE: this will need to be changed if there are more than - * one transmit queues. 
+ * **********************************************************************/ static void ixgbe_initialize_transmit_units(struct adapter *adapter) { - struct tx_ring *txr = adapter->tx_rings; - uint64_t tdba = txr->txdma.dma_paddr; + struct tx_ring *txr = adapter->tx_rings; + struct ixgbe_hw *hw = &adapter->hw; /* Setup the Base and Length of the Tx Descriptor Ring */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0), + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + u64 tdba = txr->txdma.dma_paddr; + u32 txctrl; + vm_paddr_t txhwb = 0; + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & 0x00000000ffffffffULL)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0), (tdba >> 32)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0), - adapter->num_tx_desc * - sizeof(struct ixgbe_legacy_tx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), + adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc)); - /* Setup the HW Tx Head and Tail descriptor pointers */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0); + /* Setup for Head WriteBack */ + txhwb = vtophys(&txr->tx_hwb); + txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i), + (txhwb & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i), + (txhwb >> 32)); + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT); + /* Setup the HW Tx Head and Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); - /* Setup Transmit Descriptor Cmd Settings */ - txr->txd_cmd = IXGBE_TXD_CMD_IFCS; + /* Setup Transmit Descriptor Cmd Settings */ + txr->txd_cmd = IXGBE_TXD_CMD_IFCS; + + txr->watchdog_timer = 0; + } return; } @@ -2148,13 +2362,14 @@ static void ixgbe_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; - mtx_lock(&adapter->tx_mtx); + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + IXGBE_TX_LOCK(txr); ixgbe_free_transmit_buffers(txr); ixgbe_dma_free(adapter, &txr->txdma); + IXGBE_TX_UNLOCK(txr); + IXGBE_TX_LOCK_DESTROY(txr); } - mtx_unlock(&adapter->tx_mtx); - mtx_destroy(&adapter->tx_mtx); free(adapter->tx_rings, M_DEVBUF); } @@ -2216,18 +2431,19 @@ ixgbe_free_transmit_buffers(struct tx_ring *txr) **********************************************************************/ static boolean_t -ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) +ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) { struct adapter *adapter = txr->adapter; struct ixgbe_adv_tx_context_desc *TXD; struct ixgbe_tx_buf *tx_buffer; - uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; - int ehdrlen, ip_hlen; + int ehdrlen, ip_hlen = 0; u16 etype; - u8 ipproto; + u8 ipproto = 0; + bool offload = TRUE; int ctxd = txr->next_avail_tx_desc; #if __FreeBSD_version < 700000 struct m_tag *mtag; @@ -2236,6 +2452,9 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) #endif + if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) + offload = FALSE; + tx_buffer = &txr->tx_buffers[ctxd]; TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; @@ -2245,14 +2464,17 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) */ #if __FreeBSD_version < 700000 mtag = 
VLAN_OUTPUT_TAG(ifp, mp); - if (mtag != NULL) + if (mtag != NULL) { vlan_macip_lens |= htole16(VLAN_TAG_VALUE(mtag)) << IXGBE_ADVTXD_VLAN_SHIFT; + } else if (offload == FALSE) + return FALSE; /* No need for CTX */ #else if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); - } + } else if (offload == FALSE) + return FALSE; #endif /* * Determine where frame payload starts. @@ -2289,7 +2511,8 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; break; default: - return FALSE; + offload = FALSE; + break; } vlan_macip_lens |= ip_hlen; @@ -2304,6 +2527,9 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) if (mp->m_pkthdr.csum_flags & CSUM_UDP) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; break; + default: + offload = FALSE; + break; } /* Now copy bits into descriptor */ @@ -2318,7 +2544,6 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) #endif tx_buffer->m_head = NULL; - tx_buffer->next_eop = -1; /* We've consumed the first desc, adjust counters */ if (++ctxd == adapter->num_tx_desc) @@ -2326,7 +2551,7 @@ ixgbe_tx_csum_setup(struct tx_ring *txr, struct mbuf *mp) txr->next_avail_tx_desc = ctxd; --txr->tx_avail; - return TRUE; + return (offload); } #if __FreeBSD_version >= 700000 @@ -2410,7 +2635,6 @@ ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) TXD->seqnum_seed = htole32(0); tx_buffer->m_head = NULL; - tx_buffer->next_eop = -1; #ifndef NO_82598_A0_SUPPORT if (adapter->hw.revision_id == 0) @@ -2448,9 +2672,9 @@ ixgbe_txeof(struct tx_ring *txr) struct ifnet *ifp = adapter->ifp; int first, last, done, num_avail; struct ixgbe_tx_buf *tx_buffer; - struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc; + struct ixgbe_legacy_tx_desc *tx_desc; - mtx_assert(&adapter->tx_mtx, MA_OWNED); + mtx_assert(&txr->mtx, MA_OWNED); if (txr->tx_avail == adapter->num_tx_desc) return FALSE; @@ -2461,27 +2685,16 @@ ixgbe_txeof(struct tx_ring *txr) tx_buffer = &txr->tx_buffers[first]; /* For cleanup we just use legacy struct */ tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; - last = tx_buffer->next_eop; - if (last == -1) - return FALSE; - eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; - - /* - * What this does is get the index of the - * first descriptor AFTER the EOP of the - * first packet, that way we can do the - * simple comparison on the inner while loop - * below. 
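ixgbe_tx_ctx_setup() above now returns FALSE, without consuming a descriptor, when a frame needs neither checksum offload nor VLAN tag insertion. The up-front decision reduces to roughly the following sketch (hypothetical helper assuming <sys/mbuf.h> and FreeBSD 7 style tagging; the driver's CSUM_OFFLOAD mask is assumed to cover CSUM_IP|CSUM_TCP|CSUM_UDP):

    static int
    foo_needs_ctx_desc(struct mbuf *mp)
    {
            /* Any hardware checksum requested for this frame? */
            if (mp->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
                    return (1);
            /* An 802.1Q tag to insert on transmit? */
            if (mp->m_flags & M_VLANTAG)
                    return (1);
            /* Nothing for a context descriptor to describe. */
            return (0);
    }

Skipping the context descriptor in the common untagged, non-offloaded case saves one ring slot per packet.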
- */ - if (++last == adapter->num_tx_desc) last = 0; - done = last; + /* Get the HWB */ + rmb(); + done = txr->tx_hwb; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); - while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) { - /* We clean the range of the packet */ + while (TRUE) { + /* We clean the range til last head write back */ while (first != done) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; @@ -2499,7 +2712,6 @@ ixgbe_txeof(struct tx_ring *txr) tx_buffer->m_head = NULL; tx_buffer->map = NULL; } - tx_buffer->next_eop = -1; if (++first == adapter->num_tx_desc) first = 0; @@ -2508,17 +2720,12 @@ ixgbe_txeof(struct tx_ring *txr) tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; } - /* See if we can continue to the next packet */ - last = tx_buffer->next_eop; - if (last != -1) { - eop_desc = - (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; - /* Get new done point */ - if (++last == adapter->num_tx_desc) last = 0; - done = last; - } else + /* See if there is more work now */ + last = done; + rmb(); + done = txr->tx_hwb; + if (last == done) break; - } bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); @@ -2534,11 +2741,14 @@ ixgbe_txeof(struct tx_ring *txr) if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* If all are clean turn off the timer */ - if (num_avail == adapter->num_tx_desc) - adapter->watchdog_timer = 0; + if (num_avail == adapter->num_tx_desc) { + txr->watchdog_timer = 0; + txr->tx_avail = num_avail; + return FALSE; + } /* Some were cleaned, so reset timer */ - else if (num_avail == txr->tx_avail) - adapter->watchdog_timer = IXGBE_TX_TIMEOUT; + else if (num_avail != txr->tx_avail) + txr->watchdog_timer = IXGBE_TX_TIMEOUT; } txr->tx_avail = num_avail; @@ -2724,10 +2934,13 @@ static int ixgbe_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter; - struct ixgbe_rx_buf *rxbuf; - int j, rsize, s; + device_t dev; + struct ixgbe_rx_buf *rxbuf; + struct lro_ctrl *lro = &rxr->lro; + int j, rsize, s = 0; adapter = rxr->adapter; + dev = adapter->dev; rsize = roundup2(adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc), 4096); /* Clear the ring contents */ @@ -2767,6 +2980,18 @@ ixgbe_setup_receive_ring(struct rx_ring *rxr) bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + /* Now set up the LRO interface */ + if (ixgbe_enable_lro) { + int err = tcp_lro_init(lro); + if (err) { + device_printf(dev,"LRO Initialization failed!\n"); + goto fail; + } + device_printf(dev,"RX LRO Initialized\n"); + lro->ifp = adapter->ifp; + } + + return (0); fail: /* @@ -2897,6 +3122,8 @@ ixgbe_initialize_receive_units(struct adapter *adapter) adapter->num_rx_desc - 1); } + rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM); + if (adapter->num_rx_queues > 1) { /* set up random bits */ arc4rand(&random, sizeof(random), 0); @@ -2941,16 +3168,17 @@ ixgbe_initialize_receive_units(struct adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_MRQC, mrqc); /* RSS and RX IPP Checksum are mutually exclusive */ - rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM); rxcsum |= IXGBE_RXCSUM_PCSD; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum); - } else { - rxcsum = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCSUM); - if (ifp->if_capenable & IFCAP_RXCSUM) - rxcsum |= IXGBE_RXCSUM_IPPCSE; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum); } + if (ifp->if_capenable & IFCAP_RXCSUM) + rxcsum |= IXGBE_RXCSUM_PCSD; 
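With head write-back enabled through TDWBAL/TDWBAH above, the hardware DMAs the index of the last completed descriptor into txr->tx_hwb, so ixgbe_txeof() no longer chases DD bits through EOP descriptors. The heart of that scheme looks roughly like this sketch (field names follow the driver's tx_ring; the helper itself is illustrative and omits DMA map unloading):

    static int
    foo_txeof_hwb(struct tx_ring *txr, int ring_size)
    {
            u32 head, i = txr->next_tx_to_clean;
            int freed = 0;

            rmb();                  /* make sure we see the latest write-back value */
            head = txr->tx_hwb;     /* last descriptor the hardware has consumed    */

            while (i != head) {
                    struct ixgbe_tx_buf *buf = &txr->tx_buffers[i];

                    if (buf->m_head != NULL) {
                            m_freem(buf->m_head);   /* packet is on the wire */
                            buf->m_head = NULL;
                    }
                    if (++i == ring_size)           /* the ring wraps */
                            i = 0;
                    freed++;
            }
            txr->next_tx_to_clean = i;
            return (freed);         /* descriptors returned to tx_avail */
    }

Because the head index lives in host memory rather than a device register, the rmb() keeps the CPU from recycling descriptors based on a stale, prefetched value of tx_hwb.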
+ + if (!(rxcsum & IXGBE_RXCSUM_PCSD)) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCSUM, rxcsum); + /* Enable Receive engine */ rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS); IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rxctrl); @@ -2969,7 +3197,10 @@ ixgbe_free_receive_structures(struct adapter *adapter) struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) { + struct lro_ctrl *lro = &rxr->lro; ixgbe_free_receive_buffers(rxr); + /* Free LRO memory */ + tcp_lro_free(lro); /* Free the ring memory as well */ ixgbe_dma_free(adapter, &rxr->rxdma); } @@ -3028,24 +3259,29 @@ ixgbe_free_receive_buffers(struct rx_ring *rxr) * count < 0. * *********************************************************************/ -static int +static bool ixgbe_rxeof(struct rx_ring *rxr, int count) { struct adapter *adapter = rxr->adapter; struct ifnet *ifp = adapter->ifp; + struct lro_ctrl *lro = &rxr->lro; + struct lro_entry *queued; struct mbuf *mp; int len, i, eop = 0; - uint8_t accept_frame = 0; - uint32_t staterr; + u8 accept_frame = 0; + u32 staterr; union ixgbe_adv_rx_desc *cur; + IXGBE_RX_LOCK(rxr); i = rxr->next_to_check; cur = &rxr->rx_base[i]; staterr = cur->wb.upper.status_error; - if (!(staterr & IXGBE_RXD_STAT_DD)) - return (0); + if (!(staterr & IXGBE_RXD_STAT_DD)) { + IXGBE_RX_UNLOCK(rxr); + return FALSE; + } while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { @@ -3146,8 +3382,13 @@ ixgbe_rxeof(struct rx_ring *rxr, int count) /* Now send up to the stack */ if (m != NULL) { rxr->next_to_check = i; - (*ifp->if_input)(ifp, m); - i = rxr->next_to_check; + /* Use LRO if possible */ + if ((!lro->lro_cnt) || (tcp_lro_rx(lro, m, 0))) { + IXGBE_RX_UNLOCK(rxr); + (*ifp->if_input)(ifp, m); + IXGBE_RX_LOCK(rxr); + i = rxr->next_to_check; + } } /* Get next descriptor */ cur = &rxr->rx_base[i]; @@ -3157,11 +3398,23 @@ ixgbe_rxeof(struct rx_ring *rxr, int count) /* Advance the IXGB's Receive Queue "Tail Pointer" */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned); + IXGBE_RX_UNLOCK(rxr); + + /* + ** Flush any outstanding LRO work + ** this may call into the stack and + ** must not hold a driver lock. 
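The receive path above offers every completed frame to LRO first and only calls if_input() directly when LRO declines it (or is idle); after the descriptor loop, anything still queued on lro_active has to be flushed, and that happens with the RX lock dropped because tcp_lro_flush() calls up into the stack. Condensed, the pattern using the same tcp_lro API is:

    struct lro_ctrl *lro = &rxr->lro;
    struct lro_entry *queued;

    /* Per frame: try to aggregate, otherwise hand it straight up. */
    if (lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0) != 0)
            (*ifp->if_input)(ifp, m);

    /* After the RX loop, lock released: flush what LRO held back. */
    while (!SLIST_EMPTY(&lro->lro_active)) {
            queued = SLIST_FIRST(&lro->lro_active);
            SLIST_REMOVE_HEAD(&lro->lro_active, next);
            tcp_lro_flush(lro, queued);
    }

When the ixgbe_enable_lro tunable is off, tcp_lro_init() never runs, lro_cnt stays zero, and every frame takes the plain if_input() path.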
+ */ + while(!SLIST_EMPTY(&lro->lro_active)) { + queued = SLIST_FIRST(&lro->lro_active); + SLIST_REMOVE_HEAD(&lro->lro_active, next); + tcp_lro_flush(lro, queued); + } if (!(staterr & IXGBE_RXD_STAT_DD)) - return (0); + return FALSE; - return (1); + return TRUE; } /********************************************************************* @@ -3173,13 +3426,14 @@ ixgbe_rxeof(struct rx_ring *rxr, int count) *********************************************************************/ static void ixgbe_rx_checksum(struct adapter *adapter, - uint32_t staterr, struct mbuf * mp) + u32 staterr, struct mbuf * mp) { - uint16_t status = (uint16_t) staterr; - uint8_t errors = (uint8_t) (staterr >> 24); + struct ifnet *ifp = adapter->ifp; + u16 status = (u16) staterr; + u8 errors = (u8) (staterr >> 24); - /* Not offloaded */ - if (status & IXGBE_RXD_STAT_IXSM) { + /* Not offloading */ + if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) { mp->m_pkthdr.csum_flags = 0; return; } @@ -3207,9 +3461,9 @@ ixgbe_rx_checksum(struct adapter *adapter, static void -ixgbe_enable_vlans(struct adapter *adapter) +ixgbe_enable_hw_vlans(struct adapter *adapter) { - uint32_t ctrl; + u32 ctrl; ixgbe_disable_intr(adapter); ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); @@ -3225,19 +3479,23 @@ ixgbe_enable_vlans(struct adapter *adapter) static void ixgbe_enable_intr(struct adapter *adapter) { - u32 mask; + struct ixgbe_hw *hw = &adapter->hw; + u32 mask = IXGBE_EIMS_ENABLE_MASK; - /* With RSS set up what to auto clear */ - if (adapter->msix) { - mask = IXGBE_EIMS_ENABLE_MASK; + /* Enable Fan Failure detection */ + if (hw->phy.media_type == ixgbe_media_type_copper) + mask |= IXGBE_EIMS_GPI_SDP1; + /* With RSS we use auto clear */ + if (adapter->msix_mem) { + /* Dont autoclear Link */ mask &= ~IXGBE_EIMS_OTHER; mask &= ~IXGBE_EIMS_LSC; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, + adapter->eims_mask | mask); } - mask = IXGBE_EIMS_ENABLE_MASK; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_ENABLE_MASK); - IXGBE_WRITE_FLUSH(&adapter->hw); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + IXGBE_WRITE_FLUSH(hw); return; } @@ -3245,7 +3503,10 @@ ixgbe_enable_intr(struct adapter *adapter) static void ixgbe_disable_intr(struct adapter *adapter) { + if (adapter->msix_mem) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + IXGBE_WRITE_FLUSH(&adapter->hw); return; } @@ -3264,9 +3525,11 @@ static void ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector) { u32 ivar, index; + vector |= IXGBE_IVAR_ALLOC_VAL; index = (entry >> 2) & 0x1F; ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (entry & 0x3))); ivar |= (vector << (8 * (entry & 0x3))); IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); } @@ -3274,16 +3537,23 @@ ixgbe_set_ivar(struct adapter *adapter, u16 entry, u8 vector) static void ixgbe_configure_ivars(struct adapter *adapter) { - int i, vec; + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; - for (i = 0, vec = 1; i < adapter->num_rx_queues; i++, vec++) - ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), vec); + for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) { + ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), rxr->msix); + adapter->eims_mask |= rxr->eims; + } - for (i = 0, vec = 8; i < adapter->num_tx_queues; i++, vec++) - ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), vec); + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + ixgbe_set_ivar(adapter, 
IXGBE_IVAR_TX_QUEUE(i), txr->msix); + adapter->eims_mask |= txr->eims; + } /* For the Link interrupt */ - ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0); + ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, + adapter->linkvec); + adapter->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX; } /********************************************************************** @@ -3294,33 +3564,37 @@ ixgbe_configure_ivars(struct adapter *adapter) static void ixgbe_update_stats_counters(struct adapter *adapter) { - struct ifnet *ifp; + struct ifnet *ifp = adapter->ifp;; struct ixgbe_hw *hw = &adapter->hw; - u64 good_rx, missed_rx; + u32 missed_rx = 0, bprc, lxon, lxoff, total; adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); - good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC); - missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6)); - missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7)); + for (int i = 0; i < 8; i++) { + int mp; + mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + missed_rx += mp; + adapter->stats.mpc[i] += mp; + adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + } - adapter->stats.gprc += (good_rx - missed_rx); + /* Hardware workaround, gprc counts missed packets */ + adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + adapter->stats.gprc -= missed_rx; - adapter->stats.mpc[0] += missed_rx; adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); - adapter->stats.bprc += IXGBE_READ_REG(hw, IXGBE_BPRC); - adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + /* * Workaround: mprc hardware is incorrectly counting * broadcasts, so for now we subtract those. 
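ixgbe_set_ivar() above packs four 8-bit interrupt-cause entries into each IVAR register: entry >> 2 selects the register and entry & 3 selects the byte lane, with IXGBE_IVAR_ALLOC_VAL marking the entry valid. Spelled out (the masking added by this patch is what keeps a remap from ORing into a stale value):

    u32 index = (entry >> 2) & 0x1F;        /* which IVAR register       */
    u32 shift = 8 * (entry & 0x3);          /* which byte lane within it */
    u32 ivar  = IXGBE_READ_REG(hw, IXGBE_IVAR(index));

    ivar &= ~(0xFF << shift);               /* clear the previous entry  */
    ivar |= (vector | IXGBE_IVAR_ALLOC_VAL) << shift;
    IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);

    /* Example: entry 5 lands in IVAR(1), bits 15:8. */

ixgbe_configure_ivars() then walks the RX and TX rings, pointing each queue's cause at that ring's MSI-X vector and accumulating the vectors into eims_mask for the auto-clear setup in ixgbe_enable_intr().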
*/ - adapter->stats.mprc -= adapter->stats.bprc; + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + adapter->stats.bprc += bprc; + adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + adapter->stats.mprc -= bprc; + adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); @@ -3328,32 +3602,36 @@ ixgbe_update_stats_counters(struct adapter *adapter) adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); - adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); - adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC); adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); - adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + adapter->stats.lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + adapter->stats.lxofftxc += lxoff; + total = lxon + lxoff; + adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); - adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0)); + adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + adapter->stats.gptc -= total; + adapter->stats.mptc -= total; + adapter->stats.ptc64 -= total; + adapter->stats.gotc -= total * ETHER_MIN_LEN; + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); - adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); - adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); - adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); - adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); - ifp = adapter->ifp; /* Fill out the OS statistics structure */ ifp->if_ipackets = adapter->stats.gprc; @@ -3364,11 +3642,8 @@ ixgbe_update_stats_counters(struct adapter *adapter) ifp->if_collisions = 0; /* Rx Errors */ - ifp->if_ierrors = - adapter->stats.mpc[0] + - adapter->stats.crcerrs + + ifp->if_ierrors = missed_rx + adapter->stats.crcerrs + adapter->stats.rlec; - } @@ -3385,10 +3660,6 @@ ixgbe_print_hw_stats(struct adapter * adapter) device_t dev = adapter->dev; - device_printf(dev,"Tx Descriptors not Avail1 = %lu\n", - adapter->no_tx_desc_avail1); - device_printf(dev,"Tx Descriptors not Avail2 = %lu\n", - adapter->no_tx_desc_avail2); device_printf(dev,"Std Mbuf Failed = %lu\n", adapter->mbuf_alloc_failed); device_printf(dev,"Std Cluster Failed = %lu\n", @@ -3403,6 +3674,8 @@ ixgbe_print_hw_stats(struct adapter * adapter) (long long)adapter->stats.crcerrs); device_printf(dev,"Driver dropped packets = %lu\n", adapter->dropped_pkts); + device_printf(dev, "watchdog timeouts = %ld\n", + adapter->watchdog_events); device_printf(dev,"XON Rcvd = %llu\n", (long long)adapter->stats.lxonrxc); @@ -3437,49 
+3710,43 @@ ixgbe_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct rx_ring *rxr = adapter->rx_rings; + struct tx_ring *txr = adapter->tx_rings; struct ixgbe_hw *hw = &adapter->hw; - uint8_t *hw_addr = adapter->hw.hw_addr; - device_printf(dev,"Adapter hardware address = %p \n", hw_addr); - device_printf(dev,"CTRL = 0x%x RXCTRL = 0x%x \n", - IXGBE_READ_REG(hw, IXGBE_TXDCTL(0)), - IXGBE_READ_REG(hw, IXGBE_RXCTRL)); - device_printf(dev,"RXDCTL(0) = 0x%x RXDCTL(1) = 0x%x" - " RXCTRL(2) = 0x%x \n", - IXGBE_READ_REG(hw, IXGBE_RXDCTL(0)), - IXGBE_READ_REG(hw, IXGBE_RXDCTL(1)), - IXGBE_READ_REG(hw, IXGBE_RXDCTL(2))); - device_printf(dev,"SRRCTL(0) = 0x%x SRRCTL(1) = 0x%x" - " SRRCTL(2) = 0x%x \n", - IXGBE_READ_REG(hw, IXGBE_SRRCTL(0)), - IXGBE_READ_REG(hw, IXGBE_SRRCTL(1)), - IXGBE_READ_REG(hw, IXGBE_SRRCTL(2))); - device_printf(dev,"EIMC = 0x%x EIMS = 0x%x\n", - IXGBE_READ_REG(hw, IXGBE_EIMC), - IXGBE_READ_REG(hw, IXGBE_EIMS)); - device_printf(dev,"Queue(0) tdh = %d, hw tdt = %d\n", - IXGBE_READ_REG(hw, IXGBE_TDH(0)), - IXGBE_READ_REG(hw, IXGBE_TDT(0))); device_printf(dev,"Error Byte Count = %u \n", IXGBE_READ_REG(hw, IXGBE_ERRBC)); for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) { - device_printf(dev,"Queue %d Packets Received: %lu\n", - rxr->me, (long)rxr->packet_count); - } - - rxr = adapter->rx_rings; // Reset - for (int i = 0; i < adapter->num_rx_queues; i++, rxr++) { - device_printf(dev,"Queue %d Bytes Received: %lu\n", - rxr->me, (long)rxr->byte_count); - } - - for (int i = 0; i < adapter->num_rx_queues; i++) { + struct lro_ctrl *lro = &rxr->lro; device_printf(dev,"Queue[%d]: rdh = %d, hw rdt = %d\n", i, IXGBE_READ_REG(hw, IXGBE_RDH(i)), IXGBE_READ_REG(hw, IXGBE_RDT(i))); + device_printf(dev,"RX(%d) Packets Received: %lu\n", + rxr->me, (long)rxr->packet_count); + device_printf(dev,"RX(%d) Bytes Received: %lu\n", + rxr->me, (long)rxr->byte_count); + device_printf(dev,"RX(%d) IRQ Handled: %lu\n", + rxr->me, (long)rxr->rx_irq); + device_printf(dev,"RX(%d) LRO Queued= %d\n", + rxr->me, lro->lro_queued); + device_printf(dev,"RX(%d) LRO Flushed= %d\n", + rxr->me, lro->lro_flushed); } + for (int i = 0; i < adapter->num_tx_queues; i++, txr++) { + device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", i, + IXGBE_READ_REG(hw, IXGBE_TDH(i)), + IXGBE_READ_REG(hw, IXGBE_TDT(i))); + device_printf(dev,"TX(%d) Packets Sent: %lu\n", + txr->me, (long)txr->tx_irq); + device_printf(dev,"TX(%d) IRQ Handled: %lu\n", + txr->me, (long)txr->tx_irq); + device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", + txr->me, (long)txr->no_tx_desc_avail); + } + + device_printf(dev,"Link IRQ Handled: %lu\n", + (long)adapter->link_irq); return; } @@ -3546,11 +3813,11 @@ ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS) case ixgbe_fc_rx_pause: case ixgbe_fc_tx_pause: case ixgbe_fc_full: - adapter->hw.fc.original_type = ixgbe_flow_control; + adapter->hw.fc.type = ixgbe_flow_control; break; case ixgbe_fc_none: default: - adapter->hw.fc.original_type = ixgbe_fc_none; + adapter->hw.fc.type = ixgbe_fc_none; } ixgbe_setup_fc(&adapter->hw, 0); diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h index c496244db726..4b80590136da 100644 --- a/sys/dev/ixgbe/ixgbe.h +++ b/sys/dev/ixgbe/ixgbe.h @@ -1,36 +1,36 @@ -/************************************************************************** +/****************************************************************************** -Copyright (c) 2001-2007, Intel Corporation -All rights reserved. + Copyright (c) 2001-2008, Intel Corporation + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -***************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifndef _IXGBE_H_ #define _IXGBE_H_ @@ -80,8 +80,10 @@ POSSIBILITY OF SUCH DAMAGE. #include #include #include +#include #include "ixgbe_api.h" +#include "tcp_lro.h" /* Tunables */ @@ -157,7 +159,7 @@ POSSIBILITY OF SUCH DAMAGE. 
#define MAX_NUM_MULTICAST_ADDRESSES 128 #define IXGBE_MAX_SCATTER 100 -#define IXGBE_MMBA 0x0010 +#define IXGBE_MSIX_BAR 3 #define IXGBE_TSO_SIZE 65535 #define IXGBE_TX_BUFFER_SIZE ((u32) 1514) #define IXGBE_RX_HDR_SIZE ((u32) 256) @@ -184,6 +186,9 @@ POSSIBILITY OF SUCH DAMAGE. #define DEFAULT_ITR 1000000000/(MAX_IRQ_SEC * 256) #define LINK_ITR 1000000000/(1950 * 256) +/* Used for auto RX queue configuration */ +extern int mp_ncpus; + /* * ****************************************************************************** * vendor_info_array @@ -203,7 +208,6 @@ typedef struct _ixgbe_vendor_info_t { struct ixgbe_tx_buf { - int next_eop; struct mbuf *m_head; bus_dmamap_t map; }; @@ -233,15 +237,27 @@ struct ixgbe_dma_alloc { */ struct tx_ring { struct adapter *adapter; + struct mtx tx_mtx; u32 me; + u32 msix; + u32 eims; + u32 watchdog_timer; union ixgbe_adv_tx_desc *tx_base; + volatile u32 tx_hwb; struct ixgbe_dma_alloc txdma; - uint32_t next_avail_tx_desc; - uint32_t next_tx_to_clean; + struct task tx_task; + struct taskqueue *tq; + u32 next_avail_tx_desc; + u32 next_tx_to_clean; struct ixgbe_tx_buf *tx_buffers; - volatile uint16_t tx_avail; - uint32_t txd_cmd; + volatile u16 tx_avail; + u32 txd_cmd; bus_dma_tag_t txtag; + /* Soft Stats */ + u32 no_tx_desc_avail; + u32 no_tx_desc_late; + u64 tx_irq; + u64 tx_packets; }; @@ -249,21 +265,28 @@ struct tx_ring { * The Receive ring, one per rx queue */ struct rx_ring { - struct adapter *adapter; - u32 me; - u32 payload; - union ixgbe_adv_rx_desc *rx_base; - struct ixgbe_dma_alloc rxdma; - unsigned int last_cleaned; - unsigned int next_to_check; - struct ixgbe_rx_buf *rx_buffers; - bus_dma_tag_t rxtag[2]; - bus_dmamap_t spare_map[2]; - struct mbuf *fmp; - struct mbuf *lmp; + struct adapter *adapter; + struct mtx rx_mtx; + u32 me; + u32 msix; + u32 eims; + u32 payload; + struct task rx_task; + struct taskqueue *tq; + union ixgbe_adv_rx_desc *rx_base; + struct ixgbe_dma_alloc rxdma; + struct lro_ctrl lro; + unsigned int last_cleaned; + unsigned int next_to_check; + struct ixgbe_rx_buf *rx_buffers; + bus_dma_tag_t rxtag[2]; + bus_dmamap_t spare_map[2]; + struct mbuf *fmp; + struct mbuf *lmp; /* Soft stats */ - u64 packet_count; - u64 byte_count; + u64 rx_irq; + u64 packet_count; + u64 byte_count; }; /* Our adapter structure */ @@ -273,10 +296,10 @@ struct adapter { /* FreeBSD operating-system-specific structures */ struct ixgbe_osdep osdep; - struct device *dev; - struct resource *res_memory; - struct resource *res_msix; + + struct resource *pci_mem; + struct resource *msix_mem; /* * Interrupt resources: @@ -286,31 +309,30 @@ struct adapter { void *tag[IXGBE_MSGS]; struct resource *res[IXGBE_MSGS]; int rid[IXGBE_MSGS]; + u32 eims_mask; struct ifmedia media; struct callout timer; - int watchdog_timer; int msix; int if_flags; + struct mtx core_mtx; - struct mtx tx_mtx; + /* Legacy Fast Intr handling */ struct task link_task; - struct task rxtx_task; - struct taskqueue *tq; /* Info about the board itself */ - uint32_t part_num; - boolean_t link_active; - uint16_t max_frame_size; - uint16_t link_duplex; - uint32_t tx_int_delay; - uint32_t tx_abs_int_delay; - uint32_t rx_int_delay; - uint32_t rx_abs_int_delay; + u32 part_num; + bool link_active; + u16 max_frame_size; + u32 link_speed; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 rx_int_delay; + u32 rx_abs_int_delay; /* Indicates the cluster size to use */ - boolean_t bigbufs; + bool bigbufs; /* * Transmit rings: @@ -327,20 +349,35 @@ struct adapter { struct rx_ring *rx_rings; int num_rx_desc; int 
num_rx_queues; - uint32_t rx_process_limit; + u32 rx_process_limit; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long mbuf_alloc_failed; unsigned long mbuf_cluster_failed; - unsigned long no_tx_desc_avail1; - unsigned long no_tx_desc_avail2; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; unsigned long watchdog_events; unsigned long tso_tx; + unsigned long linkvec; + unsigned long link_irq; struct ixgbe_hw_stats stats; }; +#define IXGBE_CORE_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF) +#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) +#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) +#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) +#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) +#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) +#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) +#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) +#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) +#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) +#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) +#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) + + #endif /* _IXGBE_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c index 35d7e5e36a95..b7f8224c11c2 100644 --- a/sys/dev/ixgbe/ixgbe_82598.c +++ b/sys/dev/ixgbe/ixgbe_82598.c @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, Intel Corporation + Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,140 +29,135 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*******************************************************************************/ -/* $FreeBSD$ */ - +******************************************************************************/ +/*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" -#define IXGBE_82598_MAX_TX_QUEUES 32 -#define IXGBE_82598_MAX_RX_QUEUES 64 -#define IXGBE_82598_RAR_ENTRIES 16 - -s32 ixgbe_init_shared_code_82598(struct ixgbe_hw *hw); -s32 ixgbe_assign_func_pointers_82598(struct ixgbe_hw *hw); -s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); -s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg); +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); -u32 ixgbe_get_num_of_tx_queues_82598(struct ixgbe_hw *hw); -u32 ixgbe_get_num_of_rx_queues_82598(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num); s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); -s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); #ifndef NO_82598_A0_SUPPORT s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw); #endif s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); -u32 ixgbe_get_num_rx_addrs_82598(struct ixgbe_hw *hw); s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw); s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw); -s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); -s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); - +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index); /** - * ixgbe_init_shared_code_82598 - Inits func ptrs and MAC type + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * - * Initialize the shared code for 82598. This will assign function pointers - * and assign the MAC type. Does not touch the hardware. + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. 
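The shared-code refactor replaces the flat ixgbe_func_* pointer assignments with per-subsystem ops tables (hw->mac.ops, hw->phy.ops) filled in by ixgbe_init_ops_82598(), so generic code can call hw->mac.ops.reset_hw(hw) without caring which MAC it drives. The shape of the pattern, reduced to a sketch with an invented table name:

    struct foo_mac_ops {
            s32 (*reset_hw)(struct ixgbe_hw *);
            s32 (*setup_link)(struct ixgbe_hw *);
    };

    /* Family-specific init wires up the table once, at attach time. */
    static void
    foo_init_ops_82598(struct foo_mac_ops *ops)
    {
            ops->reset_hw   = &ixgbe_reset_hw_82598;
            ops->setup_link = &ixgbe_setup_mac_link_82598;
    }

    /* Generic code then stays MAC-agnostic:  status = ops->reset_hw(hw);  */

The same indirection is what lets the copper (82598AT) and fiber parts share one driver: the init routine installs ixgbe_setup_copper_link_82598 or ixgbe_setup_mac_link_82598 depending on the media type it detects.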
**/ -s32 ixgbe_init_shared_code_82598(struct ixgbe_hw *hw) +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) { - /* Assign function pointers */ - ixgbe_assign_func_pointers_82598(hw); + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; - return IXGBE_SUCCESS; -} + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); -/** - * ixgbe_assign_func_pointers_82598 - Assigns 82598-specific funtion pointers - * @hw: pointer to hardware structure - * - * Note - Generic function pointers have already been assigned, so the - * function pointers set here are only for 82598-specific functions. - **/ -s32 ixgbe_assign_func_pointers_82598(struct ixgbe_hw *hw) -{ - - hw->func.ixgbe_func_get_media_type = - &ixgbe_get_media_type_82598; - hw->func.ixgbe_func_get_num_of_tx_queues = - &ixgbe_get_num_of_tx_queues_82598; - hw->func.ixgbe_func_get_num_of_rx_queues = - &ixgbe_get_num_of_rx_queues_82598; - hw->func.ixgbe_func_read_analog_reg8 = - &ixgbe_read_analog_reg8_82598; - hw->func.ixgbe_func_write_analog_reg8 = - &ixgbe_write_analog_reg8_82598; + /* MAC */ #ifndef NO_82598_A0_SUPPORT - if (hw->revision_id == 0) { - hw->func.ixgbe_func_reset_hw = - &ixgbe_reset_hw_rev_0_82598; - } else { - hw->func.ixgbe_func_reset_hw = &ixgbe_reset_hw_82598; - } + if (hw->revision_id == 0) + mac->ops.reset_hw = &ixgbe_reset_hw_rev_0_82598; + else + mac->ops.reset_hw = &ixgbe_reset_hw_82598; #else - hw->func.ixgbe_func_reset_hw = &ixgbe_reset_hw_82598; + mac->ops.reset_hw = &ixgbe_reset_hw_82598; #endif + mac->ops.get_media_type = &ixgbe_get_media_type_82598; - hw->func.ixgbe_func_get_num_rx_addrs = - &ixgbe_get_num_rx_addrs_82598; + /* LEDs */ + mac->ops.blink_led_start = &ixgbe_blink_led_start_82598; + mac->ops.blink_led_stop = &ixgbe_blink_led_stop_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; + + /* Flow Control */ + mac->ops.setup_fc = &ixgbe_setup_fc_82598; + + /* Call PHY identify routine to get the phy type */ + phy->ops.identify(hw); + + /* PHY Init */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; + break; + default: + break; + } /* Link */ - if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) { - hw->func.ixgbe_func_setup_link = - &ixgbe_setup_copper_link_82598; - hw->func.ixgbe_func_check_link = - &ixgbe_check_copper_link_82598; - hw->func.ixgbe_func_setup_link_speed = - &ixgbe_setup_copper_link_speed_82598; - hw->func.ixgbe_func_get_link_settings = - &ixgbe_get_copper_link_settings_82598; + mac->ops.check_link = &ixgbe_check_mac_link_82598; + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.setup_link_speed = + &ixgbe_setup_copper_link_speed_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_82598; } else { - hw->func.ixgbe_func_setup_link = - &ixgbe_setup_mac_link_82598; - hw->func.ixgbe_func_check_link = - &ixgbe_check_mac_link_82598; - hw->func.ixgbe_func_setup_link_speed = - &ixgbe_setup_mac_link_speed_82598; - hw->func.ixgbe_func_get_link_settings = - &ixgbe_get_link_settings_82598; + mac->ops.setup_link = &ixgbe_setup_mac_link_82598; + mac->ops.setup_link_speed = &ixgbe_setup_mac_link_speed_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_link_capabilities_82598; } + mac->mcft_size = 128; + 
mac->vft_size = 128; + mac->num_rar_entries = 16; + mac->max_tx_queues = 32; + mac->max_rx_queues = 64; + return IXGBE_SUCCESS; } /** - * ixgbe_get_link_settings_82598 - Determines default link settings + * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * - * Determines the default link settings by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. **/ -s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *autoneg) +s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { s32 status = IXGBE_SUCCESS; s32 autoc_reg; @@ -195,12 +190,10 @@ s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; - if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) { + if (autoc_reg & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; - } - if (autoc_reg & IXGBE_AUTOC_KX_SUPP) { + if (autoc_reg & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - } *autoneg = TRUE; break; @@ -213,16 +206,16 @@ s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, } /** - * ixgbe_get_copper_link_settings_82598 - Determines default link settings + * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * - * Determines the default link settings by reading the AUTOC register. + * Determines the link capabilities by reading the AUTOC register. **/ -s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) +s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) { s32 status = IXGBE_ERR_LINK_SETUP; u16 speed_ability; @@ -230,15 +223,15 @@ s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, *speed = 0; *autoneg = TRUE; - status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &speed_ability); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); if (status == IXGBE_SUCCESS) { if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) - *speed |= IXGBE_LINK_SPEED_10GB_FULL; + *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) - *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *speed |= IXGBE_LINK_SPEED_1GB_FULL; } return status; @@ -259,8 +252,18 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: media_type = ixgbe_media_type_fiber; break; + case IXGBE_DEV_ID_82598AT: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_82598AT_DUAL_PORT: + media_type = ixgbe_media_type_copper; + break; default: media_type = ixgbe_media_type_unknown; break; @@ -270,25 +273,121 @@ enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) } /** - * ixgbe_get_num_of_tx_queues_82598 - Get number of TX queues + * ixgbe_setup_fc_82598 - Configure flow control settings * @hw: pointer to hardware 
structure + * @packetbuf_num: packet buffer number (0-7) * - * Returns the number of transmit queues for the given adapter. + * Configures the flow control settings based on SW configuration. This + * function is used for 802.3x flow control configuration only. **/ -u32 ixgbe_get_num_of_tx_queues_82598(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) { - return IXGBE_82598_MAX_TX_QUEUES; -} + u32 frctl_reg; + u32 rmcs_reg; -/** - * ixgbe_get_num_of_rx_queues_82598 - Get number of RX queues - * @hw: pointer to hardware structure - * - * Returns the number of receive queues for the given adapter. - **/ -u32 ixgbe_get_num_of_rx_queues_82598(struct ixgbe_hw *hw) -{ - return IXGBE_82598_MAX_RX_QUEUES; + if (packetbuf_num < 0 || packetbuf_num > 7) { + DEBUGOUT1("Invalid packet buffer number [%d], expected range is" + " 0-7\n", packetbuf_num); + ASSERT(0); + } + + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * 10 gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.type == ixgbe_fc_default) + hw->fc.type = ixgbe_fc_full; + + /* + * We want to save off the original Flow Control configuration just in + * case we get disconnected and then reconnected into a different hub + * or switch with different Flow Control capabilities. + */ + hw->fc.original_type = hw->fc.type; + + /* + * The possible values of the "flow_control" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames but not + * send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do not + * support receiving pause frames) + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.type) { + case ixgbe_fc_none: + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled, + * and Tx Flow control is disabled. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + frctl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + /* We should never get here. The value should be 0-3. */ + DEBUGOUT("Flow control param set incorrectly\n"); + ASSERT(0); + break; + } + + /* Enable 802.3x based flow control settings. */ + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* + * Check for invalid software configuration, zeros are completely + * invalid for all parameters used past this point, and if we enable + * flow control with zero water marks, we blast flow control packets. + */ + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { + DEBUGOUT("Flow control structure initialized incorrectly\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
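ixgbe_setup_fc_82598() deliberately bails out when the water marks or pause time are zero, so the caller has to populate hw->fc first; ixgbe_hardware_init() earlier in this patch does exactly that with the driver's IXGBE_FC_* defaults. As an illustrative caller-side sketch (send_xon here is an assumption, the hardware_init hunk above only shows the other four fields):

    /* Software flow-control state must be filled in before programming. */
    hw->fc.type       = ixgbe_fc_full;      /* honor and send PAUSE frames  */
    hw->fc.pause_time = IXGBE_FC_PAUSE;     /* quanta written to FCTTV      */
    hw->fc.low_water  = IXGBE_FC_LO;        /* XON threshold  (FCRTL)       */
    hw->fc.high_water = IXGBE_FC_HI;        /* XOFF threshold (FCRTH)       */
    hw->fc.send_xon   = TRUE;               /* assumed: transmit XON frames */

    if (ixgbe_setup_fc_82598(hw, 0) != IXGBE_SUCCESS)
            device_printf(dev, "flow control setup failed\n");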
+ */ + if (hw->fc.type & ixgbe_fc_tx_pause) { + if (hw->fc.send_xon) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + (hw->fc.low_water | IXGBE_FCRTL_XONE)); + } else { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), + hw->fc.low_water); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), + (hw->fc.high_water)|IXGBE_FCRTH_FCEN); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + + return IXGBE_SUCCESS; } /** @@ -316,6 +415,7 @@ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) autoc_reg |= hw->mac.link_mode_select; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); msec_delay(50); } @@ -346,16 +446,16 @@ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * case we get disconnected and then reconnected into a different hub * or switch with different Flow Control capabilities. */ - hw->fc.type = hw->fc.original_type; + hw->fc.original_type = hw->fc.type; /* * Set up the SerDes link if in 1Gb mode, otherwise just set up * 10Gb flow control. */ - ixgbe_check_link(hw, &speed, &link_up); + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); if (speed == IXGBE_LINK_SPEED_1GB_FULL) status = ixgbe_setup_fiber_serdes_link_82598(hw); else - ixgbe_setup_fc_generic(hw, 0); + ixgbe_setup_fc_82598(hw, 0); /* Add delay to filter out noises during initial link setup */ msec_delay(50); @@ -368,26 +468,75 @@ s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @speed: pointer to link speed * @link_up: TRUE is link is up, FALSE otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) + bool *link_up, bool link_up_wait_to_complete) { u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if ((link_reg & (1 << 2)) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = TRUE; + break; + } else { + *link_up = FALSE; + } + msec_delay(100); + hw->phy.ops.read_reg(hw, 1, IXGBE_TWINAX_DEV, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + IXGBE_TWINAX_DEV, + &adapt_comp_reg); + } + } else { + if ((link_reg & (1 << 2)) && + ((adapt_comp_reg & 1) == 0)) + *link_up = TRUE; + else + *link_up = FALSE; + } + + if (*link_up == FALSE) + goto out; + } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); - - if (links_reg & IXGBE_LINKS_UP) - *link_up = TRUE; - else - *link_up = FALSE; + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = TRUE; + break; + } else { + *link_up = FALSE; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = TRUE; + else + *link_up = FALSE; + } if (links_reg & IXGBE_LINKS_SPEED) *speed = IXGBE_LINK_SPEED_10GB_FULL; else *speed = IXGBE_LINK_SPEED_1GB_FULL; +out: return IXGBE_SUCCESS; } @@ -401,21 +550,15 @@ s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; - u32 delay = 300, pcs_anadv_reg, 
pcs_lpab_reg, pcs_lstat_reg, i; - + u32 pcs_anadv_reg, pcs_lpab_reg, pcs_lstat_reg, i; DEBUGFUNC("ixgbe_configure_fiber_serdes_fc_82598"); /* Check that autonegotiation has completed */ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { - /* - * Delay 300msec before reading PCS1GLSTA. Reading PCS1GLSTA - * before then will sometimes result in AN_COMPLETE not being - * set. - */ - msec_delay(delay); + msec_delay(10); pcs_lstat_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - if (pcs_lstat_reg & IXGBE_PCS1GLSTA_AN_COMPLETE) { - if (pcs_lstat_reg & (IXGBE_PCS1GLSTA_LINK_OK)) { + if (pcs_lstat_reg & IXGBE_PCS1GLSTA_LINK_OK) { + if (pcs_lstat_reg & IXGBE_PCS1GLSTA_AN_COMPLETE) { if (!(pcs_lstat_reg & (IXGBE_PCS1GLSTA_AN_TIMED_OUT))) hw->mac.autoneg_failed = 0; @@ -427,17 +570,8 @@ s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw) break; } } - - /* - * Increment the delay time by 50ms to wait before reading - * PCS1GLSTA again. - */ - delay += 50; } - if (i == FIBER_LINK_UP_LIMIT) - hw->mac.autoneg_failed = 1; - if (hw->mac.autoneg_failed) { /* * AutoNeg failed to achieve a link, so we will turn @@ -445,7 +579,7 @@ s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw) */ hw->fc.type = ixgbe_fc_none; DEBUGOUT("Flow Control = NONE.\n"); - ret_val = ixgbe_setup_fc_generic(hw, 0); + ret_val = ixgbe_setup_fc_82598(hw, 0); goto out; } @@ -458,7 +592,7 @@ s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw) if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { /* - * Now we need to check if the user selected RX ONLY + * Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise RX * ONLY. Hence, we must now check to see if we need to @@ -472,28 +606,23 @@ s32 ixgbe_configure_fiber_serdes_fc_82598(struct ixgbe_hw *hw) DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); } } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { + (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { hw->fc.type = ixgbe_fc_tx_pause; DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { + (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && + !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && + (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { hw->fc.type = ixgbe_fc_rx_pause; DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); - } else if ((hw->fc.original_type == ixgbe_fc_none || - hw->fc.original_type == ixgbe_fc_tx_pause) || - hw->fc.strict_ieee) { + } else { hw->fc.type = ixgbe_fc_none; DEBUGOUT("Flow Control = NONE.\n"); - } else { - hw->fc.type = ixgbe_fc_rx_pause; - DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); } - ret_val = ixgbe_setup_fc_generic(hw, 0); + ret_val = ixgbe_setup_fc_82598(hw, 0); if (ret_val) { DEBUGOUT("Error forcing flow control settings\n"); goto out; @@ -518,104 +647,84 @@ s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_setup_fiber_serdes_link_82598"); /* - * 82598 fiber/serdes devices require that flow control be resolved in - * software. Set up flow control advertisement if autoneg is enabled. 
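The flow-control resolution done here is the standard 802.3x pause-bit negotiation: compare the SYM/ASM pause bits we advertised against what the link partner advertised and settle on full, tx-only, rx-only, or none. Stripped of the rx-only user-preference override, the ladder reads (adv is our PCS1GANA advertisement, lp the partner ability word the driver reads back):

    if ((adv & IXGBE_PCS1GANA_SYM_PAUSE) &&
        (lp  & IXGBE_PCS1GANA_SYM_PAUSE)) {
            hw->fc.type = ixgbe_fc_full;        /* both sides symmetric        */
    } else if (!(adv & IXGBE_PCS1GANA_SYM_PAUSE) &&
        (adv & IXGBE_PCS1GANA_ASM_PAUSE) &&
        (lp  & IXGBE_PCS1GANA_SYM_PAUSE) &&
        (lp  & IXGBE_PCS1GANA_ASM_PAUSE)) {
            hw->fc.type = ixgbe_fc_tx_pause;    /* we send PAUSE, don't honor  */
    } else if ((adv & IXGBE_PCS1GANA_SYM_PAUSE) &&
        (adv & IXGBE_PCS1GANA_ASM_PAUSE) &&
        !(lp & IXGBE_PCS1GANA_SYM_PAUSE) &&
        (lp  & IXGBE_PCS1GANA_ASM_PAUSE)) {
            hw->fc.type = ixgbe_fc_rx_pause;    /* we honor PAUSE, don't send  */
    } else {
            hw->fc.type = ixgbe_fc_none;
    }

Whatever the ladder decides is then pushed into the registers by the ixgbe_setup_fc_82598() call that follows.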
+ * 10 gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. */ - if (hw->mac.autoneg) { - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - - /* - * Check for a software override of the flow control settings, - * and setup the device accordingly. If auto-negotiation is - * enabled, then software will have to set the "PAUSE" bits to - * the correct value and re-start auto- negotiation. However, - * if auto-negotiation is disabled, then software will have to - * manually configure flow control. - * - * The possible values of the "fc" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and TX flow control (symmetric) are enabled. - */ - - switch (hw->fc.type) { - case ixgbe_fc_none: - /* - * Flow control completely disabled by a software - * over-ride. - */ - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | - IXGBE_PCS1GANA_ASM_PAUSE); - break; - case ixgbe_fc_rx_pause: - /* - * RX Flow control is enabled and TX Flow control is - * disabled by a software over-ride. Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric RX PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | - IXGBE_PCS1GANA_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * TX Flow control is enabled, and RX Flow control is - * disabled, by a software over-ride. - */ - reg |= (IXGBE_PCS1GANA_ASM_PAUSE); - reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); - break; - case ixgbe_fc_full: - /* - * Flow control (both RX and TX) is enabled by a - * software over-ride. - */ - reg |= (IXGBE_PCS1GANA_SYM_PAUSE | - IXGBE_PCS1GANA_ASM_PAUSE); - break; - default: - DEBUGOUT("Flow control param set incorrectly\n"); - ret_val = -IXGBE_ERR_CONFIG; - goto out; - break; - } - - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - } + if (hw->fc.type == ixgbe_fc_default) + hw->fc.type = ixgbe_fc_full; /* - * New SerDes mode allows for forcing speed or autonegotiating speed - * at 1gb. Autoneg should be default set by most drivers. This is the - * mode that will be compatible with older link partners and switches. - * However, both are supported by the hardware and some drivers/tools. + * 82598 fiber/serdes devices require that flow control be resolved in + * software. */ - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - reg &= ~(IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - if (hw->mac.autoneg) { - /* Set PCS register for autoneg */ - reg |= IXGBE_PCS1GLCTL_AN_ENABLE | /* Enable Autoneg */ - IXGBE_PCS1GLCTL_AN_RESTART; /* Restart autoneg */ - DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); - } else { - /* Set PCS register for forced speed */ - reg |= IXGBE_PCS1GLCTL_FLV_LINK_UP | /* Force link up */ - IXGBE_PCS1GLCTL_FORCE_LINK; /* Force Link */ - DEBUGOUT1("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); + /* + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. 
+ */ + switch (hw->fc.type) { + case ixgbe_fc_none: + /* + * Flow control completely disabled by a software + * over-ride. + */ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by a software over-ride. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + reg |= (IXGBE_PCS1GANA_ASM_PAUSE); + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); + break; + case ixgbe_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a + * software over-ride. + */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -IXGBE_ERR_CONFIG; + goto out; + break; } + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Set PCS register for autoneg */ + /* Enable and restart autoneg */ + reg |= IXGBE_PCS1GLCTL_AN_ENABLE | IXGBE_PCS1GLCTL_AN_RESTART; + + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; /* Disable AN timeout */ + DEBUGOUT1("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); /* * Configure flow control. If we aren't auto-negotiating, * just setup the flow control and do not worry about PCS autoneg. */ - if (hw->mac.autoneg) - ixgbe_configure_fiber_serdes_fc_82598(hw); + ixgbe_configure_fiber_serdes_fc_82598(hw); out: return IXGBE_SUCCESS; @@ -631,18 +740,18 @@ s32 ixgbe_setup_fiber_serdes_link_82598(struct ixgbe_hw *hw) * Set the link speed in the AUTOC register and restarts link. **/ s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) { s32 status = IXGBE_SUCCESS; /* If speed is 10G, then check for CX4 or XAUI. 
*/ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && - (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; - else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) + } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; - else if (autoneg) { + } else if (autoneg) { /* BX mode - Autonegotiate 1G */ if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; @@ -680,72 +789,17 @@ s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) { s32 status; - ixgbe_link_speed speed = 0; - bool link_up = FALSE; + + /* Restart autonegotiation on PHY */ + status = hw->phy.ops.setup_link(hw); + + /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ + hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; /* Set up MAC */ ixgbe_setup_mac_link_82598(hw); - /* Restart autonegotiation on PHY */ - status = ixgbe_setup_phy_link(hw); - - /* Synchronize MAC to PHY speed */ - if (status == IXGBE_SUCCESS) - status = ixgbe_check_link(hw, &speed, &link_up); - - return status; -} - -/** - * ixgbe_check_copper_link_82598 - Syncs MAC & PHY link settings - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @link_up: TRUE if link is up, FALSE otherwise - * - * Reads the mac link, phy link, and synchronizes the MAC to PHY. - **/ -s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - s32 status; - ixgbe_link_speed phy_speed = 0; - bool phy_link = FALSE; - - /* This is the speed and link the MAC is set at */ - ixgbe_check_mac_link_82598(hw, speed, link_up); - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - status = ixgbe_check_phy_link(hw, &phy_speed, &phy_link); - - if ((status == IXGBE_SUCCESS) && (phy_link)) { - /* - * Check current link status of the MACs link's register - * matches that of the speed in the PHY register - */ - if (*speed != phy_speed) { - /* - * The copper PHY requires 82598 attach type to be XAUI - * for 10G and BX for 1G - */ - hw->mac.link_attach_type = - (IXGBE_AUTOC_10G_XAUI | IXGBE_AUTOC_1G_BX); - - /* Synchronize the MAC speed to the PHY speed */ - status = ixgbe_setup_mac_link_speed_82598(hw, phy_speed, - FALSE, FALSE); - if (status == IXGBE_SUCCESS) - ixgbe_check_mac_link_82598(hw, speed, link_up); - else - status = IXGBE_ERR_LINK_SETUP; - } - } else { - *link_up = phy_link; - } - return status; } @@ -759,20 +813,22 @@ s32 ixgbe_check_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * Sets the link speed in the AUTOC register in the MAC and restarts link. 
**/ s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) { s32 status; - bool link_up = 0; /* Setup the PHY according to input speed */ - status = ixgbe_setup_phy_link_speed(hw, speed, autoneg, - autoneg_wait_to_complete); + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); - /* Synchronize MAC to PHY speed */ - if (status == IXGBE_SUCCESS) - status = ixgbe_check_link(hw, &speed, &link_up); + /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ + hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); + hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; + + /* Set up MAC */ + ixgbe_setup_mac_link_82598(hw); return status; } @@ -782,7 +838,7 @@ s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, * ixgbe_reset_hw_rev_0_82598 - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ @@ -796,10 +852,10 @@ s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw) u32 resets; /* Call adapter stop to disable tx/rx and clear interrupts */ - ixgbe_stop_adapter(hw); + hw->mac.ops.stop_adapter(hw); /* Reset PHY */ - ixgbe_reset_phy(hw); + hw->phy.ops.reset(hw); for (resets = 0; resets < 10; resets++) { /* @@ -858,13 +914,13 @@ s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } else { hw->mac.link_attach_type = - (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); hw->mac.link_settings_loaded = TRUE; } /* Store the permanent mac address */ - ixgbe_get_mac_addr(hw, hw->mac.perm_addr); + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); return status; } @@ -874,7 +930,7 @@ s32 ixgbe_reset_hw_rev_0_82598(struct ixgbe_hw *hw) * ixgbe_reset_hw_82598 - Performs hardware reset * @hw: pointer to hardware structure * - * Resets the hardware by reseting the transmit and receive units, masks and + * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ @@ -888,35 +944,44 @@ s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) u8 analog_val; /* Call adapter stop to disable tx/rx and clear interrupts */ - ixgbe_stop_adapter(hw); + hw->mac.ops.stop_adapter(hw); /* - * Power up the Atlas TX lanes if they are currently powered down. - * Atlas TX lanes are powered down for MAC loopback tests, but + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but * they are not automatically restored on reset. 
*/ - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { - /* Enable TX Atlas so packets can be transmitted again */ - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); analog_val &= ~ IXGBE_ATLAS_PDN_TX_10G_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); } /* Reset PHY */ - ixgbe_reset_phy(hw); + if (hw->phy.reset_disable == FALSE) + hw->phy.ops.reset(hw); /* * Prevent the PCI-E bus from from hanging by disabling PCI-E master @@ -969,68 +1034,83 @@ s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } else { hw->mac.link_attach_type = - (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); hw->mac.link_settings_loaded = TRUE; } /* Store the permanent mac address */ - ixgbe_get_mac_addr(hw, hw->mac.perm_addr); + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); return status; } /** - * ixgbe_read_analog_reg8_82598 - Reads 8 bit 82598 Atlas analog register - * @hw: pointer to hardware structure - * @reg: analog register to read - * @val: read value - * - * Performs write operation to analog register specified. + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index **/ -s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { - u32 atlas_ctl; + u32 rar_high; - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_start_82598 - Blink LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* + * Link must be up to auto-blink the LEDs on the 82598EB MAC; + * force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msec_delay(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - usec_delay(10); - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); - *val = (u8)atlas_ctl; return IXGBE_SUCCESS; } /** - * ixgbe_write_analog_reg8_82598 - Writes 8 bit 82598 Atlas analog register + * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. * @hw: pointer to hardware structure - * @reg: atlas register to write - * @val: value to write - * - * Performs write operation to Atlas analog register specified. + * @index: led number to stop blinking **/ -s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) { - u32 atlas_ctl; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - atlas_ctl = (reg << 8) | val; - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + autoc_reg &= ~IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); - usec_delay(10); return IXGBE_SUCCESS; } - -/** - * ixgbe_get_num_rx_addrs_82598 - Get RX address registers - * @hw: pointer to hardware structure - * - * Returns the of RAR entries for the given adapter. - **/ -u32 ixgbe_get_num_rx_addrs_82598(struct ixgbe_hw *hw) -{ - UNREFERENCED_PARAMETER(hw); - - return IXGBE_82598_RAR_ENTRIES; -} - diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c index 57b567866d2a..570d480a7950 100644 --- a/sys/dev/ixgbe/ixgbe_api.c +++ b/sys/dev/ixgbe/ixgbe_api.c @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, Intel Corporation + Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,15 +29,13 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*******************************************************************************/ -/* $FreeBSD$ */ - +******************************************************************************/ +/*$FreeBSD$*/ #include "ixgbe_api.h" #include "ixgbe_common.h" -extern s32 ixgbe_init_shared_code_82598(struct ixgbe_hw *hw); -extern s32 ixgbe_init_shared_code_phy(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); /** * ixgbe_init_shared_code - Initialize the shared code @@ -55,12 +53,6 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) { s32 status; - /* - * Assign generic function pointers before entering adapter-specific - * init - */ - ixgbe_assign_func_pointers_generic(hw); - /* * Set the mac type */ @@ -68,8 +60,7 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) switch (hw->mac.type) { case ixgbe_mac_82598EB: - status = ixgbe_init_shared_code_82598(hw); - status = ixgbe_init_shared_code_phy(hw); + status = ixgbe_init_ops_82598(hw); break; default: status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; @@ -90,13 +81,19 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; - DEBUGFUNC("ixgbe_set_mac_type"); + DEBUGFUNC("ixgbe_set_mac_type\n"); if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) { switch (hw->device_id) { case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT_DUAL_PORT: case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: hw->mac.type = ixgbe_mac_82598EB; break; default: @@ -107,6 +104,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; } + DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); return ret_val; } @@ -118,8 +117,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) **/ s32 ixgbe_init_hw(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_init_hw, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -131,12 +130,12 @@ s32 ixgbe_init_hw(struct ixgbe_hw *hw) **/ s32 ixgbe_reset_hw(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_reset_hw, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_start_hw - Prepares hardware for TX/TX + * ixgbe_start_hw - Prepares hardware for Rx/Tx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, @@ -147,8 +146,8 @@ s32 ixgbe_reset_hw(struct ixgbe_hw *hw) **/ s32 ixgbe_start_hw(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_start_hw, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -160,8 +159,8 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw) **/ s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_clear_hw_cntrs, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -172,8 +171,8 @@ s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) **/ enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_get_media_type, (hw), - ixgbe_media_type_unknown); + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); } /** @@ -182,14 +181,14 @@ enum 
ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from the first Receive Address Register - * (RAR0) A reset of the adapter must have been performed prior to calling this - * function in order for the MAC address to have been loaded from the EEPROM - * into RAR0 + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 **/ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) { - return ixgbe_call_func(hw, ixgbe_func_get_mac_addr, - (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); } /** @@ -200,36 +199,34 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) **/ s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_get_bus_info, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_get_num_of_tx_queues - Get TX queues + * ixgbe_get_num_of_tx_queues - Get Tx queues * @hw: pointer to hardware structure * * Returns the number of transmit queues for the given adapter. **/ u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_get_num_of_tx_queues, - (hw), 0); + return hw->mac.max_tx_queues; } /** - * ixgbe_get_num_of_rx_queues - Get RX queues + * ixgbe_get_num_of_rx_queues - Get Rx queues * @hw: pointer to hardware structure * * Returns the number of receive queues for the given adapter. **/ u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_get_num_of_rx_queues, - (hw), 0); + return hw->mac.max_rx_queues; } /** - * ixgbe_stop_adapter - Disable TX/TX units + * ixgbe_stop_adapter - Disable Rx/Tx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, @@ -239,8 +236,20 @@ u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) **/ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_stop_adapter, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_num - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
+ **/ +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) +{ + return ixgbe_read_pba_num_generic(hw, pba_num); } /** @@ -255,9 +264,9 @@ s32 ixgbe_identify_phy(struct ixgbe_hw *hw) if (hw->phy.type == ixgbe_phy_unknown) { status = ixgbe_call_func(hw, - ixgbe_func_identify_phy, - (hw), - IXGBE_NOT_IMPLEMENTED); + hw->phy.ops.identify, + (hw), + IXGBE_NOT_IMPLEMENTED); } return status; @@ -278,14 +287,27 @@ s32 ixgbe_reset_phy(struct ixgbe_hw *hw) } if (status == IXGBE_SUCCESS) { - status = ixgbe_call_func(hw, - ixgbe_func_reset_phy, - (hw), - IXGBE_NOT_IMPLEMENTED); + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); } return status; } +/** + * ixgbe_get_phy_firmware_version - + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + /** * ixgbe_read_phy_reg - Read PHY register * @hw: pointer to hardware structure @@ -295,23 +317,10 @@ s32 ixgbe_reset_phy(struct ixgbe_hw *hw) * Reads a value from a specified PHY register **/ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) + u16 *phy_data) { - s32 status = IXGBE_SUCCESS; - - if (hw->phy.type == ixgbe_phy_unknown) { - if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) { - status = IXGBE_ERR_PHY; - } - } - - if (status == IXGBE_SUCCESS) { - status = ixgbe_call_func(hw, - ixgbe_func_read_phy_reg, - (hw, reg_addr, device_type, phy_data), - IXGBE_NOT_IMPLEMENTED); - } - return status; + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); } /** @@ -323,23 +332,53 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, * Writes a value to specified PHY register **/ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 phy_data) + u16 phy_data) { - s32 status = IXGBE_SUCCESS; + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} - if (hw->phy.type == ixgbe_phy_unknown) { - if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) { - status = IXGBE_ERR_PHY; - } - } +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} - if (status == IXGBE_SUCCESS) { - status = ixgbe_call_func(hw, - ixgbe_func_write_phy_reg, - (hw, reg_addr, device_type, phy_data), - IXGBE_NOT_IMPLEMENTED); - } - return status; +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY. 
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg, autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); } /** @@ -351,8 +390,8 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, **/ s32 ixgbe_setup_link(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_setup_link, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -362,10 +401,11 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw) * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) + bool *link_up, bool link_up_wait_to_complete) { - return ixgbe_call_func(hw, ixgbe_func_check_link, (hw, speed, link_up), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); } /** @@ -377,29 +417,29 @@ s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * Set the link speed and restarts the link. **/ s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) + bool autoneg, + bool autoneg_wait_to_complete) { - return ixgbe_call_func(hw, ixgbe_func_setup_link_speed, (hw, speed, - autoneg, autoneg_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.setup_link_speed, (hw, speed, + autoneg, autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_get_link_settings - Set link settings to default + * ixgbe_get_link_capabilities - Returns link capabilities * @hw: pointer to hardware structure * - * Sets the default link settings based on attach type in the hw struct. + * Determines the link capabilities of the current configuration. 
**/ -s32 ixgbe_get_link_settings(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *autoneg) +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) { - return ixgbe_call_func(hw, ixgbe_func_get_link_settings, (hw, speed, - autoneg), IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_led_on - Turn on LED's + * ixgbe_led_on - Turn on LEDs * @hw: pointer to hardware structure * @index: led number to turn on * @@ -407,12 +447,12 @@ s32 ixgbe_get_link_settings(struct ixgbe_hw *hw, ixgbe_link_speed *speed, **/ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) { - return ixgbe_call_func(hw, ixgbe_func_led_on, (hw, index), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_led_off - Turn off LED's + * ixgbe_led_off - Turn off LEDs * @hw: pointer to hardware structure * @index: led number to turn off * @@ -420,12 +460,12 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) **/ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) { - return ixgbe_call_func(hw, ixgbe_func_led_off, (hw, index), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_blink_led_start - Blink LED's + * ixgbe_blink_led_start - Blink LEDs * @hw: pointer to hardware structure * @index: led number to blink * @@ -433,24 +473,24 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) **/ s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) { - return ixgbe_call_func(hw, ixgbe_func_blink_led_start, (hw, index), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_blink_led_stop - Stop blinking LED's + * ixgbe_blink_led_stop - Stop blinking LEDs * @hw: pointer to hardware structure * * Stop blinking LED based on index. **/ s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) { - return ixgbe_call_func(hw, ixgbe_func_blink_led_stop, (hw, index), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_init_eeprom_params - Initialiaze EEPROM parameters + * ixgbe_init_eeprom_params - Initialize EEPROM parameters * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the @@ -458,8 +498,8 @@ s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) **/ s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_init_eeprom_params, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); } @@ -475,25 +515,8 @@ s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) **/ s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status; - - /* - * Initialize EEPROM parameters. 
This will not do anything if the - * EEPROM structure has already been initialized - */ - ixgbe_init_eeprom_params(hw); - - /* Check for invalid offset */ - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - } else { - status = ixgbe_call_func(hw, - ixgbe_func_write_eeprom, - (hw, offset, data), - IXGBE_NOT_IMPLEMENTED); - } - - return status; + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); } /** @@ -506,25 +529,8 @@ s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) **/ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status; - - /* - * Initialize EEPROM parameters. This will not do anything if the - * EEPROM structure has already been initialized - */ - ixgbe_init_eeprom_params(hw); - - /* Check for invalid offset */ - if (offset >= hw->eeprom.word_size) { - status = IXGBE_ERR_EEPROM; - } else { - status = ixgbe_call_func(hw, - ixgbe_func_read_eeprom, - (hw, offset, data), - IXGBE_NOT_IMPLEMENTED); - } - - return status; + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); } /** @@ -536,8 +542,8 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) **/ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) { - return ixgbe_call_func(hw, ixgbe_func_validate_eeprom_checksum, - (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); } /** @@ -546,25 +552,37 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) **/ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_update_eeprom_checksum, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), + IXGBE_NOT_IMPLEMENTED); } /** - * ixgbe_set_rar - Set RX address register + * ixgbe_set_rar - Set Rx address register * @hw: pointer to hardware structure - * @addr: Address to put into receive address register * @index: Receive address register to write - * @vind: Vind to set RAR to + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. **/ -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, - u32 enable_addr) +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) { - return ixgbe_call_func(hw, ixgbe_func_set_rar, (hw, index, addr, vind, - enable_addr), IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, + enable_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); } /** @@ -572,13 +590,13 @@ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest - * of the receive addresss registers. Clears the multicast table. Assumes + * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. 
**/ s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_init_rx_addrs, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -587,7 +605,26 @@ s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) **/ u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_get_num_rx_addrs, (hw), 0); + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new multicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); } /** @@ -595,19 +632,19 @@ u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses - * @pad: number of bytes between addresses in the list + * @func: iterator function to walk the multicast address list * * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unsed receive address + * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. 
**/ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad) + u32 mc_addr_count, ixgbe_mc_addr_itr func) { - return ixgbe_call_func(hw, ixgbe_func_update_mc_addr_list, (hw, - mc_addr_list, mc_addr_count, pad), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func), + IXGBE_NOT_IMPLEMENTED); } /** @@ -618,8 +655,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, **/ s32 ixgbe_enable_mc(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_enable_mc, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -630,8 +667,8 @@ s32 ixgbe_enable_mc(struct ixgbe_hw *hw) **/ s32 ixgbe_disable_mc(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_disable_mc, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -642,8 +679,8 @@ s32 ixgbe_disable_mc(struct ixgbe_hw *hw) **/ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_clear_vfta, (hw), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); } /** @@ -657,8 +694,8 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) **/ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { - return ixgbe_call_func(hw, ixgbe_func_set_vfta, (hw, vlan, vind, - vlan_on), IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on), IXGBE_NOT_IMPLEMENTED); } /** @@ -670,11 +707,10 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) **/ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) { - return ixgbe_call_func(hw, ixgbe_func_setup_fc, (hw, packetbuf_num), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw, packetbuf_num), + IXGBE_NOT_IMPLEMENTED); } - /** * ixgbe_read_analog_reg8 - Reads 8 bit analog register * @hw: pointer to hardware structure @@ -685,8 +721,8 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) **/ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) { - return ixgbe_call_func(hw, ixgbe_func_read_analog_reg8, (hw, reg, val), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); } /** @@ -699,7 +735,7 @@ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) **/ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) { - return ixgbe_call_func(hw, ixgbe_func_write_analog_reg8, (hw, reg, val), - IXGBE_NOT_IMPLEMENTED); + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); } diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h index 5154cf7ace2b..79a8779dfa12 100644 --- a/sys/dev/ixgbe/ixgbe_api.h +++ b/sys/dev/ixgbe/ixgbe_api.h @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, Intel Corporation + Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,8 +29,8 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*******************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifndef _IXGBE_API_H_ #define _IXGBE_API_H_ @@ -50,21 +50,30 @@ s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num); s32 ixgbe_identify_phy(struct ixgbe_hw *hw); s32 ixgbe_reset_phy(struct ixgbe_hw *hw); s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data); + u16 *phy_data); s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 phy_data); + u16 phy_data); +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); s32 ixgbe_setup_link(struct ixgbe_hw *hw); s32 ixgbe_setup_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, bool autoneg_wait_to_complete); + bool autoneg, bool autoneg_wait_to_complete); s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up); -s32 ixgbe_get_link_settings(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *autoneg); + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); @@ -76,21 +85,26 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); -s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vind, u32 enable_addr); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad); + u32 mc_addr_count, ixgbe_mc_addr_itr func); s32 ixgbe_enable_mc(struct ixgbe_hw *hw); s32 ixgbe_disable_mc(struct ixgbe_hw *hw); s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on); s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c index f9cc69cebda2..191cefd47970 100644 --- a/sys/dev/ixgbe/ixgbe_common.c +++ b/sys/dev/ixgbe/ixgbe_common.c @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, 
Intel Corporation + Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,9 +29,8 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*******************************************************************************/ -/* $FreeBSD$ */ - +******************************************************************************/ +/*$FreeBSD$*/ #include "ixgbe_common.h" #include "ixgbe_api.h" @@ -43,7 +42,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count); + u16 count); static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); @@ -54,51 +53,75 @@ static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); /** - * ixgbe_assign_func_pointers_generic - Set generic func ptrs - * @hw: pointer to hardware structure + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure * - * Assigns generic function pointers. Adapter-specific functions can - * override the assignment of generic function pointers by assigning - * their own adapter-specific function pointers. + * Initialize the function pointers. **/ -s32 ixgbe_assign_func_pointers_generic(struct ixgbe_hw *hw) +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) { - struct ixgbe_functions *f = &hw->func; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC); - f->ixgbe_func_init_hw = &ixgbe_init_hw_generic; - f->ixgbe_func_start_hw = &ixgbe_start_hw_generic; - f->ixgbe_func_clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; - f->ixgbe_func_get_mac_addr = &ixgbe_get_mac_addr_generic; - f->ixgbe_func_stop_adapter = &ixgbe_stop_adapter_generic; - f->ixgbe_func_get_bus_info = &ixgbe_get_bus_info_generic; - /* LED */ - f->ixgbe_func_led_on = &ixgbe_led_on_generic; - f->ixgbe_func_led_off = &ixgbe_led_off_generic; /* EEPROM */ - f->ixgbe_func_init_eeprom_params = &ixgbe_init_eeprom_params_generic; - f->ixgbe_func_read_eeprom = &ixgbe_read_eeprom_bit_bang_generic; - f->ixgbe_func_write_eeprom = &ixgbe_write_eeprom_generic; - f->ixgbe_func_validate_eeprom_checksum = - &ixgbe_validate_eeprom_checksum_generic; - f->ixgbe_func_update_eeprom_checksum = - &ixgbe_update_eeprom_checksum_generic; + eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & (1 << 8)) + eeprom->ops.read = &ixgbe_read_eeprom_generic; + else + eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.write = &ixgbe_write_eeprom_generic; + eeprom->ops.validate_checksum = + &ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = &ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = &ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = 
&ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; + mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_generic; + mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_generic; + + /* LEDs */ + mac->ops.led_on = &ixgbe_led_on_generic; + mac->ops.led_off = &ixgbe_led_off_generic; + mac->ops.blink_led_start = NULL; + mac->ops.blink_led_stop = NULL; + /* RAR, Multicast, VLAN */ - f->ixgbe_func_set_rar = &ixgbe_set_rar_generic; - f->ixgbe_func_init_rx_addrs = &ixgbe_init_rx_addrs_generic; - f->ixgbe_func_update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; - f->ixgbe_func_enable_mc = &ixgbe_enable_mc_generic; - f->ixgbe_func_disable_mc = &ixgbe_disable_mc_generic; - f->ixgbe_func_clear_vfta = &ixgbe_clear_vfta_generic; - f->ixgbe_func_set_vfta = &ixgbe_set_vfta_generic; - f->ixgbe_func_setup_fc = &ixgbe_setup_fc_generic; + mac->ops.set_rar = &ixgbe_set_rar_generic; + mac->ops.set_vmdq = NULL; + mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = &ixgbe_enable_mc_generic; + mac->ops.disable_mc = &ixgbe_disable_mc_generic; + mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; + mac->ops.set_vfta = &ixgbe_set_vfta_generic; + + /* Flow Control */ + mac->ops.setup_fc = NULL; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.setup_link_speed = NULL; + mac->ops.check_link = NULL; return IXGBE_SUCCESS; } /** - * ixgbe_start_hw_generic - Prepare hardware for TX/RX + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears @@ -111,33 +134,34 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) u32 ctrl_ext; /* Set the media type */ - hw->phy.media_type = ixgbe_get_media_type(hw); + hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* Set bus info */ - ixgbe_get_bus_info(hw); + hw->mac.ops.get_bus_info(hw); /* Identify the PHY */ - ixgbe_identify_phy(hw); + hw->phy.ops.identify(hw); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ - ixgbe_init_rx_addrs(hw); + hw->mac.ops.init_rx_addrs(hw); /* Clear the VLAN filter table */ - ixgbe_clear_vfta(hw); + hw->mac.ops.clear_vfta(hw); /* Set up link */ - ixgbe_setup_link(hw); + hw->mac.ops.setup_link(hw); /* Clear statistics registers */ - ixgbe_clear_hw_cntrs(hw); + hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); /* Clear adapter stopped flag */ hw->adapter_stopped = FALSE; @@ -149,7 +173,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) * ixgbe_init_hw_generic - Generic hardware initialization * @hw: pointer to hardware structure * - * Initialize the hardware by reseting the hardware, filling the bus info + * Initialize the hardware by resetting the hardware, filling the bus info * structure and media type, clears all on chip counters, initializes receive * address registers, multicast table, VLAN filter table, calls routine to set * up link and flow control settings, and leaves transmit and receive units @@ -158,10 +182,10 @@ s32 
ixgbe_start_hw_generic(struct ixgbe_hw *hw) s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { /* Reset the hardware */ - ixgbe_reset_hw(hw); + hw->mac.ops.reset_hw(hw); /* Start the HW */ - ixgbe_start_hw(hw); + hw->mac.ops.start_hw(hw); return IXGBE_SUCCESS; } @@ -244,6 +268,37 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) return IXGBE_SUCCESS; } +/** + * ixgbe_read_pba_num - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. + **/ +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("ixgbe_read_pba_num_generic"); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num = (u32)(data << 16); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= data; + + return IXGBE_SUCCESS; +} + /** * ixgbe_get_mac_addr_generic - Generic get MAC address * @hw: pointer to hardware structure @@ -320,7 +375,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) } /** - * ixgbe_stop_adapter_generic - Generic stop TX/RX units + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, @@ -344,6 +399,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); reg_val &= ~(IXGBE_RXCTRL_RXEN); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); + IXGBE_WRITE_FLUSH(hw); msec_delay(2); /* Clear interrupt mask to stop from interrupts being generated */ @@ -353,7 +409,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ - number_of_queues = ixgbe_get_num_of_tx_queues(hw); + number_of_queues = hw->mac.max_tx_queues; for (i = 0; i < number_of_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); if (reg_val & IXGBE_TXDCTL_ENABLE) { @@ -362,6 +418,14 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) } } + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests + */ + if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + return IXGBE_SUCCESS; } @@ -378,6 +442,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } @@ -395,37 +460,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - - return IXGBE_SUCCESS; -} - - -/** - * ixgbe_blink_led_start_generic - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - - return IXGBE_SUCCESS; -} - -/** - * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg &= ~IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } @@ -490,6 +525,13 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) s32 status; u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); @@ -505,7 +547,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) /* Send the WRITE ENABLE command (8 bit opcode ) */ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); + IXGBE_EEPROM_OPCODE_BITS); ixgbe_standby_eeprom(hw); @@ -518,9 +560,9 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) /* Send the Write command (8-bit opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, write_opcode, - IXGBE_EEPROM_OPCODE_BITS); + IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), - hw->eeprom.address_bits); + hw->eeprom.address_bits); /* Send the data */ data = (data >> 8) | (data << 8); @@ -533,6 +575,7 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) ixgbe_release_eeprom(hw); } +out: return status; } @@ -545,12 +588,19 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) * Reads 16 bit value from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 *data) + u16 *data) { s32 status; u16 word_in; u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + /* Prepare the EEPROM for reading */ status = ixgbe_acquire_eeprom(hw); @@ -573,9 +623,9 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, /* Send the READ command (opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, read_opcode, - IXGBE_EEPROM_OPCODE_BITS); + IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), - hw->eeprom.address_bits); + hw->eeprom.address_bits); /* Read the data. 
*/ word_in = ixgbe_shift_in_eeprom_bits(hw, 16); @@ -585,6 +635,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, ixgbe_release_eeprom(hw); } +out: return status; } @@ -601,6 +652,13 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) u32 eerd; s32 status; + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + IXGBE_EEPROM_READ_REG_START; @@ -609,10 +667,11 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) if (status == IXGBE_SUCCESS) *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> - IXGBE_EEPROM_READ_REG_DATA); + IXGBE_EEPROM_READ_REG_DATA); else DEBUGOUT("Eeprom read timed out\n"); +out: return status; } @@ -669,7 +728,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) usec_delay(5); } - /* Release if grant not aquired */ + /* Release if grant not acquired */ if (!(eec & IXGBE_EEC_GNT)) { eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); @@ -747,7 +806,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) */ if (i >= timeout) { DEBUGOUT("Driver can't access the Eeprom - Semaphore " - "not granted.\n"); + "not granted.\n"); ixgbe_release_eeprom_semaphore(hw); status = IXGBE_ERR_EEPROM; } @@ -771,6 +830,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); } /** @@ -791,7 +851,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) */ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, - IXGBE_EEPROM_OPCODE_BITS); + IXGBE_EEPROM_OPCODE_BITS); spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) break; @@ -840,7 +900,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) * @count: number of bits to shift out **/ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, - u16 count) + u16 count) { u32 eec; u32 mask; @@ -999,7 +1059,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { - if (ixgbe_read_eeprom(hw, i, &word) != IXGBE_SUCCESS) { + if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) { DEBUGOUT("EEPROM read failed\n"); break; } @@ -1008,15 +1068,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { - ixgbe_read_eeprom(hw, i, &pointer); + hw->eeprom.ops.read(hw, i, &pointer); /* Make sure the pointer seems valid */ if (pointer != 0xFFFF && pointer != 0) { - ixgbe_read_eeprom(hw, pointer, &length); + hw->eeprom.ops.read(hw, pointer, &length); if (length != 0xFFFF && length != 0) { for (j = pointer+1; j <= pointer+length; j++) { - ixgbe_read_eeprom(hw, j, &word); + hw->eeprom.ops.read(hw, j, &word); checksum += word; } } @@ -1037,7 +1097,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) * caller does not need checksum_val, the value can be NULL. 
**/ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, - u16 *checksum_val) + u16 *checksum_val) { s32 status; u16 checksum; @@ -1048,25 +1108,23 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * not continue or we could be in for a very long wait while every * EEPROM read fails */ - status = ixgbe_read_eeprom(hw, 0, &checksum); + status = hw->eeprom.ops.read(hw, 0, &checksum); if (status == IXGBE_SUCCESS) { checksum = ixgbe_calc_eeprom_checksum(hw); - ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); /* * Verify read checksum from EEPROM is the same as * calculated checksum */ - if (read_checksum != checksum) { + if (read_checksum != checksum) status = IXGBE_ERR_EEPROM_CHECKSUM; - } /* If the user cares, return the calculated checksum */ - if (checksum_val) { + if (checksum_val) *checksum_val = checksum; - } } else { DEBUGOUT("EEPROM read failed\n"); } @@ -1075,7 +1133,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, } /** - * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksm + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum * @hw: pointer to hardware structure **/ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) @@ -1088,12 +1146,12 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) * not continue or we could be in for a very long wait while every * EEPROM read fails */ - status = ixgbe_read_eeprom(hw, 0, &checksum); + status = hw->eeprom.ops.read(hw, 0, &checksum); if (status == IXGBE_SUCCESS) { checksum = ixgbe_calc_eeprom_checksum(hw); - status = ixgbe_write_eeprom(hw, IXGBE_EEPROM_CHECKSUM, - checksum); + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, + checksum); } else { DEBUGOUT("EEPROM read failed\n"); } @@ -1121,7 +1179,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr) status = IXGBE_ERR_INVALID_MAC_ADDR; /* Reject the zero address */ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { DEBUGOUT("MAC address is all zeros\n"); status = IXGBE_ERR_INVALID_MAC_ADDR; } @@ -1129,44 +1187,57 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr) } /** - * ixgbe_set_rar_generic - Set RX address register + * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure - * @addr: Address to put into receive address register * @index: Receive address register to write - * @vind: Vind to set RAR to + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. 
**/ -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, - u32 enable_addr) +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) { u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; - /* - * HW expects these in little endian so we reverse the byte order from - * network order (big endian) to little endian - */ - rar_low = ((u32)addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); - rar_high = ((u32)addr[4] | - ((u32)addr[5] << 8) | - ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); + /* Make sure we are using a valid rar index range */ + if (index < rar_entries) { + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); - if (enable_addr != 0) - rar_high |= IXGBE_RAH_AV; + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + } else { + DEBUGOUT("Current RAR index is out of range."); + } return IXGBE_SUCCESS; } /** - * ixgbe_enable_rar - Enable RX address register + * ixgbe_enable_rar - Enable Rx address register * @hw: pointer to hardware structure * @index: index into the RAR table * @@ -1182,7 +1253,7 @@ static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index) } /** - * ixgbe_disable_rar - Disable RX address register + * ixgbe_disable_rar - Disable Rx address register * @hw: pointer to hardware structure * @index: index into the RAR table * @@ -1202,13 +1273,13 @@ static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index) * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest - * of the receive addresss registers. Clears the multicast table. Assumes + * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; - u32 rar_entries = ixgbe_get_num_rx_addrs(hw); + u32 rar_entries = hw->mac.num_rar_entries; /* * If the current mac address is valid, assume it is a software override @@ -1218,29 +1289,30 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) if (ixgbe_validate_mac_addr(hw->mac.addr) == IXGBE_ERR_INVALID_MAC_ADDR) { /* Get the MAC address from the RAR0 for later reference */ - ixgbe_get_mac_addr(hw, hw->mac.addr); + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); + hw->mac.addr[4], hw->mac.addr[5]); } else { /* Setup the receive address. 
*/ DEBUGOUT("Overriding MAC Address in RAR[0]\n"); DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", - hw->mac.addr[0], hw->mac.addr[1], - hw->mac.addr[2]); + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], - hw->mac.addr[4], hw->mac.addr[5]); + hw->mac.addr[4], hw->mac.addr[5]); - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. */ - DEBUGOUT("Clearing RAR[1-15]\n"); + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); @@ -1252,12 +1324,113 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); DEBUGOUT(" Clearing MTA\n"); - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); return IXGBE_SUCCESS; } +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count - + hw->addr_ctrl.mc_addr_in_rar_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
+ **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - + hw->addr_ctrl.mc_addr_in_rar_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use); + for (i = 1; i <= uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT( " Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + /** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure @@ -1267,7 +1440,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) * bit-vector to set in the multicast table. The hardware uses 12 bits, from * incoming rx multicast addresses, to determine the bit-vector to check in * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set - * by the MO field of the MCSTCTRL. The MO field is set during initalization + * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. 
**/ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) @@ -1275,19 +1448,19 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) u32 vector = 0; switch (hw->mac.mc_filter_type) { - case 0: /* use bits [47:36] of the address */ + case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; - case 1: /* use bits [46:35] of the address */ + case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; - case 2: /* use bits [45:34] of the address */ + case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; - case 3: /* use bits [43:32] of the address */ + case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; - default: /* Invalid mc_filter_type */ + default: /* Invalid mc_filter_type */ DEBUGOUT("MC filter type param set incorrectly\n"); ASSERT(0); break; @@ -1342,21 +1515,22 @@ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) **/ void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) { - u32 rar_entries = ixgbe_get_num_rx_addrs(hw); + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; DEBUGOUT6(" MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", - mc_addr[0], mc_addr[1], mc_addr[2], - mc_addr[3], mc_addr[4], mc_addr[5]); + mc_addr[0], mc_addr[1], mc_addr[2], + mc_addr[3], mc_addr[4], mc_addr[5]); /* * Place this multicast address in the RAR if there is room, * else put it in the MTA */ if (hw->addr_ctrl.rar_used_count < rar_entries) { - ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, - mc_addr, 0, IXGBE_RAH_AV); - DEBUGOUT1("Added a multicast address to RAR[%d]\n", - hw->addr_ctrl.rar_used_count); + /* use RAR from the end up for multicast */ + rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; + hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); + DEBUGOUT1("Added a multicast address to RAR[%d]\n", rar); hw->addr_ctrl.rar_used_count++; hw->addr_ctrl.mc_addr_in_rar_count++; } else { @@ -1371,18 +1545,19 @@ void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses - * @pad: number of bytes between addresses in the list + * @next: iterator function to walk the multicast address list * * The given list replaces any existing list. Clears the MC addrs from receive - * address registers and the multicast table. Uses unsed receive address + * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad) + u32 mc_addr_count, ixgbe_mc_addr_itr next) { u32 i; - u32 rar_entries = ixgbe_get_num_rx_addrs(hw); + u32 rar_entries = hw->mac.num_rar_entries; + u32 vmdq; /* * Set the new number of MC addresses that we are being requested to @@ -1394,7 +1569,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, hw->addr_ctrl.mta_in_use = 0; /* Zero out the other receive addresses. 
*/ - DEBUGOUT("Clearing RAR[1-15]\n"); + DEBUGOUT2("Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count, + rar_entries - 1); for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); @@ -1402,20 +1578,19 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, /* Clear the MTA */ DEBUGOUT(" Clearing MTA\n"); - for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) + for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* Add the new addresses */ for (i = 0; i < mc_addr_count; i++) { DEBUGOUT(" Adding the multicast addresses:\n"); - ixgbe_add_mc_addr(hw, mc_addr_list + - (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad))); + ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ if (hw->addr_ctrl.mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); return IXGBE_SUCCESS; @@ -1430,22 +1605,23 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { u32 i; + u32 rar_entries = hw->mac.num_rar_entries; struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; if (a->mc_addr_in_rar_count > 0) - for (i = (a->rar_used_count - a->mc_addr_in_rar_count); - i < a->rar_used_count; i++) + for (i = (rar_entries - a->mc_addr_in_rar_count); + i < rar_entries; i++) ixgbe_enable_rar(hw, i); if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | - hw->mac.mc_filter_type); + hw->mac.mc_filter_type); return IXGBE_SUCCESS; } /** - * ixgbe_disable_mc_generic - Disable mutlicast address in RAR + * ixgbe_disable_mc_generic - Disable multicast address in RAR * @hw: pointer to hardware structure * * Disables multicast address in RAR and the use of the multicast hash table. @@ -1453,11 +1629,12 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { u32 i; + u32 rar_entries = hw->mac.num_rar_entries; struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; if (a->mc_addr_in_rar_count > 0) - for (i = (a->rar_used_count - a->mc_addr_in_rar_count); - i < a->rar_used_count; i++) + for (i = (rar_entries - a->mc_addr_in_rar_count); + i < rar_entries; i++) ixgbe_disable_rar(hw, i); if (a->mta_in_use > 0) @@ -1477,13 +1654,13 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) u32 offset; u32 vlanbyte; - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) - for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++) + for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), - 0); + 0); return IXGBE_SUCCESS; } @@ -1498,7 +1675,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) * Turn on/off specified VLAN in the VLAN filter table. 
**/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on) { u32 VftaIndex; u32 BitOffset; @@ -1519,7 +1696,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); /* Determine the location of the bit for this VLAN id */ - BitOffset = vlan & 0x1F; /* lower five bits */ + BitOffset = vlan & 0x1F; /* lower five bits */ VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); if (vlan_on) @@ -1533,107 +1710,6 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, return IXGBE_SUCCESS; } -/** - * ixgbe_setup_fc_generic - Configure flow control settings - * @hw: pointer to hardware structure - * @packetbuf_num: packet buffer number (0-7) - * - * Configures the flow control settings based on SW configuration. - * This function is used for 802.3x flow control configuration only. - **/ -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packetbuf_num) -{ - u32 frctl_reg; - u32 rmcs_reg; - - if (packetbuf_num < 0 || packetbuf_num > 7) { - DEBUGOUT1("Invalid packet buffer number [%d], expected range is" - " 0-7\n", packetbuf_num); - ASSERT(0); - } - - frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); - - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); - - /* - * We want to save off the original Flow Control configuration just in - * case we get disconnected and then reconnected into a different hub - * or switch with different Flow Control capabilities. - */ - hw->fc.type = hw->fc.original_type; - - /* - * The possible values of the "flow_control" parameter are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames but not - * send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but we do not - * support receiving pause frames) - * 3: Both Rx and TX flow control (symmetric) are enabled. - * other: Invalid. - */ - switch (hw->fc.type) { - case ixgbe_fc_none: - break; - case ixgbe_fc_rx_pause: - /* - * RX Flow control is enabled, - * and TX Flow control is disabled. - */ - frctl_reg |= IXGBE_FCTRL_RFCE; - break; - case ixgbe_fc_tx_pause: - /* - * TX Flow control is enabled, and RX Flow control is disabled, - * by a software over-ride. - */ - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - case ixgbe_fc_full: - /* - * Flow control (both RX and TX) is enabled by a software - * over-ride. - */ - frctl_reg |= IXGBE_FCTRL_RFCE; - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; - break; - default: - /* We should never get here. The value should be 0-3. */ - DEBUGOUT("Flow control param set incorrectly\n"); - ASSERT(0); - break; - } - - /* Enable 802.3x based flow control settings. */ - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); - - /* - * We need to set up the Receive Threshold high and low water - * marks as well as (optionally) enabling the transmission of - * XON frames. 
- */
-	if (hw->fc.type & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-				(hw->fc.low_water | IXGBE_FCRTL_XONE));
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-				hw->fc.low_water);
-		}
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-			(hw->fc.high_water)|IXGBE_FCRTH_FCEN);
-	}
-
-	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
-	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
-
-	return IXGBE_SUCCESS;
-}
-
 /**
  * ixgbe_disable_pcie_master - Disable PCI-express master access
  * @hw: pointer to hardware structure
@@ -1666,11 +1742,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 
 /**
- * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore
+ * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
  * @hw: pointer to hardware structure
- * @mask: Mask to specify wich semaphore to acquire
+ * @mask: Mask to specify which semaphore to acquire
  *
- * Aquires the SWFW semaphore throught the GSSR register for the specified
+ * Acquires the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1712,9 +1788,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 /**
  * ixgbe_release_swfw_sync - Release SWFW semaphore
  * @hw: pointer to hardware structure
- * @mask: Mask to specify wich semaphore to release
+ * @mask: Mask to specify which semaphore to release
  *
- * Releases the SWFW semaphore throught the GSSR register for the specified
+ * Releases the SWFW semaphore through the GSSR register for the specified
  * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1731,3 +1807,44 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 	ixgbe_release_eeprom_semaphore(hw);
 }
 
+/**
+ * ixgbe_read_analog_reg8_generic - Reads 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: analog register to read
+ * @val: read value
+ *
+ * Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+	u32 atlas_ctl;
+
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+	atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+	*val = (u8)atlas_ctl;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_analog_reg8_generic - Writes 8 bit Atlas analog register
+ * @hw: pointer to hardware structure
+ * @reg: atlas register to write
+ * @val: value to write
+ *
+ * Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+	u32 atlas_ctl;
+
+	atlas_ctl = (reg << 8) | val;
+	IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
+	IXGBE_WRITE_FLUSH(hw);
+	usec_delay(10);
+
+	return IXGBE_SUCCESS;
+}
+
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index 236ce7526d11..c916d93dde44 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -1,6 +1,6 @@
-/*******************************************************************************
+/******************************************************************************
 
-  Copyright (c) 2001-2007, Intel Corporation
+  Copyright (c) 2001-2008, Intel Corporation
   All rights reserved.
Redistribution and use in source and binary forms, with or without @@ -29,52 +29,54 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*******************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifndef _IXGBE_COMMON_H_ #define _IXGBE_COMMON_H_ #include "ixgbe_type.h" -s32 ixgbe_assign_func_pointers_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, - u16 *data); + u16 *data); s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, - u16 *checksum_val); + u16 *checksum_val); s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, - u32 vind, u32 enable_addr); +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, - u32 mc_addr_count, u32 pad); + u32 mc_addr_count, + ixgbe_mc_addr_itr func); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); - -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); + u32 vind, bool vlan_on); s32 ixgbe_validate_mac_addr(u8 *mac_addr); s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); #endif /* IXGBE_COMMON */ diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h index d995c7c97faf..ac7a39cb98c8 100644 --- a/sys/dev/ixgbe/ixgbe_osdep.h +++ b/sys/dev/ixgbe/ixgbe_osdep.h @@ -1,36 +1,36 @@ -/************************************************************************** +/****************************************************************************** -Copyright (c) 2001-2007, Intel Corporation -All rights reserved. + Copyright (c) 2001-2008, Intel Corporation + All rights reserved. 
+
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+
+   1. Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+   2. Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+
+   3. Neither the name of the Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
-    this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
- 3. Neither the name of the Intel Corporation nor the names of its
-    contributors may be used to endorse or promote products derived from
-    this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-***************************************************************************/
-/* $FreeBSD$ */
+******************************************************************************/
+/*$FreeBSD$*/
 
 #ifndef _IXGBE_OS_H_
 #define _IXGBE_OS_H_
@@ -91,6 +91,16 @@ typedef boolean_t bool;
 
 #define le16_to_cpu
 
+#if defined(__i386__) || defined(__amd64__)
+#define mb()	__asm volatile("mfence" ::: "memory")
+#define wmb()	__asm volatile("sfence" ::: "memory")
+#define rmb()	__asm volatile("lfence" ::: "memory")
+#else
+#define mb()
+#define rmb()
+#define wmb()
+#endif
+
 struct ixgbe_osdep
 {
 	bus_space_tag_t mem_bus_space_tag;
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index e19d303eaeda..134e31028b64 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -1,6 +1,6 @@
-/*******************************************************************************
+/******************************************************************************
 
-  Copyright (c) 2001-2007, Intel Corporation
+  Copyright (c) 2001-2008, Intel Corporation
   All rights reserved.
 
   Redistribution and use in source and binary forms, with or without
@@ -29,43 +29,32 @@
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
 
-*******************************************************************************/
-/* $FreeBSD$ */
-
+******************************************************************************/
+/*$FreeBSD$*/
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
 /**
- * ixgbe_init_shared_code_phy - Initialize PHY shared code
- * @hw: pointer to hardware structure
- **/
-s32 ixgbe_init_shared_code_phy(struct ixgbe_hw *hw)
-{
-	/* Assign function pointers */
-	ixgbe_assign_func_pointers_phy(hw);
-
-	return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_assign_func_pointers_phy - Assigns PHY-specific function pointers
- * @hw: pointer to hardware structure
+ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ * @hw: pointer to the hardware structure
  *
- * Note, generic function pointers have already been assigned, so the
- * function pointers set here are only for PHY-specific functions.
+ * Initialize the function pointers.
**/ -s32 ixgbe_assign_func_pointers_phy(struct ixgbe_hw *hw) +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) { - hw->func.ixgbe_func_reset_phy = - &ixgbe_reset_phy_generic; - hw->func.ixgbe_func_read_phy_reg = - &ixgbe_read_phy_reg_generic; - hw->func.ixgbe_func_write_phy_reg = - &ixgbe_write_phy_reg_generic; - hw->func.ixgbe_func_identify_phy = - &ixgbe_identify_phy_generic; + struct ixgbe_phy_info *phy = &hw->phy; + + /* PHY */ + phy->ops.identify = &ixgbe_identify_phy_generic; + phy->ops.reset = &ixgbe_reset_phy_generic; + phy->ops.read_reg = &ixgbe_read_phy_reg_generic; + phy->ops.write_reg = &ixgbe_write_phy_reg_generic; + phy->ops.setup_link = &ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = NULL; return IXGBE_SUCCESS; } @@ -81,15 +70,21 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 phy_addr; - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { - if (ixgbe_validate_phy_addr(hw, phy_addr)) { - hw->phy.addr = phy_addr; - ixgbe_get_phy_id(hw); - hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); - status = IXGBE_SUCCESS; - break; + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + status = IXGBE_SUCCESS; + break; + } } + } else { + status = IXGBE_SUCCESS; } + return status; } @@ -104,10 +99,8 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) bool valid = FALSE; hw->phy.addr = phy_addr; - ixgbe_read_phy_reg_generic(hw, - IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); if (phy_id != 0xFFFF && phy_id != 0x0) valid = TRUE; @@ -126,17 +119,15 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) u16 phy_id_high = 0; u16 phy_id_low = 0; - status = ixgbe_read_phy_reg_generic(hw, - IXGBE_MDIO_PHY_ID_HIGH, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_high); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); if (status == IXGBE_SUCCESS) { hw->phy.id = (u32)(phy_id_high << 16); - status = ixgbe_read_phy_reg_generic(hw, - IXGBE_MDIO_PHY_ID_LOW, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &phy_id_low); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } @@ -154,14 +145,21 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) enum ixgbe_phy_type phy_type; switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; case QT2022_PHY_ID: phy_type = ixgbe_phy_qt; break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; default: phy_type = ixgbe_phy_unknown; break; } + DEBUGOUT1("phy type found is %d\n", phy_type); return phy_type; } @@ -175,9 +173,9 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * Perform soft PHY reset to the PHY_XS. 
* This will cause a soft reset to the PHY */ - return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, - IXGBE_MDIO_PHY_XS_DEV_TYPE, - IXGBE_MDIO_PHY_XS_RESET); + return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); } /** @@ -187,7 +185,7 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data) + u32 device_type, u16 *phy_data) { u32 command; u32 i; @@ -206,9 +204,9 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, if (status == IXGBE_SUCCESS) { /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -238,9 +236,9 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -274,6 +272,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, ixgbe_release_swfw_sync(hw, gssr); } + return status; } @@ -285,7 +284,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) + u32 device_type, u16 phy_data) { u32 command; u32 i; @@ -306,9 +305,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -322,14 +321,14 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { - DEBUGFUNC("PHY address cmd didn't complete\n"); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; - } } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY address cmd didn't complete\n"); status = IXGBE_ERR_PHY; + } if (status == IXGBE_SUCCESS) { /* @@ -337,9 +336,9 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); @@ -353,15 
+352,14 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, command = IXGBE_READ_REG(hw, IXGBE_MSCA); - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { - DEBUGFUNC("PHY write command did not " - "complete.\n"); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; - } } - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY address cmd didn't complete\n"); status = IXGBE_ERR_PHY; + } } ixgbe_release_swfw_sync(hw, gssr); @@ -371,45 +369,303 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, } /** - * ixgbe_setup_phy_link - Restart PHY autoneg + * ixgbe_setup_phy_link_generic - Set and restart autoneg * @hw: pointer to hardware structure * * Restart autonegotiation and PHY and waits for completion. **/ -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - return ixgbe_call_func(hw, ixgbe_func_setup_phy_link, (hw), - IXGBE_NOT_IMPLEMENTED); + s32 status = IXGBE_NOT_IMPLEMENTED; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + + /* + * Set advertisement settings in PHY based on autoneg_advertised + * settings. If autoneg_advertised = 0, then advertise default values + * tnx devices cannot be "forced" to a autoneg 10G and fail. But can + * for a 1G. + */ + hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ + else + autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ + + hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { + status = IXGBE_SUCCESS; + break; + } + } + + if (time_out == max_time_out) + status = IXGBE_ERR_LINK_SETUP; + + return status; } /** - * ixgbe_check_phy_link - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads a PHY register to determine if link is up and the current speed for - * the PHY. 
- **/ -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - return ixgbe_call_func(hw, ixgbe_func_check_phy_link, (hw, speed, - link_up), IXGBE_NOT_IMPLEMENTED); -} - -/** - * ixgbe_setup_phy_link_speed - Set auto advertise + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: TRUE if autonegotiation enabled - * - * Sets the auto advertised capabilities **/ -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete) +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) { - return ixgbe_call_func(hw, ixgbe_func_setup_phy_link_speed, (hw, speed, - autoneg, autoneg_wait_to_complete), - IXGBE_NOT_IMPLEMENTED); + UNREFERENCED_PARAMETER(autoneg); + UNREFERENCED_PARAMETER(autoneg_wait_to_complete); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. + */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + } + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + } + + /* Setup link based on the new speed settings */ + hw->phy.ops.setup_link(hw); + + return IXGBE_SUCCESS; } +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = FALSE; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. 
+ */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = TRUE; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, list_crc, block_crc, id, sfp_id; + bool end_data = FALSE; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val = IXGBE_SUCCESS; + u32 i; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0 ) + break; + msec_delay(10); + } + + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* read offset to PHY init contents */ + hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, &list_offset); + + if ((!list_offset) || (list_offset == 0xFFFF)) { + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* Acquire the CRC */ + hw->eeprom.ops.read(hw, list_offset, &list_crc); + + /* Shift offset to first ID word */ + list_offset++; + + /* determine the sfp sequence based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + sfp_id = 0; + break; + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + sfp_id = 1; + break; + default: + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* + * Find the matching sfp ID in the EEPROM + * and program the init sequence + */ + hw->eeprom.ops.read(hw, list_offset, &id); + + while (!((id == IXGBE_CONTROL_EOL_NL) || (end_data == TRUE))) { + if (id == sfp_id) { + list_offset++; + hw->eeprom.ops.read(hw, list_offset, &data_offset); + if ((!data_offset) || (data_offset == 0xFFFF)) + break; + ret_val = hw->eeprom.ops.read(hw, data_offset, + &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents + * offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + DEBUGOUT1("DELAY: %d MS\n", edata); + msec_delay(edata); + break; + case IXGBE_DATA_NL: + DEBUGOUT("DATA: \n"); + data_offset++; + hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + for (i = 0; i < edata; i++) { + 
hw->eeprom.ops.read(hw, + data_offset, + &eword); + hw->phy.ops.write_reg(hw, + phy_offset, + IXGBE_TWINAX_DEV, + eword); + DEBUGOUT2("Wrote %4.4x to %4.4x\n", + eword, phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + DEBUGOUT("CONTROL: \n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + DEBUGOUT("EOL\n"); + end_data = TRUE; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + DEBUGOUT("SOL\n"); + } else { + DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: + DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + } + } else { + list_offset += 2; + ret_val = hw->eeprom.ops.read(hw, list_offset, &id); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h index 4313de1fbe06..0f31dd52f6c9 100644 --- a/sys/dev/ixgbe/ixgbe_phy.h +++ b/sys/dev/ixgbe/ixgbe_phy.h @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, Intel Corporation + Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,32 +29,36 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*******************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifndef _IXGBE_PHY_H_ #define _IXGBE_PHY_H_ #include "ixgbe_type.h" -s32 ixgbe_init_shared_code_phy(struct ixgbe_hw *hw); -s32 ixgbe_assign_func_pointers_phy(struct ixgbe_hw *hw); -s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); -s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *link_up); -s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg, - bool autoneg_wait_to_complete); +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 *phy_data); + u32 device_type, u16 *phy_data); s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data); + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); #endif /* _IXGBE_PHY_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h index bfbd701e47ba..8df3f78237e9 100644 --- a/sys/dev/ixgbe/ixgbe_type.h +++ b/sys/dev/ixgbe/ixgbe_type.h @@ -1,6 +1,6 @@ -/******************************************************************************* +/****************************************************************************** - Copyright (c) 2001-2007, Intel Corporation + 
Copyright (c) 2001-2008, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without @@ -29,8 +29,8 @@ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*******************************************************************************/ -/* $FreeBSD$ */ +******************************************************************************/ +/*$FreeBSD$*/ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ @@ -43,7 +43,13 @@ /* Device IDs */ #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10D7 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 /* General Registers */ #define IXGBE_CTRL 0x00000 @@ -74,11 +80,11 @@ #define IXGBE_EIMC 0x00888 #define IXGBE_EIAC 0x00810 #define IXGBE_EIAM 0x00890 -#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ -#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4))) +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ -#define IXGBE_PBACL 0x11068 +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) #define IXGBE_GPIE 0x00898 /* Flow Control Registers */ @@ -90,19 +96,33 @@ #define IXGBE_TFCS 0x0CE00 /* Receive DMA Registers */ -#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ -#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) -#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) -#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) -#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) -#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) -#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) - /* array of 16 (0x02100-0x0213C) */ -#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) - /* array of 16 (0x02200-0x0223C) */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40))) +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + ((_i - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? 
(0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + ((_i - 64) * 0x40)))) #define IXGBE_RDRXCTL 0x02F00 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) - /* 8 of these 0x03C00 - 0x03C1C */ + /* 8 of these 0x03C00 - 0x03C1C */ #define IXGBE_RXCTRL 0x03000 #define IXGBE_DROPEN 0x03D04 #define IXGBE_RXPBSIZE_SHIFT 10 @@ -110,29 +130,30 @@ /* Receive Registers */ #define IXGBE_RXCSUM 0x05000 #define IXGBE_RFCTL 0x05008 +/* Multicast Table Array - 128 entries */ #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) - /* Multicast Table Array - 128 entries */ -#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ -#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ -#define IXGBE_PSRTYPE 0x05480 - /* 0x5480-0x54BC Packet split receive type */ +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8))) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) - /* array of 4096 1-bit vlan filters */ +/*array of 4096 4-bit vlan vmdq indices */ #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) - /*array of 4096 4-bit vlan vmdq indicies */ #define IXGBE_FCTRL 0x05080 #define IXGBE_VLNCTRL 0x05088 #define IXGBE_MCSTCTRL 0x05090 #define IXGBE_MRQC 0x05818 -#define IXGBE_VMD_CTL 0x0581C #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + /* Transmit DMA registers */ -#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) @@ -141,11 +162,10 @@ #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) #define IXGBE_DTXCTL 0x07E00 -#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) - /* there are 16 of these (0-15) */ + +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ #define IXGBE_TIPG 0x0CB00 -#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) - /* there are 8 of these */ +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) /* 8 of these */ #define IXGBE_MNGTXMAP 0x0CD10 #define IXGBE_TIPG_FIBER_DEFAULT 3 #define IXGBE_TXPBSIZE_SHIFT 10 @@ -173,6 +193,35 @@ #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define 
IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + + /* Stats registers */ #define IXGBE_CRCERRS 0x04000 #define IXGBE_ILLERRC 0x04004 @@ -227,7 +276,7 @@ #define IXGBE_XEC 0x04120 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ -#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4))) #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ @@ -385,7 +434,7 @@ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -433,6 +482,7 @@ #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ @@ -449,22 +499,49 @@ #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ -#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MII_SPEED_SELECTION_REG 0x10 +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_REG 0x0 + #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 #define IXGBE_MAX_PHY_ADDR 32 /* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB #define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 /* General purpose Interrupt Enable */ -#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ -#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ -#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ -#define IXGBE_GPIE_EIAME 0x40000000 -#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 
0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 /* Transmit Flow Control status */ #define IXGBE_TFCS_TXOFF 0x00000001 @@ -525,7 +602,7 @@ #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ /* RMCS Bit Masks */ -#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ #define IXGBE_RMCS_RAC 0x00000004 #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ @@ -533,12 +610,15 @@ #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + /* Interrupt register bitmasks */ /* Extended Interrupt Cause Read */ #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ -#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ @@ -546,8 +626,9 @@ /* Extended Interrupt Cause Set */ #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ @@ -556,6 +637,8 @@ /* Extended Interrupt Mask Set */ #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ @@ -565,6 +648,8 @@ /* Extended Interrupt Mask Clear */ #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* Gen Purpose Interrupt on SDP1 */ #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ @@ -573,12 +658,12 @@ #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ #define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_LSC | \ - IXGBE_EIMS_TCP_TIMER | \ - IXGBE_EIMS_OTHER) + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) -/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ +/* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ @@ -615,6 +700,7 @@ #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ /* STATUS Bit Masks */ @@ -697,6 +783,7 @@ #define IXGBE_LINKS_TL_FAULT 0x00001000 #define IXGBE_LINKS_SIGNAL 0x00000F00 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ #define FIBER_LINK_UP_LIMIT 50 @@ -770,6 +857,8 @@ #define IXGBE_CSR0_CONFIG_PTR 0x0D #define IXGBE_CSR1_CONFIG_PTR 0x0E #define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 /* Legacy EEPROM word offsets */ #define IXGBE_ISCSI_BOOT_CAPS 0x0033 @@ -783,7 +872,7 @@ #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ -/* EEPROM reset Write Enbale latch */ +/* EEPROM reset Write Enable latch */ #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ @@ -824,27 +913,20 @@ /* Number of 100 microseconds we wait for PCI Express master disable */ #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 -/* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 - /* Check whether address is multicast. This is little-endian specific check.*/ #define IXGBE_IS_MULTICAST(Address) \ - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) /* Check whether an address is broadcast. 
*/ #define IXGBE_IS_BROADCAST(Address) \ - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ - (((u8 *)(Address))[1] == ((u8)0xff))) + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) /* RAH */ #define IXGBE_RAH_VIND_MASK 0x003C0000 #define IXGBE_RAH_VIND_SHIFT 18 #define IXGBE_RAH_AV 0x80000000 -/* Filters */ -#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ -#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ - /* Header split receive */ #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 #define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E @@ -885,7 +967,7 @@ #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ -/* Receive Priority Flow Control Enbale */ +/* Receive Priority Flow Control Enable */ #define IXGBE_FCTRL_RPFCE 0x00004000 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ @@ -915,9 +997,8 @@ /* Receive Descriptor bit definitions */ #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ -#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ -#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ @@ -993,18 +1074,18 @@ /* Masks to determine if packets should be dropped due to frame errors */ #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXD_ERR_CE | \ - IXGBE_RXD_ERR_LE | \ - IXGBE_RXD_ERR_PE | \ - IXGBE_RXD_ERR_OSE | \ - IXGBE_RXD_ERR_USE) + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ - IXGBE_RXDADV_ERR_CE | \ - IXGBE_RXDADV_ERR_LE | \ - IXGBE_RXDADV_ERR_PE | \ - IXGBE_RXDADV_ERR_OSE | \ - IXGBE_RXDADV_ERR_USE) + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) /* Multicast bit mask */ #define IXGBE_MCSTCTRL_MFE 0x4 @@ -1020,23 +1101,33 @@ #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT +#ifndef __le16 +/* Little Endian defines */ +#define __le8 u8 +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#endif + + /* Transmit Descriptor - Legacy */ struct ixgbe_legacy_tx_desc { u64 buffer_addr; /* Address of the descriptor's data buffer */ union { - u32 data; + __le32 data; struct { - u16 length; /* Data buffer length */ - u8 cso; /* Checksum offset */ - u8 cmd; /* Descriptor control */ + __le16 length; /* Data buffer length */ + __le8 cso; /* Checksum offset */ + __le8 cmd; /* Descriptor control */ } flags; } lower; union { - u32 data; + __le32 data; struct { - u8 status; /* Descriptor status */ - u8 css; /* Checksum start */ - u16 vlan; + __le8 status; /* Descriptor status */ + __le8 css; /* Checksum start */ + __le16 vlan; } fields; } upper; }; @@ -1044,61 +1135,64 @@ struct ixgbe_legacy_tx_desc { /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { - u64 buffer_addr; /* Address of descriptor's data buf */ - u32 cmd_type_len; - u32 olinfo_status; + __le64 buffer_addr; /* Address of descriptor's data buf */ + 
__le32 cmd_type_len; + __le32 olinfo_status; } read; struct { - u64 rsvd; /* Reserved */ - u32 nxtseq_seed; - u32 status; + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; } wb; }; /* Receive Descriptor - Legacy */ struct ixgbe_legacy_rx_desc { - u64 buffer_addr; /* Address of the descriptor's data buffer */ - u16 length; /* Length of data DMAed into data buffer */ - u16 csum; /* Packet checksum */ - u8 status; /* Descriptor status */ - u8 errors; /* Descriptor Errors */ - u16 vlan; + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + __le8 status; /* Descriptor status */ + __le8 errors; /* Descriptor Errors */ + __le16 vlan; }; /* Receive Descriptor - Advanced */ union ixgbe_adv_rx_desc { struct { - u64 pkt_addr; /* Packet buffer address */ - u64 hdr_addr; /* Header buffer address */ + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { - struct { - u16 pkt_info; /* RSS type, Packet type */ - u16 hdr_info; /* Split Header, header len */ + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS type, Packet type */ + __le16 hdr_info; /* Split Header, header len */ + } hs_rss; } lo_dword; union { - u32 rss; /* RSS Hash */ + __le32 rss; /* RSS Hash */ struct { - u16 ip_id; /* IP id */ - u16 csum; /* Packet Checksum */ + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { - u32 status_error; /* ext status/error */ - u16 length; /* Packet length */ - u16 vlan; /* VLAN tag */ + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; /* Context descriptors */ struct ixgbe_adv_tx_context_desc { - u32 vlan_macip_lens; - u32 seqnum_seed; - u32 type_tucmd_mlhl; - u32 mss_l4len_idx; + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; }; /* Adv Transmit Descriptor Config Masks */ @@ -1108,7 +1202,6 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ -#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ #define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ @@ -1118,27 +1211,28 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) + IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ - IXGBE_ADVTXD_POPTS_SHIFT) -#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ -#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ -#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 
1st&Last TSO-full iSCSI PDU*/ -#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ -#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ -#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ -#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ -#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ -#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ -#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ -#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ -#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ -#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ -#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Autonegotiation advertised speeds */ typedef u32 ixgbe_autoneg_advertised; @@ -1148,7 +1242,8 @@ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 - +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, @@ -1164,15 +1259,19 @@ enum ixgbe_mac_type { enum ixgbe_phy_type { ixgbe_phy_unknown = 0, + ixgbe_phy_tn, ixgbe_phy_qt, - ixgbe_phy_xaui + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_generic }; enum ixgbe_media_type { ixgbe_media_type_unknown = 0, ixgbe_media_type_fiber, ixgbe_media_type_copper, - ixgbe_media_type_backplane + ixgbe_media_type_backplane, + ixgbe_media_type_virtual }; /* Flow Control Settings */ @@ -1211,24 +1310,20 @@ enum ixgbe_bus_width { ixgbe_bus_width_unknown = 0, ixgbe_bus_width_pcie_x1, ixgbe_bus_width_pcie_x2, - ixgbe_bus_width_pcie_x4, - ixgbe_bus_width_pcie_x8, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, ixgbe_bus_width_32, ixgbe_bus_width_64, ixgbe_bus_width_reserved }; -struct ixgbe_eeprom_info { - enum ixgbe_eeprom_type type; - u16 word_size; - u16 address_bits; -}; - struct ixgbe_addr_filter_info { u32 num_mc_addrs; u32 rar_used_count; u32 mc_addr_in_rar_count; u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; }; /* Bus parameters */ @@ -1308,93 +1403,110 @@ struct ixgbe_hw_stats { u64 qbtc[16]; }; - /* forward declaration */ struct ixgbe_hw; +/* iterator type for walking 
multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + /* Function pointer table */ -struct ixgbe_functions -{ - s32 (*ixgbe_func_init_hw)(struct ixgbe_hw *); - s32 (*ixgbe_func_reset_hw)(struct ixgbe_hw *); - s32 (*ixgbe_func_start_hw)(struct ixgbe_hw *); - s32 (*ixgbe_func_clear_hw_cntrs)(struct ixgbe_hw *); - enum ixgbe_media_type (*ixgbe_func_get_media_type)(struct ixgbe_hw *); - s32 (*ixgbe_func_get_mac_addr)(struct ixgbe_hw *, u8 *); - u32 (*ixgbe_func_get_num_of_tx_queues)(struct ixgbe_hw *); - u32 (*ixgbe_func_get_num_of_rx_queues)(struct ixgbe_hw *); - s32 (*ixgbe_func_stop_adapter)(struct ixgbe_hw *); - s32 (*ixgbe_func_get_bus_info)(struct ixgbe_hw *); - s32 (*ixgbe_func_read_analog_reg8)(struct ixgbe_hw*, u32, u8*); - s32 (*ixgbe_func_write_analog_reg8)(struct ixgbe_hw*, u32, u8); - /* PHY */ - s32 (*ixgbe_func_identify_phy)(struct ixgbe_hw *); - s32 (*ixgbe_func_reset_phy)(struct ixgbe_hw *); - s32 (*ixgbe_func_read_phy_reg)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*ixgbe_func_write_phy_reg)(struct ixgbe_hw *, u32, u32, u16); - s32 (*ixgbe_func_setup_phy_link)(struct ixgbe_hw *); - s32 (*ixgbe_func_setup_phy_link_speed)(struct ixgbe_hw *, - ixgbe_link_speed, - bool, bool); - s32 (*ixgbe_func_check_phy_link)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); /* Link */ - s32 (*ixgbe_func_setup_link)(struct ixgbe_hw *); - s32 (*ixgbe_func_setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, - bool, bool); - s32 (*ixgbe_func_check_link)(struct ixgbe_hw *, ixgbe_link_speed *, - bool *); - s32 (*ixgbe_func_get_link_settings)(struct ixgbe_hw *, - ixgbe_link_speed *, - bool *); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); /* LED */ - s32 (*ixgbe_func_led_on)(struct ixgbe_hw *, u32); - s32 (*ixgbe_func_led_off)(struct ixgbe_hw *, u32); - s32 (*ixgbe_func_blink_led_start)(struct ixgbe_hw *, u32); - s32 (*ixgbe_func_blink_led_stop)(struct ixgbe_hw *, u32); - - /* EEPROM */ - s32 (*ixgbe_func_init_eeprom_params)(struct ixgbe_hw *); - s32 (*ixgbe_func_read_eeprom)(struct ixgbe_hw *, u16, u16 *); - s32 (*ixgbe_func_write_eeprom)(struct ixgbe_hw *, u16, u16); - s32 (*ixgbe_func_validate_eeprom_checksum)(struct ixgbe_hw *, u16 *); - s32 (*ixgbe_func_update_eeprom_checksum)(struct ixgbe_hw *); + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); /* RAR, Multicast, VLAN */ - s32 (*ixgbe_func_set_rar)(struct ixgbe_hw *, u32, u8 *, 
u32 , u32); - s32 (*ixgbe_func_init_rx_addrs)(struct ixgbe_hw *); - u32 (*ixgbe_func_get_num_rx_addrs)(struct ixgbe_hw *); - s32 (*ixgbe_func_update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, - u32); - s32 (*ixgbe_func_enable_mc)(struct ixgbe_hw *); - s32 (*ixgbe_func_disable_mc)(struct ixgbe_hw *); - s32 (*ixgbe_func_clear_vfta)(struct ixgbe_hw *); - s32 (*ixgbe_func_set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); /* Flow Control */ - s32 (*ixgbe_func_setup_fc)(struct ixgbe_hw *, s32); + s32 (*setup_fc)(struct ixgbe_hw *, s32); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u16 word_size; + u16 address_bits; }; struct ixgbe_mac_info { - enum ixgbe_mac_type type; - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; - s32 mc_filter_type; - u32 link_attach_type; - u32 link_mode_select; - bool link_settings_loaded; - bool autoneg; - bool autoneg_failed; + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 max_tx_queues; + u32 max_rx_queues; + u32 link_attach_type; + u32 link_mode_select; + bool link_settings_loaded; + bool autoneg; + bool autoneg_failed; }; struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; enum ixgbe_phy_type type; u32 addr; u32 id; u32 revision; enum ixgbe_media_type media_type; + bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; bool autoneg_wait_to_complete; }; @@ -1402,7 +1514,6 @@ struct ixgbe_phy_info { struct ixgbe_hw { u8 *hw_addr; void *back; - struct ixgbe_functions func; struct ixgbe_mac_info mac; struct ixgbe_addr_filter_info addr_ctrl; struct ixgbe_fc_info fc; @@ -1417,12 +1528,8 @@ struct ixgbe_hw { bool adapter_stopped; }; - -#define ixgbe_func_from_hw_struct(hw, _func) hw->func._func - #define ixgbe_call_func(hw, func, params, error) \ - (ixgbe_func_from_hw_struct(hw, func) != NULL) ? \ - ixgbe_func_from_hw_struct(hw, func) params: error + (func != NULL) ? 
func params: error /* Error Codes */ #define IXGBE_SUCCESS 0 @@ -1445,8 +1552,6 @@ struct ixgbe_hw { #define IXGBE_ERR_PHY_ADDR_INVALID -17 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF -#ifndef UNREFERENCED_PARAMETER #define UNREFERENCED_PARAMETER(_p) -#endif #endif /* _IXGBE_TYPE_H_ */ diff --git a/sys/dev/ixgbe/tcp_lro.c b/sys/dev/ixgbe/tcp_lro.c new file mode 100644 index 000000000000..b6c3d3840f6e --- /dev/null +++ b/sys/dev/ixgbe/tcp_lro.c @@ -0,0 +1,380 @@ +/****************************************************************************** + +Copyright (c) 2007, Myricom Inc. +Copyright (c) 2008, Intel Corporation. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Neither the name of the Myricom Inc, nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + 3. Neither the name of the Intel Corporation, nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+
+$FreeBSD$
+***************************************************************************/
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+
+#include <net/if.h>
+#include <net/ethernet.h>
+#include <net/if_media.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/tcp.h>
+
+#include <machine/bus.h>
+#include <machine/in_cksum.h>
+
+#include "tcp_lro.h"
+
+static uint16_t do_csum_data(uint16_t *raw, int len)
+{
+	uint32_t csum;
+	csum = 0;
+	while (len > 0) {
+		csum += *raw;
+		raw++;
+		csum += *raw;
+		raw++;
+		len -= 4;
+	}
+	csum = (csum >> 16) + (csum & 0xffff);
+	csum = (csum >> 16) + (csum & 0xffff);
+	return (uint16_t)csum;
+}
+
+/*
+ * Allocate and init the LRO data structures
+ */
+int
+tcp_lro_init(struct lro_ctrl *cntl)
+{
+	struct lro_entry *lro;
+	int i, error = 0;
+
+	SLIST_INIT(&cntl->lro_free);
+	SLIST_INIT(&cntl->lro_active);
+
+	cntl->lro_bad_csum = 0;
+	cntl->lro_queued = 0;
+	cntl->lro_flushed = 0;
+
+	for (i = 0; i < LRO_ENTRIES; i++) {
+		lro = (struct lro_entry *) malloc(sizeof (struct lro_entry),
+		    M_DEVBUF, M_NOWAIT | M_ZERO);
+		if (lro == NULL) {
+			if (i == 0)
+				error = ENOMEM;
+			break;
+		}
+		cntl->lro_cnt = i;
+		SLIST_INSERT_HEAD(&cntl->lro_free, lro, next);
+	}
+
+	return (error);
+}
+
+void
+tcp_lro_free(struct lro_ctrl *cntl)
+{
+	struct lro_entry *entry;
+
+	while (!SLIST_EMPTY(&cntl->lro_free)) {
+		entry = SLIST_FIRST(&cntl->lro_free);
+		SLIST_REMOVE_HEAD(&cntl->lro_free, next);
+		free(entry, M_DEVBUF);
+	}
+}
+
+void
+tcp_lro_flush(struct lro_ctrl *cntl, struct lro_entry *lro)
+{
+	struct ifnet *ifp;
+	struct ip *ip;
+	struct tcphdr *tcp;
+	uint32_t *ts_ptr;
+	uint32_t tcplen, tcp_csum;
+
+//printf("tcp_lro_flush: entry\n");
+
+	if (lro->append_cnt) {
+		/* incorporate the new len into the ip header and
+		 * re-calculate the checksum */
+		ip = lro->ip;
+		ip->ip_len = htons(lro->len - ETHER_HDR_LEN);
+		ip->ip_sum = 0;
+		ip->ip_sum = 0xffff ^
+		    do_csum_data((uint16_t*)ip,
+		    sizeof (*ip));
+
+		lro->m_head->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
+		    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+		lro->m_head->m_pkthdr.csum_data = 0xffff;
+		lro->m_head->m_pkthdr.len = lro->len;
+
+		/* incorporate the latest ack into the tcp header */
+		tcp = (struct tcphdr *) (ip + 1);
+		tcp->th_ack = lro->ack_seq;
+		tcp->th_win = lro->window;
+		/* incorporate latest timestamp into the tcp header */
+		if (lro->timestamp) {
+			ts_ptr = (uint32_t *)(tcp + 1);
+			ts_ptr[1] = htonl(lro->tsval);
+			ts_ptr[2] = lro->tsecr;
+		}
+		/*
+		 * update checksum in tcp header by re-calculating the
+		 * tcp pseudoheader checksum, and adding it to the checksum
+		 * of the tcp payload data
+		 */
+		tcp->th_sum = 0;
+		tcplen = lro->len - sizeof(*ip) - ETHER_HDR_LEN;
+		tcp_csum = lro->data_csum;
+		tcp_csum += in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+		    htons(tcplen + IPPROTO_TCP));
+		tcp_csum += do_csum_data((uint16_t*)tcp,
+		    tcp->th_off << 2);
+		tcp_csum = (tcp_csum & 0xffff) + (tcp_csum >> 16);
+		tcp_csum = (tcp_csum & 0xffff) + (tcp_csum >> 16);
+		tcp->th_sum = 0xffff ^ tcp_csum;
+	}
+	ifp = cntl->ifp;
+	(*ifp->if_input)(cntl->ifp, lro->m_head);
+	cntl->lro_queued += lro->append_cnt + 1;
+	cntl->lro_flushed++;
+	lro->m_head = NULL;
+	lro->timestamp = 0;
+	lro->append_cnt = 0;
+	SLIST_INSERT_HEAD(&cntl->lro_free, lro, next);
+}
+
+int
+tcp_lro_rx(struct lro_ctrl *cntl, struct mbuf *m_head, uint32_t csum)
+{
+	struct ether_header *eh;
+	struct ip *ip;
+	struct tcphdr *tcp;
+	uint32_t *ts_ptr;
+	struct mbuf *m_nxt, *m_tail;
+	struct lro_entry *lro;
+	int hlen, ip_len, tcp_hdr_len, tcp_data_len, tot_len;
+	int opt_bytes, trim;
+	uint32_t seq, tmp_csum, device_mtu;
+
+
+	eh =
mtod(m_head, struct ether_header *); + if (eh->ether_type != htons(ETHERTYPE_IP)) + return 1; + ip = (struct ip *) (eh + 1); + if (ip->ip_p != IPPROTO_TCP) + return 1; + + /* ensure there are no options */ + if ((ip->ip_hl << 2) != sizeof (*ip)) + return -1; + + /* .. and the packet is not fragmented */ + if (ip->ip_off & htons(IP_MF|IP_OFFMASK)) + return -1; + + /* verify that the IP header checksum is correct */ + tmp_csum = do_csum_data((uint16_t *)ip, sizeof (*ip)); + if (__predict_false((tmp_csum ^ 0xffff) != 0)) { + cntl->lro_bad_csum++; + return -1; + } + + /* find the TCP header */ + tcp = (struct tcphdr *) (ip + 1); + + /* Get the TCP checksum if we dont have it */ + if (!csum) + csum = tcp->th_sum; + + /* ensure no bits set besides ack or psh */ + if ((tcp->th_flags & ~(TH_ACK | TH_PUSH)) != 0) + return -1; + + /* check for timestamps. Since the only option we handle are + timestamps, we only have to handle the simple case of + aligned timestamps */ + + opt_bytes = (tcp->th_off << 2) - sizeof (*tcp); + tcp_hdr_len = sizeof (*tcp) + opt_bytes; + ts_ptr = (uint32_t *)(tcp + 1); + if (opt_bytes != 0) { + if (__predict_false(opt_bytes != TCPOLEN_TSTAMP_APPA) || + (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16| + TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP))) + return -1; + } + + ip_len = ntohs(ip->ip_len); + tcp_data_len = ip_len - (tcp->th_off << 2) - sizeof (*ip); + + + /* + * If frame is padded beyond the end of the IP packet, + * then we must trim the extra bytes off the end. + */ + tot_len = m_head->m_pkthdr.len; + trim = tot_len - (ip_len + ETHER_HDR_LEN); + if (trim != 0) { + if (trim < 0) { + /* truncated packet */ + return -1; + } + m_adj(m_head, -trim); + tot_len = m_head->m_pkthdr.len; + } + + m_nxt = m_head; + m_tail = NULL; /* -Wuninitialized */ + while (m_nxt != NULL) { + m_tail = m_nxt; + m_nxt = m_tail->m_next; + } + + hlen = ip_len + ETHER_HDR_LEN - tcp_data_len; + seq = ntohl(tcp->th_seq); + + SLIST_FOREACH(lro, &cntl->lro_active, next) { + if (lro->source_port == tcp->th_sport && + lro->dest_port == tcp->th_dport && + lro->source_ip == ip->ip_src.s_addr && + lro->dest_ip == ip->ip_dst.s_addr) { + /* Try to append it */ + + if (__predict_false(seq != lro->next_seq)) { + /* out of order packet */ + SLIST_REMOVE(&cntl->lro_active, lro, + lro_entry, next); + tcp_lro_flush(cntl, lro); + return -1; + } + + if (opt_bytes) { + uint32_t tsval = ntohl(*(ts_ptr + 1)); + /* make sure timestamp values are increasing */ + if (__predict_false(lro->tsval > tsval || + *(ts_ptr + 2) == 0)) { + return -1; + } + lro->tsval = tsval; + lro->tsecr = *(ts_ptr + 2); + } + + lro->next_seq += tcp_data_len; + lro->ack_seq = tcp->th_ack; + lro->window = tcp->th_win; + lro->append_cnt++; + if (tcp_data_len == 0) { + m_freem(m_head); + return 0; + } + /* subtract off the checksum of the tcp header + * from the hardware checksum, and add it to the + * stored tcp data checksum. 
Byteswap the checksum + * if the total length so far is odd + */ + tmp_csum = do_csum_data((uint16_t*)tcp, + tcp_hdr_len); + csum = csum + (tmp_csum ^ 0xffff); + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + if (lro->len & 0x1) { + /* Odd number of bytes so far, flip bytes */ + csum = ((csum << 8) | (csum >> 8)) & 0xffff; + } + csum = csum + lro->data_csum; + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + lro->data_csum = csum; + + lro->len += tcp_data_len; + + /* adjust mbuf so that m->m_data points to + the first byte of the payload */ + m_adj(m_head, hlen); + /* append mbuf chain */ + lro->m_tail->m_next = m_head; + /* advance the last pointer */ + lro->m_tail = m_tail; + /* flush packet if required */ + device_mtu = cntl->ifp->if_mtu; + if (lro->len > (65535 - device_mtu)) { + SLIST_REMOVE(&cntl->lro_active, lro, + lro_entry, next); + tcp_lro_flush(cntl, lro); + } + return 0; + } + } + + if (SLIST_EMPTY(&cntl->lro_free)) + return -1; + + /* start a new chain */ + lro = SLIST_FIRST(&cntl->lro_free); + SLIST_REMOVE_HEAD(&cntl->lro_free, next); + SLIST_INSERT_HEAD(&cntl->lro_active, lro, next); + lro->source_port = tcp->th_sport; + lro->dest_port = tcp->th_dport; + lro->source_ip = ip->ip_src.s_addr; + lro->dest_ip = ip->ip_dst.s_addr; + lro->next_seq = seq + tcp_data_len; + lro->mss = tcp_data_len; + lro->ack_seq = tcp->th_ack; + lro->window = tcp->th_win; + + /* save the checksum of just the TCP payload by + * subtracting off the checksum of the TCP header from + * the entire hardware checksum + * Since IP header checksum is correct, checksum over + * the IP header is -0. Substracting -0 is unnecessary. + */ + tmp_csum = do_csum_data((uint16_t*)tcp, tcp_hdr_len); + csum = csum + (tmp_csum ^ 0xffff); + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + lro->data_csum = csum; + + lro->ip = ip; + /* record timestamp if it is present */ + if (opt_bytes) { + lro->timestamp = 1; + lro->tsval = ntohl(*(ts_ptr + 1)); + lro->tsecr = *(ts_ptr + 2); + } + lro->len = tot_len; + lro->m_head = m_head; + lro->m_tail = m_tail; + return 0; +} diff --git a/sys/dev/ixgbe/tcp_lro.h b/sys/dev/ixgbe/tcp_lro.h new file mode 100644 index 000000000000..08aac69058b2 --- /dev/null +++ b/sys/dev/ixgbe/tcp_lro.h @@ -0,0 +1,85 @@ +/******************************************************************************* + +Copyright (c) 2006, Myricom Inc. +Copyright (c) 2008, Intel Corporation. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Neither the name of the Myricom Inc, nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + 2. Neither the name of the Intel Corporation, nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + + +$FreeBSD$ + +***************************************************************************/ +#ifndef _TCP_LRO_H_ +#define _TCP_LRO_H_ + +struct lro_entry; +struct lro_entry +{ + SLIST_ENTRY(lro_entry) next; + struct mbuf *m_head; + struct mbuf *m_tail; + int timestamp; + struct ip *ip; + uint32_t tsval; + uint32_t tsecr; + uint32_t source_ip; + uint32_t dest_ip; + uint32_t next_seq; + uint32_t ack_seq; + uint32_t len; + uint32_t data_csum; + uint16_t window; + uint16_t source_port; + uint16_t dest_port; + uint16_t append_cnt; + uint16_t mss; + +}; +SLIST_HEAD(lro_head, lro_entry); + +struct lro_ctrl { + struct ifnet *ifp; + int lro_queued; + int lro_flushed; + int lro_bad_csum; + int lro_cnt; + + struct lro_head lro_active; + struct lro_head lro_free; +}; + + +int tcp_lro_init(struct lro_ctrl *); +void tcp_lro_free(struct lro_ctrl *); +void tcp_lro_flush(struct lro_ctrl *, struct lro_entry *); +int tcp_lro_rx(struct lro_ctrl *, struct mbuf *, uint32_t); + +/* Number of LRO entries - these are per rx queue */ +#define LRO_ENTRIES 8 + +#endif /* _TCP_LRO_H_ */ diff --git a/sys/modules/ixgbe/Makefile b/sys/modules/ixgbe/Makefile index 49832e6e361c..6d500b8f8277 100644 --- a/sys/modules/ixgbe/Makefile +++ b/sys/modules/ixgbe/Makefile @@ -2,7 +2,7 @@ .PATH: ${.CURDIR}/../../dev/ixgbe KMOD = ixgbe SRCS = device_if.h bus_if.h pci_if.h -SRCS += ixgbe.c +SRCS += ixgbe.c tcp_lro.c # Shared source SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_82598.c CFLAGS+= -I${.CURDIR}/../../dev/ixgbe
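
The register accessors reworked near the top of this change (IXGBE_EITR, IXGBE_SRRCTL, IXGBE_RDBAL and friends) no longer assume a single linear register array; they pick a register bank based on the queue index. As an illustration only, and not part of the driver, the small user-space program below copies the IXGBE_SRRCTL() definition from this patch and prints the offsets it selects across the three banks:

/* Illustration only: how the banked IXGBE_SRRCTL() macro from this patch
 * maps a queue index to a register offset.  Not driver code. */
#include <stdio.h>

#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
                          (0x0D014 + ((_i - 64) * 0x40))))

int
main(void)
{
	int idx[] = { 0, 15, 16, 63, 64, 127 };
	unsigned int i;

	/* Indices 0-15, 16-63 and 64-127 land in three different ranges. */
	for (i = 0; i < sizeof(idx) / sizeof(idx[0]); i++)
		printf("SRRCTL(%3d) -> 0x%05X\n", idx[i], IXGBE_SRRCTL(idx[i]));
	return (0);
}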
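tcp_lro.c does all of its header validation and fix-up with 16-bit one's-complement arithmetic: do_csum_data() adds 16-bit words and folds the carries back in, so a header whose checksum field is correct sums to 0xffff, which is exactly what the (tmp_csum ^ 0xffff) != 0 test in tcp_lro_rx() checks. The stand-alone program below re-implements that folding in a hypothetical helper, fold_sum(), over a fabricated header to show the arithmetic; it is illustrative only and is not the driver helper itself.

/* Illustration only: the one's-complement folding used by do_csum_data(),
 * applied to a fabricated 20-byte IPv4-style header. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t
fold_sum(const uint16_t *raw, int len)
{
	uint32_t csum = 0;

	while (len > 0) {	/* len assumed to be a multiple of 2 */
		csum += *raw++;
		len -= 2;
	}
	csum = (csum >> 16) + (csum & 0xffff);
	csum = (csum >> 16) + (csum & 0xffff);
	return ((uint16_t)csum);
}

int
main(void)
{
	uint16_t hdr[10] = {	/* fabricated header, checksum word at [5] */
		0x4500, 0x0054, 0x1c46, 0x4000, 0x4006,
		0x0000, 0xc0a8, 0x0001, 0xc0a8, 0x00c7
	};

	/* Store the complement of the folded sum, as tcp_lro_flush() does. */
	hdr[5] = 0xffff ^ fold_sum(hdr, sizeof(hdr));

	/* Re-summing the whole header now yields 0xffff, so the receive-side
	 * test (sum ^ 0xffff) != 0 only fires on a corrupted header. */
	assert(fold_sum(hdr, sizeof(hdr)) == 0xffff);
	printf("stored checksum 0x%04x, verify sum 0x%04x\n",
	    hdr[5], fold_sum(hdr, sizeof(hdr)));
	return (0);
}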
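tcp_lro.h gives the driver a small per-RX-queue API: tcp_lro_init() fills the free-entry pool, tcp_lro_rx() either merges a frame into an active chain (returning 0) or declines it, and tcp_lro_flush() hands a completed chain to the stack through if_input. The sketch below shows one plausible way an RX cleanup path could drive that API; the rx_ring structure and the rx_input()/rx_done() names are hypothetical placeholders, not the actual ixgbe.c changes (which are outside this part of the patch).

/* Hypothetical consumer of the tcp_lro API added here; "struct rx_ring",
 * rx_input() and rx_done() are illustrative names only. */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include "tcp_lro.h"

struct rx_ring {			/* placeholder per-queue state */
	struct ifnet	*ifp;
	struct lro_ctrl	lro;
	int		lro_enabled;	/* mirrors dev.ix.X.enable_lro */
};

static void
rx_input(struct rx_ring *rxr, struct mbuf *m, uint32_t hw_csum)
{
	/* Try to merge into an active LRO chain; 0 means it was consumed. */
	if (rxr->lro_enabled && tcp_lro_rx(&rxr->lro, m, hw_csum) == 0)
		return;
	/* Not LRO material (non-TCP, IP options, bad csum, ...): pass it up. */
	(*rxr->ifp->if_input)(rxr->ifp, m);
}

static void
rx_done(struct rx_ring *rxr)
{
	struct lro_entry *queued;

	/* At the end of an interrupt pass, push partial chains upstream. */
	while ((queued = SLIST_FIRST(&rxr->lro.lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&rxr->lro.lro_active, next);
		tcp_lro_flush(&rxr->lro, queued);
	}
}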