ixgbe: update base driver

Signed-off-by: Intel
Intel, 2012-12-20 00:00:00 +01:00, committed by Thomas Monjalon
parent a02286c8d7
commit aa4fc14d2c
25 changed files with 1748 additions and 11161 deletions


@ -85,7 +85,7 @@
* };
*
* struct device devices[] = {
* #define RTE_PCI_DEV_ID_DECL(vendorID, deviceID) {vend, dev},
* #define RTE_PCI_DEV_ID_DECL_IXGBE(vendorID, deviceID) {vend, dev},
* #include <rte_pci_dev_ids.h>
* };
* @endcode
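As context for the macro rename above, here is a minimal sketch of how a consumer builds a device table from this header; the struct and table names are illustrative, not taken from the tree:

/* Minimal sketch: each RTE_PCI_DEV_ID_DECL_IXGBE() occurrence in
 * rte_pci_dev_ids.h expands to one initializer, so the table tracks the
 * header automatically; all other declaration macros default to empty. */
#include <stdint.h>

struct pci_id {
	uint16_t vendor_id;
	uint16_t device_id;
};

static const struct pci_id ixgbe_pci_tbl[] = {
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {(vend), (dev)},
#include <rte_pci_dev_ids.h>
	{0, 0} /* sentinel */
};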
@ -105,6 +105,14 @@
#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev)
#endif
#ifndef RTE_PCI_DEV_ID_DECL_IXGBE
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev)
#endif
#ifndef RTE_PCI_DEV_ID_DECL_IXGBEVF
#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev)
#endif
#ifndef PCI_VENDOR_ID_INTEL
/** Vendor ID used by Intel devices */
#define PCI_VENDOR_ID_INTEL 0x8086
@ -313,7 +321,6 @@ RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE)
RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
/****************** Physical IXGBE devices from ixgbe_type.h ******************/
#ifdef RTE_LIBRTE_IXGBE_PMD
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@ -335,43 +342,81 @@ RTE_PCI_DEV_ID_DECL_IGB(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540T1 0x1560
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
IXGBE_DEV_ID_82598AF_SINGLE_PORT)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, \
IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_RNDC)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_560FLR)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_ECNA_DP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF2)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T1)
#endif /* RTE_LIBRTE_IXGBE_PMD */
/****************** Virtual IGB devices from e1000_hw.h ******************/
#define E1000_DEV_ID_82576_VF 0x10CA
#define E1000_DEV_ID_82576_VF_HV 0x152D
#define E1000_DEV_ID_I350_VF 0x1520
#define E1000_DEV_ID_I350_VF_HV 0x152F
RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF)
RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_VF_HV)
RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF)
RTE_PCI_DEV_ID_DECL_IGBVF(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_VF_HV)
/****************** Virtual IXGBE devices from ixgbe_type.h ******************/
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF)
RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_VF_HV)
RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF)
RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540_VF_HV)
/*
* Undef all RTE_PCI_DEV_ID_DECL_* here.
@ -379,3 +424,5 @@ RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
#undef RTE_PCI_DEV_ID_DECL_EM
#undef RTE_PCI_DEV_ID_DECL_IGB
#undef RTE_PCI_DEV_ID_DECL_IGBVF
#undef RTE_PCI_DEV_ID_DECL_IXGBE
#undef RTE_PCI_DEV_ID_DECL_IXGBEVF


@ -37,20 +37,25 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_ixgbe.a
CFLAGS += -O3
CFLAGS += -O3 -Wno-deprecated
CFLAGS += $(WERROR_FLAGS)
VPATH += $(RTE_SDK)/lib/librte_pmd_ixgbe/ixgbe
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_common.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82598.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_x540.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_api.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_mbx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_common.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82598.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x540.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_mbx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c


@ -34,11 +34,9 @@
Intel® IXGBE driver
===================
This directory contains code from the Intel® Network Adapter Driver for PCI-E
10 Gigabit Network Connections under FreeBSD, version 2.4.4, dated 10/25/2011.
This code is available from
`http://downloadmirror.intel.com/14688/eng/ixgbe-2.4.4.tar.gz`
This directory contains the source code of the FreeBSD ixgbe driver, version
cid-10g-shared-code.2012.11.09, released by LAD. The lad/ sub-directory
contains the original source package.
This driver is valid for the product(s) listed below
* Intel® 10 Gigabit AF DA Dual Port Server Adapter

File diff suppressed because it is too large.


@ -1,521 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXGBE_H_
#define _IXGBE_H_
#include <sys/param.h>
#include <sys/systm.h>
#if __FreeBSD_version >= 800000
#include <sys/buf_ring.h>
#endif
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
#ifdef IXGBE_IEEE1588
#include <sys/ieee1588.h>
#endif
#include "ixgbe_api.h"
/* Tunables */
/*
* TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of transmit descriptors allocated by the driver. Increasing this
* value allows the driver to queue more transmits. Each descriptor is 16
* bytes. Performance tests have shown the 2K value to be optimal for top
* performance.
*/
#define DEFAULT_TXD 1024
#define PERFORM_TXD 2048
#define MAX_TXD 4096
#define MIN_TXD 64
/*
* RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of receive descriptors allocated for each RX queue. Increasing this
* value allows the driver to buffer more incoming packets. Each descriptor
* is 16 bytes. A receive buffer is also allocated for each descriptor.
*
* Note: with 8 rings and a dual port card, it is possible to bump up
* against the system mbuf pool limit, you can tune nmbclusters
* to adjust for this.
*/
#define DEFAULT_RXD 1024
#define PERFORM_RXD 2048
#define MAX_RXD 4096
#define MIN_RXD 64
/* Alignment for rings */
#define DBA_ALIGN 128
/*
* This parameter controls the maximum no of times the driver will loop in
* the isr. Minimum Value = 1
*/
#define MAX_LOOP 10
/*
* This is the max watchdog interval, ie. the time that can
* pass between any two TX clean operations, such only happening
* when the TX hardware is functioning.
*/
#define IXGBE_WATCHDOG (10 * hz)
/*
* These parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
#define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
#define IXGBE_MAX_FRAME_SIZE 0x3F00
/* Flow control constants */
#define IXGBE_FC_PAUSE 0xFFFF
#define IXGBE_FC_HI 0x20000
#define IXGBE_FC_LO 0x10000
/* Keep older OS drivers building... */
#if !defined(SYSCTL_ADD_UQUAD)
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#endif
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define IXGBE_82598_SCATTER 100
#define IXGBE_82599_SCATTER 32
#define MSIX_82598_BAR 3
#define MSIX_82599_BAR 4
#define IXGBE_TSO_SIZE 65535
#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define IXGBE_QUEUE_IDLE 0
#define IXGBE_QUEUE_WORKING 1
#define IXGBE_QUEUE_HUNG 2
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/* For 6.X code compatibility */
#if !defined(ETHER_BPF_MTAP)
#define ETHER_BPF_MTAP BPF_MTAP
#endif
#if __FreeBSD_version < 700000
#define CSUM_TSO 0
#define IFCAP_TSO4 0
#endif
/*
* Interrupt Moderation parameters
*/
#define IXGBE_LOW_LATENCY 128
#define IXGBE_AVE_LATENCY 400
#define IXGBE_BULK_LATENCY 1200
#define IXGBE_LINK_ITR 2000
/*
*****************************************************************************
* vendor_info_array
*
* This array contains the list of Subvendor/Subdevice IDs on which the driver
* should load.
*
*****************************************************************************
*/
typedef struct _ixgbe_vendor_info_t {
unsigned int vendor_id;
unsigned int device_id;
unsigned int subvendor_id;
unsigned int subdevice_id;
unsigned int index;
} ixgbe_vendor_info_t;
struct ixgbe_tx_buf {
u32 eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
};
struct ixgbe_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t hmap;
bus_dmamap_t pmap;
};
/*
* Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
*/
struct ixgbe_dma_alloc {
bus_addr_t dma_paddr;
caddr_t dma_vaddr;
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
bus_dma_segment_t dma_seg;
bus_size_t dma_size;
int dma_nseg;
};
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
int queue_status;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
struct ixgbe_dma_alloc txdma;
u32 next_avail_desc;
u32 next_to_clean;
struct ixgbe_tx_buf *tx_buffers;
volatile u16 tx_avail;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
#ifdef IXGBE_FDIR
u16 atr_sample;
u16 atr_count;
#endif
u32 bytes; /* used for AIM */
u32 packets;
/* Soft Stats */
u64 no_desc_avail;
u64 total_packets;
};
/*
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool hw_rsc;
bool discard;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
struct ixgbe_rx_buf *rx_buffers;
bus_dma_tag_t htag;
bus_dma_tag_t ptag;
u32 bytes; /* Used for AIM calc */
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
u64 rx_discarded;
u64 rsc_num;
#ifdef IXGBE_FDIR
u64 flm;
#endif
};
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSIX
*/
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
u16 num_queues;
/*
** Shadow VFTA table, this is needed because
** the real vlan filter table gets cleared during
** a soft reset and the driver needs to be able
** to repopulate it.
*/
u32 shadow_vfta[IXGBE_VFTA_SIZE];
/* Info about the interface */
u32 optics;
u32 fc; /* local flow ctrl setting */
int advertise; /* link speeds */
bool link_active;
u16 max_frame_size;
u16 num_segs;
u32 link_speed;
bool link_up;
u32 linkvec;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
/* Support for pluggable optics */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber */
#ifdef IXGBE_FDIR
int fdir_reinit;
struct task fdir_task;
#endif
struct taskqueue *tq;
/*
** Queues:
** This is the irq holder, it has
** and RX/TX pair or rings associated
** with it.
*/
struct ix_queue *queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u64 que_mask;
u32 rx_process_limit;
/* Multicast array memory */
u8 *mta;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
};
/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588 0x88F7
#define PICOSECS_PER_TICK 20833
#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */
#define IXGBE_ADVTXD_TSTAMP 0x00080000
#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
switch (hw->phy.type) {
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
return TRUE;
default:
return FALSE;
}
}
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
#endif
return (!buf_ring_empty(br));
}
#endif
/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
ixgbe_rx_unrefreshed(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
if (rxr->next_to_check > rxr->next_to_refresh)
return (rxr->next_to_check - rxr->next_to_refresh - 1);
else
return ((adapter->num_rx_desc + rxr->next_to_check) -
rxr->next_to_refresh - 1);
}
#endif /* _IXGBE_H_ */


@ -32,17 +32,16 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_82598.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#ident "$Id: ixgbe_82598.c,v 1.194 2012/03/28 00:54:08 jtkirshe Exp $"
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
@ -57,21 +56,8 @@ STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
@ -117,31 +103,6 @@ void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
/**
* ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
* @hw: pointer to hardware structure
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
{
u32 msix_count = 18;
DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
if (hw->mac.msix_vectors_from_pcie) {
msix_count = IXGBE_READ_PCIE_WORD(hw,
IXGBE_PCIE_MSIX_82598_CAPS);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW, so increment to give
* proper value */
msix_count++;
}
return msix_count;
}
/**
* ixgbe_init_ops_82598 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
@ -178,6 +139,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = &ixgbe_set_vfta_82598;
mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow Control */
@ -189,7 +151,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
@ -293,15 +255,15 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -429,21 +391,41 @@ STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
/**
* ixgbe_fc_enable_82598 - Enable flow control
* @hw: pointer to hardware structure
* @packetbuf_num: packet buffer number (0-7)
*
* Enable flow control according to the current settings.
**/
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
u32 fcrtl, fcrth;
u32 link_speed = 0;
int i;
bool link_up;
DEBUGFUNC("ixgbe_fc_enable_82598");
/* Validate the water mark configuration */
if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
/* Low water mark of zero causes XOFF floods */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
if (!hw->fc.low_water[i] ||
hw->fc.low_water[i] >= hw->fc.high_water[i]) {
DEBUGOUT("Invalid water mark configuration\n");
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
}
}
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
@ -465,9 +447,7 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
}
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
if (ret_val == IXGBE_ERR_FLOW_CONTROL)
goto out;
ixgbe_fc_autoneg(hw);
/* Disable any previous flow control settings */
fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@ -529,28 +509,27 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
reg = hw->fc.low_water << 6;
if (hw->fc.send_xon)
reg |= IXGBE_FCRTL_XONE;
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
} else {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
reg = hw->fc.high_water[packetbuf_num] << 6;
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
}
/* Configure pause time (2 TCs per register) */
reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
if ((packetbuf_num & 1) == 0)
reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
else
reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
reg = hw->fc.pause_time * 0x00010001;
for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
out:
return ret_val;
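The reworked ixgbe_fc_enable_82598() drops the packet-buffer argument and reads per-traffic-class water marks from hw->fc instead. Below is a hedged sketch of how a caller might populate those fields before invoking the op; the requested_mode field is assumed from the shared-code flow-control struct, and the numeric values are placeholders, not recommended settings:

/* Illustration only: fill per-TC water marks and pause time, then call the
 * new single-argument fc_enable entry point. */
static s32 example_enable_fc(struct ixgbe_hw *hw)
{
	int i;

	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.pause_time = 0x680;	/* must be non-zero */
	hw->fc.send_xon = 1;

	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		hw->fc.high_water[i] = 0x20;	/* per-TC high water mark */
		hw->fc.low_water[i] = 0x10;	/* non-zero and below high_water */
	}

	return ixgbe_fc_enable(hw);
}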
@ -725,11 +704,6 @@ STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
(ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
*link_up = false;
/* if link is down, zero out the current_mode */
if (*link_up == FALSE) {
hw->fc.current_mode = ixgbe_fc_none;
hw->fc.fc_was_autonegged = FALSE;
}
out:
return IXGBE_SUCCESS;
}
@ -990,7 +964,6 @@ STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
u32 rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
UNREFERENCED_1PARAMETER(vmdq);
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
@ -1343,15 +1316,15 @@ void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -1369,7 +1342,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
{
u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
UNREFERENCED_1PARAMETER(headroom);
if (!num_pb)
return;


@ -0,0 +1,52 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_82598_H_
#define _IXGBE_82598_H_
#ident "$Id: ixgbe_82598.h,v 1.3 2012/03/27 22:16:51 jtkirshe Exp $"
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
#endif /* _IXGBE_82598_H_ */


@ -32,46 +32,17 @@ POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_82599.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#ident "$Id: ixgbe_82599.c,v 1.301 2012/11/08 11:33:27 jtkirshe Exp $"
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
u16 offset, u16 *data);
STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
@ -164,9 +135,8 @@ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
u32 reg_anlp1 = 0;
u32 i = 0;
u16 list_offset, data_offset, data_value;
bool got_lock = false;
DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
@ -200,28 +170,39 @@ s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Delay obtaining semaphore again to allow FW access */
msec_delay(hw->eeprom.semaphore_delay);
/* Now restart DSP by setting Restart_AN and clearing LMS */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
IXGBE_AUTOC_AN_RESTART));
/* Need SW/FW semaphore around AUTOC writes if LESM on,
* likewise reset_pipeline requires lock as it also writes
* AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != IXGBE_SUCCESS) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
}
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
msec_delay(4);
reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
break;
got_lock = true;
}
if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
/* Restart DSP and set SFI mode */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
ret_val = ixgbe_reset_pipeline_82599(hw);
if (got_lock) {
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
got_lock = false;
}
if (ret_val) {
DEBUGOUT("sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
/* Restart DSP by setting Restart_AN and return to SFI mode */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@ -258,6 +239,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->ops.get_media_type = &ixgbe_get_media_type_82599;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_82599;
mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
@ -270,10 +253,12 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
@ -330,7 +315,9 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Check if 1G SFP module. */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
@ -446,6 +433,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
@ -478,17 +467,32 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = IXGBE_SUCCESS;
bool got_lock = false;
DEBUGFUNC("ixgbe_start_mac_link_82599");
/* reset_pipeline requires us to hold this lock as it writes to
* AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS)
goto out;
got_lock = true;
}
/* Restart link */
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@ -512,6 +516,7 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
/* Add delay to filter out noises during initial link setup */
msec_delay(50);
out:
return status;
}
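The same LESM guard wraps every AUTOC write touched by this update (start_mac_link, setup_mac_link, reset_hw, setup_sfp_modules). A hedged sketch of the pattern as a stand-alone helper; the helper name is illustrative and not part of the driver:

/* Illustrative helper: serialize an AUTOC update against firmware when LESM
 * is enabled, since ixgbe_reset_pipeline_82599() also writes AUTOC. */
static s32 example_autoc_write_locked(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = false;
	s32 status = IXGBE_SUCCESS;

	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;
		got_lock = true;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return status;
}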
@ -864,12 +869,13 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
bool got_lock = false;
DEBUGFUNC("ixgbe_setup_mac_link_82599");
/* Check to see if speed passed in is supported. */
status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
if (status != IXGBE_SUCCESS)
if (status)
goto out;
speed &= link_capabilities;
@ -890,12 +896,13 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
}
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
@ -921,9 +928,30 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
/* Need SW/FW semaphore around AUTOC writes if LESM is on,
* likewise reset_pipeline requires us to hold this lock as
* it also writes to AUTOC.
*/
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto out;
}
got_lock = true;
}
/* Restart link */
autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
ixgbe_reset_pipeline_82599(hw);
if (got_lock) {
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
got_lock = false;
}
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@ -1077,14 +1105,43 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
*/
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
if (hw->mac.orig_link_settings_stored == FALSE) {
/* Enable link if disabled in NVM */
if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
IXGBE_WRITE_FLUSH(hw);
}
if (hw->mac.orig_link_settings_stored == false) {
hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
if (autoc != hw->mac.orig_autoc)
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
IXGBE_AUTOC_AN_RESTART));
if (autoc != hw->mac.orig_autoc) {
/* Need SW/FW semaphore around AUTOC writes if LESM is
* on, likewise reset_pipeline requires us to hold
* this lock as it also writes to AUTOC.
*/
bool got_lock = false;
if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
status = hw->mac.ops.acquire_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_SWFW_SYNC;
goto reset_hw_out;
}
got_lock = true;
}
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
ixgbe_reset_pipeline_82599(hw);
if (got_lock)
hw->mac.ops.release_swfw_sync(hw,
IXGBE_GSSR_MAC_CSR_SM);
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@ -1114,6 +1171,9 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@ -1186,7 +1246,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
usec_delay(10);
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
DEBUGOUT("Flow Director Signature poll time exceeded!\n");
@ -1935,7 +1995,7 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
status = ixgbe_identify_sfp_module_generic(hw);
status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
@ -2063,6 +2123,8 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
break;
default:
break;
@ -2081,9 +2143,6 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
**/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
#define IXGBE_MAX_SECRX_POLL 30
int i;
int secrxreg;
DEBUGFUNC("ixgbe_enable_rx_dma_82599");
@ -2093,28 +2152,12 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* the Rx DMA unit. Therefore, make sure the security engine is
* completely disabled prior to enabling the Rx unit.
*/
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
break;
else
/* Use interrupt-safe sleep just in case */
usec_delay(10);
}
/* For informational purposes only */
if (i >= IXGBE_MAX_SECRX_POLL)
DEBUGOUT("Rx unit being enabled before security "
"path fully disabled. Continuing with init.\n");
hw->mac.ops.disable_sec_rx_path(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
IXGBE_WRITE_FLUSH(hw);
hw->mac.ops.enable_sec_rx_path(hw);
return IXGBE_SUCCESS;
}
@ -2129,7 +2172,7 @@ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
* Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
* if the FW version is not supported.
**/
STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
@ -2277,3 +2320,55 @@ STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
/**
* ixgbe_reset_pipeline_82599 - perform pipeline reset
*
* @hw: pointer to hardware structure
*
* Reset pipeline by asserting Restart_AN together with LMS change to ensure
* full pipeline reset
**/
s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 i, autoc_reg, autoc2_reg, ret_val;
s32 anlp1_reg = 0;
/* Enable link if disabled in NVM */
autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
IXGBE_WRITE_FLUSH(hw);
}
autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
msec_delay(4);
anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
break;
}
if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
DEBUGOUT("auto negotiation not completed\n");
ret_val = IXGBE_ERR_RESET_FAILED;
goto reset_pipeline_out;
}
ret_val = IXGBE_SUCCESS;
reset_pipeline_out:
/* Write AUTOC register with original LMS field and Restart_AN */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
return ret_val;
}


@ -0,0 +1,64 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_82599_H_
#define _IXGBE_82599_H_
#ident "$Id: ixgbe_82599.h,v 1.7 2012/10/03 07:10:29 jtkirshe Exp $"
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
#endif /* _IXGBE_82599_H_ */


@ -33,11 +33,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
#ident "$Id: ixgbe_api.c,v 1.187 2012/11/08 10:11:52 jtkirshe Exp $"
/**
* ixgbe_init_shared_code - Initialize the shared code
@ -97,7 +93,6 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_set_mac_type\n");
if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
switch (hw->device_id) {
case IXGBE_DEV_ID_82598:
case IXGBE_DEV_ID_82598_BX:
@ -122,27 +117,29 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
case IXGBE_DEV_ID_82599_VF:
case IXGBE_DEV_ID_82599_VF_HV:
hw->mac.type = ixgbe_mac_82599_vf;
break;
case IXGBE_DEV_ID_X540_VF:
case IXGBE_DEV_ID_X540_VF_HV:
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
hw->mac.type = ixgbe_mac_X540;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
}
} else {
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
}
DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
hw->mac.type, ret_val);
@ -815,6 +812,18 @@ s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address
* @hw: pointer to hardware structure
* @vmdq: VMDq default pool index
**/
s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac,
(hw, vmdq), IXGBE_NOT_IMPLEMENTED);
}
/**
@ -943,16 +952,33 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
vlan_on), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_vlvf - Set VLAN Pool Filter
* @hw: pointer to hardware structure
* @vlan: VLAN id to write to VLAN filter
* @vind: VMDq output index that maps queue to VLAN id in VFVFB
* @vlan_on: boolean flag to turn on/off VLAN in VFVF
* @vfta_changed: pointer to boolean flag which indicates whether VFTA
* should be changed
*
* Turn on/off specified bit in VLVF table.
**/
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
bool *vfta_changed)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_fc_enable - Enable flow control
* @hw: pointer to hardware structure
* @packetbuf_num: packet buffer number (0-7)
*
* Configures the flow control settings based on SW configuration.
**/
s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw),
IXGBE_NOT_IMPLEMENTED);
}
@ -1100,6 +1126,30 @@ s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
(hw, regval), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_disable_sec_rx_path - Stops the receive data path
* @hw: pointer to hardware structure
*
* Stops the receive data path.
**/
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path,
(hw), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_enable_sec_rx_path - Enables the receive data path
* @hw: pointer to hardware structure
*
* Enables the receive data path.
**/
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path,
(hw), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
* @hw: pointer to hardware structure


@ -35,9 +35,15 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_API_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_api.h,v 1.115 2012/08/23 23:30:15 jtkirshe Exp $"
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
@ -98,6 +104,7 @@ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
@ -112,8 +119,9 @@ s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@ -125,6 +133,8 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@ -149,6 +159,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,

File diff suppressed because it is too large

View File

@ -35,14 +35,18 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_common.h,v 1.133 2012/11/05 23:08:30 jtkirshe Exp $"
#define IXGBE_WRITE_REG64(hw, reg, value) \
do { \
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
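IXGBE_WRITE_REG64 issues two consecutive 32-bit writes, low dword first, and the do { } while (0) wrapper lets it be used as a single statement. The following self-contained, host-side sketch mocks IXGBE_WRITE_REG with printf to show the expansion; the 0x100 offset is arbitrary and not a real ixgbe register.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Mocked register write; the real macro writes into PCI BAR space. */
#define IXGBE_WRITE_REG(hw, reg, value) \
	printf("write reg 0x%03x = 0x%08x\n", (unsigned)(reg), (unsigned)(value))

#define IXGBE_WRITE_REG64(hw, reg, value) \
	do { \
		IXGBE_WRITE_REG(hw, reg, (u32) value); \
		IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
	} while (0)

int main(void)
{
	u64 val = 0x0123456789ABCDEFULL;

	/* Safe as the sole statement of an if thanks to do/while(0). */
	if (val)
		IXGBE_WRITE_REG64(NULL, 0x100, val);
	/* prints:
	 *   write reg 0x100 = 0x89abcdef
	 *   write reg 0x104 = 0x01234567
	 */
	return 0;
}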
struct ixgbe_pba {
u16 word[2];
u16 *pba_block;
};
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@ -51,6 +55,13 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 max_pba_block_size,
struct ixgbe_pba *pba);
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, struct ixgbe_pba *pba);
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 *pba_block_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@ -91,9 +102,12 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@ -107,12 +121,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@ -131,4 +149,7 @@ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#endif /* IXGBE_COMMON */

View File

@ -325,7 +325,6 @@ STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_msg_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
@ -347,7 +346,6 @@ STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
@ -369,7 +367,6 @@ STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_rst_vf");
if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
@ -418,7 +415,6 @@ STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
u16 i;
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_write_mbx_vf");
@ -461,7 +457,6 @@ STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 i;
DEBUGFUNC("ixgbe_read_mbx_vf");
UNREFERENCED_1PARAMETER(mbx_id);
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);

View File

@ -79,12 +79,39 @@ POSSIBILITY OF SUCH DAMAGE.
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
/*
* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* mailbox API, version 1.1 VF requests */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
@ -93,6 +120,18 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
/* mailbox API, version 2.0 VF requests */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
/* mailbox API, version 2.0 PF requests */
#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */

View File

@ -55,11 +55,18 @@
#pragma warning(disable:1419) /* External declaration in primary source file */
#pragma warning(disable:111) /* Statement is unreachable */
#pragma warning(disable:981) /* Operands are evaluated in unspecified order */
#pragma warning(disable:593) /* Variable was set but never used */
#pragma warning(disable:174) /* expression has no effect */
#else
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wformat"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wunused-value"
#pragma GCC diagnostic ignored "-Wformat-extra-args"
#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 6))
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#endif
#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
@ -82,6 +89,12 @@
#define FALSE 0
#define TRUE 1
#define false 0
#define true 1
#define min(a,b) RTE_MIN(a,b)
#define EWARN(hw, S, args...) DEBUGOUT1(S, ##args)
/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
@ -89,10 +102,11 @@
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
#define STATIC static
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
typedef uint8_t u8;
typedef int8_t s8;

View File

@ -34,6 +34,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#ident "$Id: ixgbe_phy.c,v 1.139 2012/05/24 23:36:12 jtkirshe Exp $"
static void ixgbe_i2c_start(struct ixgbe_hw *hw);
static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
@ -46,7 +47,6 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
static bool ixgbe_get_i2c_data(u32 *i2cctl);
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
/**
* ixgbe_init_phy_ops_generic - Inits PHY function ptrs
@ -74,7 +74,7 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
phy->ops.identify_sfp = &ixgbe_identify_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
return IXGBE_SUCCESS;
@ -547,10 +547,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
}
}
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
@ -571,7 +570,6 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete)
{
UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
@ -763,10 +761,9 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
&autoneg_reg);
autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE)
break;
}
}
if (time_out == max_time_out) {
status = IXGBE_ERR_LINK_SETUP;
@ -913,6 +910,33 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
return ret_val;
}
/**
* ixgbe_identify_module_generic - Identifies module type
* @hw: pointer to hardware structure
*
* Determines HW type and calls appropriate function.
**/
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
DEBUGFUNC("ixgbe_identify_module_generic");
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
status = ixgbe_identify_sfp_module_generic(hw);
break;
default:
hw->phy.sfp_type = ixgbe_sfp_type_not_present;
status = IXGBE_ERR_SFP_NOT_PRESENT;
break;
}
return status;
}
/**
* ixgbe_identify_sfp_module_generic - Identifies SFP modules
* @hw: pointer to hardware structure
@ -995,6 +1019,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
* 11 SFP_1g_sx_CORE0 - 82599-specific
* 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@ -1045,6 +1071,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_1g_sx_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@ -1137,7 +1170,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@ -1152,16 +1187,32 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
ixgbe_get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
(hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
(hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
EWARN(hw, "WARNING: Intel (R) Network "
"Connections are quality tested "
"using Intel (R) Ethernet Optics."
" Using untested modules is not "
"supported and may cause unstable"
" operation or damage to the "
"module or the adapter. Intel "
"Corporation is not responsible "
"for any harm caused by using "
"untested modules.\n", status);
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
hw->phy.type =
ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
}
}
} else {
status = IXGBE_SUCCESS;
}
@ -1214,10 +1265,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0)
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1)
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
@ -1691,15 +1744,24 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
**/
static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
{
u32 i = 0;
u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
u32 i2cctl_r = 0;
DEBUGFUNC("ixgbe_raise_i2c_clk");
for (i = 0; i < timeout; i++) {
*i2cctl |= IXGBE_I2C_CLK_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
IXGBE_WRITE_FLUSH(hw);
/* SCL rise time (1000ns) */
usec_delay(IXGBE_I2C_T_RISE);
i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
if (i2cctl_r & IXGBE_I2C_CLK_IN)
break;
}
}
/**

View File

@ -94,6 +94,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
#ident "$Id: ixgbe_phy.h,v 1.48 2012/01/04 01:49:02 jtkirshe Exp $"
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@ -124,6 +126,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
@ -137,4 +140,5 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
#endif /* _IXGBE_PHY_H_ */

View File

@ -36,9 +36,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_osdep.h"
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
#ident "$Id: ixgbe_type.h,v 1.552 2012/11/08 11:33:27 jtkirshe Exp $"
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
@ -61,15 +59,23 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
#define IXGBE_DEV_ID_82599_VF 0x10ED
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_82599_VF_HV 0x152E
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X540_VF_HV 0x1530
#define IXGBE_DEV_ID_X540T1 0x1560
/* General Registers */
#define IXGBE_CTRL 0x00000
@ -116,7 +122,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_I2C_CLK_OUT 0x00000002
#define IXGBE_I2C_DATA_IN 0x00000004
#define IXGBE_I2C_DATA_OUT 0x00000008
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
/* Interrupt Registers */
#define IXGBE_EICR 0x00800
@ -168,19 +175,19 @@ POSSIBILITY OF SUCH DAMAGE.
/* Receive DMA Registers */
#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
(0x0D000 + ((_i - 64) * 0x40)))
(0x0D000 + (((_i) - 64) * 0x40)))
#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
(0x0D004 + ((_i - 64) * 0x40)))
(0x0D004 + (((_i) - 64) * 0x40)))
#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
(0x0D008 + ((_i - 64) * 0x40)))
(0x0D008 + (((_i) - 64) * 0x40)))
#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
(0x0D010 + ((_i - 64) * 0x40)))
(0x0D010 + (((_i) - 64) * 0x40)))
#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
(0x0D018 + ((_i - 64) * 0x40)))
(0x0D018 + (((_i) - 64) * 0x40)))
#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
(0x0D028 + ((_i - 64) * 0x40)))
(0x0D028 + (((_i) - 64) * 0x40)))
#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
(0x0D02C + ((_i - 64) * 0x40)))
(0x0D02C + (((_i) - 64) * 0x40)))
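The hunk above only adds parentheses around `_i` in the `- 64` branch, but that matters as soon as the macro argument is an expression containing an operator of lower precedence than `-`. A self-contained sketch of the failure mode follows; RDBAL_OLD/RDBAL_NEW are local copies of the old and new macro bodies, and the queue numbers are arbitrary.

#include <stdio.h>

/* Old form: `_i - 64` is not parenthesized. */
#define RDBAL_OLD(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
		       (0x0D000 + ((_i - 64) * 0x40)))
/* New form, as in the hunk above: every use of `_i` is parenthesized. */
#define RDBAL_NEW(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
		       (0x0D000 + (((_i) - 64) * 0x40)))

int main(void)
{
	int use_second = 1, qa = 65, qb = 70;

	/* With a conditional expression as the index, the old macro parses
	 * `use_second ? qb : qa - 64` and silently drops the `- 64` from the
	 * operand that is actually selected. */
	printf("old: 0x%05X\n", RDBAL_OLD(use_second ? qb : qa)); /* 0x0E180 */
	printf("new: 0x%05X\n", RDBAL_NEW(use_second ? qb : qa)); /* 0x0D180 */
	return 0;
}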
#define IXGBE_RSCDBU 0x03028
#define IXGBE_RDDCC 0x02F20
#define IXGBE_RXMEMWRAP 0x03190
@ -193,7 +200,7 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + ((_i - 64) * 0x40))))
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
* 00-15 : 0x02200 + n*4
@ -202,7 +209,7 @@ POSSIBILITY OF SUCH DAMAGE.
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + ((_i - 64) * 0x40))))
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RDRXCTL_RSC_PUSH 0x80
/* 8 of these 0x03C00 - 0x03C1C */
@ -419,7 +426,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
/* DCB registers */
#define MAX_TRAFFIC_CLASS 8
#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@ -832,6 +839,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@ -852,6 +860,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */
#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */
#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
@ -1079,7 +1089,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@ -1383,6 +1395,7 @@ enum {
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */
@ -1400,6 +1413,7 @@ enum {
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@ -1418,6 +1432,7 @@ enum {
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@ -1435,6 +1450,7 @@ enum {
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */
@ -1520,6 +1536,7 @@ enum {
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@ -1574,8 +1591,18 @@ enum {
#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP7 0x00000080 /* SDP7 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP2 IO direction */
#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */
#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */
#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */
#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
@ -1646,6 +1673,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
#define IXGBE_MACC_FSV_10G 0x00030000
@ -1771,13 +1799,16 @@ enum {
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
#define IXGBE_ALT_MAC_ADDR_PTR 0x37
#define IXGBE_FREE_SPACE_PTR 0X3E
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
#define IXGBE_DEVICE_CAPS_EXT_THERMAL_SENSOR 0x10
#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
/* MSI-X capability fields masks */
#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
@ -1872,6 +1903,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED 0xF
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@ -1934,6 +1966,10 @@ enum {
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
#define IXGBE_TSAUXC_EN_CLK 0x00000004
#define IXGBE_TSAUXC_SYNCLK 0x00000008
#define IXGBE_TSAUXC_SDP0_INT 0x00000040
#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */
@ -1977,6 +2013,7 @@ enum {
#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */
#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */
/* Multiple Receive Queue Control */
@ -2007,6 +2044,8 @@ enum {
#define IXGBE_QDE_ENABLE 0x00000001
#define IXGBE_QDE_IDX_MASK 0x00007F00
#define IXGBE_QDE_IDX_SHIFT 8
#define IXGBE_QDE_WRITE 0x00010000
#define IXGBE_QDE_READ 0x00020000
#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
@ -2197,9 +2236,9 @@ enum {
/* SR-IOV specific macros */
#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Little Endian defines */
#ifndef __le16
@ -2297,6 +2336,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_DROP_QUEUE 127
#define IXGBE_STATUS_OVERHEATING_BIT 20 /* STATUS overtemp bit num */
/* Manageablility Host Interface defines */
#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
@ -2507,13 +2547,14 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
*/
/* BitTimes (BT) conversion */
#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
/* Calculate Delay to respond to PFC */
@ -2544,24 +2585,31 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
#define IXGBE_FILL_RATE (36 / 25)
#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
(IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
((36 * \
(IXGBE_B2BT(_max_frame_link) + \
IXGBE_PFC_D + \
(2 * IXGBE_CABLE_DC) + \
(2 * IXGBE_ID_X540) + \
IXGBE_HD + IXGBE_B2BT(TC)))
IXGBE_HD) / 25 + 1) + \
2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
(IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
(2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
IXGBE_HD + IXGBE_B2BT(TC)))
#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
((36 * \
(IXGBE_B2BT(_max_frame_link) + \
IXGBE_PFC_D + \
(2 * IXGBE_CABLE_DC) + \
(2 * IXGBE_ID) + \
IXGBE_HD) / 25 + 1) + \
2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
(IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
#define IXGBE_LOW_DV_X540(_max_frame_tc) \
(2 * IXGBE_B2BT(_max_frame_tc) + \
(36 * IXGBE_PCI_DELAY / 25) + 1)
#define IXGBE_LOW_DV(_max_frame_tc) \
(2 * IXGBE_LOW_DV_X540(_max_frame_tc))
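These delay-value macros take frame sizes in bytes (IXGBE_B2BT converts to bit times internally) and are meant to be turned into packet-buffer watermarks with IXGBE_BT2KB. A hedged sketch of how a caller might derive per-TC flow-control watermarks from them follows; the function name, the rx_pb_kb clamp and the choice to use the same frame size for link and TC are illustrative, not taken from this commit.

#include "ixgbe_type.h"

/* Illustrative only: fill in high/low water (in KB) for one traffic class.
 * max_frame is the largest frame in bytes, rx_pb_kb the Rx packet buffer
 * size in KB available to this TC. */
static void example_fc_watermarks(struct ixgbe_hw *hw, int tc,
				  u32 max_frame, u32 rx_pb_kb)
{
	u32 dv, low_dv, kb;

	if (hw->mac.type == ixgbe_mac_X540) {
		dv = IXGBE_DV_X540(max_frame, max_frame);
		low_dv = IXGBE_LOW_DV_X540(max_frame);
	} else {
		dv = IXGBE_DV(max_frame, max_frame);
		low_dv = IXGBE_LOW_DV(max_frame);
	}

	/* Convert bit times to KB and keep headroom below the buffer size. */
	kb = IXGBE_BT2KB(dv);
	if (kb > rx_pb_kb - 1)
		kb = rx_pb_kb - 1;
	hw->fc.high_water[tc] = kb;
	hw->fc.low_water[tc] = IXGBE_BT2KB(low_dv);
}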
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@ -2705,6 +2753,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
ixgbe_sfp_type_1g_sx_core0 = 11,
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@ -2754,6 +2804,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_133 = 133,
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
ixgbe_bus_speed_reserved
};
@ -2789,8 +2840,8 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
u32 low_water; /* Flow Control Low-water */
u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */
u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
@ -2920,6 +2971,8 @@ struct ixgbe_mac_operations {
s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
s32 (*setup_sfp)(struct ixgbe_hw *);
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*disable_sec_rx_path)(struct ixgbe_hw *);
s32 (*enable_sec_rx_path)(struct ixgbe_hw *);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
@ -2947,6 +3000,7 @@ struct ixgbe_mac_operations {
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
@ -2957,12 +3011,13 @@ struct ixgbe_mac_operations {
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *);
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
s32 (*fc_enable)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@ -3018,11 +3073,11 @@ struct ixgbe_mac_info {
u32 rx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
bool msix_vectors_from_pcie;
u32 orig_autoc;
bool arc_subsystem_valid;
u8 san_mac_rar_index;
u32 orig_autoc2;
u16 max_msix_vectors;
bool arc_subsystem_valid;
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
@ -3092,7 +3147,9 @@ struct ixgbe_hw {
u16 subsystem_vendor_id;
u8 revision_id;
bool adapter_stopped;
int api_version;
bool force_full_reset;
bool allow_unsupported_sfp;
};
#define ixgbe_call_func(hw, func, params, error) \
@ -3129,7 +3186,6 @@ struct ixgbe_hw {
#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
#define IXGBE_ERR_FC_NOT_SUPPORTED -28
#define IXGBE_ERR_FLOW_CONTROL -29
#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32

View File

@ -35,27 +35,7 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_api.h"
#include "ixgbe_type.h"
#include "ixgbe_vf.h"
s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete);
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
#ident "$Id: ixgbe_vf.c,v 1.58 2012/08/09 20:24:53 cmwyborn Exp $"
#ifndef IXGBE_VFWRITE_REG
#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
@ -162,6 +142,9 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
@ -292,6 +275,17 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
return vector;
}
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 retmsg[IXGBE_VFMAILBOX_SIZE];
s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
if (!retval)
mbx->ops.read_posted(hw, retmsg, size, 0);
}
/**
* ixgbe_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
@ -307,7 +301,6 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
memset(msgbuf, 0, 12);
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
@ -347,7 +340,6 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 cnt, i;
u32 vmdq;
UNREFERENCED_1PARAMETER(clear);
DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
@ -380,20 +372,27 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if TRUE then set bit, else clear bit
* @vlan_on: if true then set bit, else clear bit
**/
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
UNREFERENCED_1PARAMETER(vind);
s32 ret_val;
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
return(mbx->ops.write_posted(hw, msgbuf, 2, 0));
ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
return IXGBE_SUCCESS;
return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
}
/**
@ -404,7 +403,6 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
**/
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
{
UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_TX_QUEUES;
}
@ -416,7 +414,6 @@ u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
**/
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
{
UNREFERENCED_1PARAMETER(hw);
return IXGBE_VF_MAX_RX_QUEUES;
}
@ -479,7 +476,6 @@ s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
return IXGBE_SUCCESS;
}
@ -496,7 +492,6 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete)
{
u32 links_reg;
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
*link_up = false;
@ -507,16 +502,128 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
*link_up = TRUE;
*link_up = true;
else
*link_up = FALSE;
*link_up = false;
if ((links_reg & IXGBE_LINKS_SPEED_10G_82599) ==
IXGBE_LINKS_SPEED_10G_82599)
switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
else
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
}
return IXGBE_SUCCESS;
}
/**
* ixgbevf_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
}
/**
* ixgbevf_negotiate_api_version - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
/* Negotiate the mailbox API version */
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
hw->api_version = api;
return 0;
}
err = IXGBE_ERR_INVALID_ARGUMENT;
}
return err;
}
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
int err;
u32 msg[5];
/* do nothing if API doesn't support ixgbevf_get_queues */
switch (hw->api_version) {
case ixgbe_mbox_api_11:
break;
default:
return 0;
}
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUES;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/*
* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_MBX;
/* record and validate values from message */
hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
if (hw->mac.max_tx_queues == 0 ||
hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
if (hw->mac.max_rx_queues == 0 ||
hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
/* in case of unknown state assume we cannot tag frames */
if (*num_tcs > hw->mac.max_rx_queues)
*num_tcs = 1;
*default_tc = msg[IXGBE_VF_DEF_QUEUE];
/* default to queue 0 on out-of-bounds queue number */
if (*default_tc >= hw->mac.max_tx_queues)
*default_tc = 0;
}
return err;
}
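Taken together, the new VF helpers give a natural bring-up sequence: negotiate the highest mailbox API both sides support, query the queue layout (only meaningful under API 1.1), then push the maximum receive packet length. A hedged sketch of that sequence follows; the function name, the fallback policy and the use of max_frame are illustrative, while the three ixgbevf_* calls and the ixgbe_mbox_api_* values come from this commit.

#include "ixgbe_vf.h"
#include "ixgbe_mbx.h"

/* Illustrative VF bring-up: prefer mailbox API 1.1, fall back to 1.0. */
static int example_vf_mbx_init(struct ixgbe_hw *hw, u16 max_frame)
{
	unsigned int num_tcs = 1, default_tc = 0;
	int err;

	err = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11);
	if (err)
		err = ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_10);
	if (err)
		return err;

	/* Under API 1.0 this returns 0 and leaves the defaults untouched. */
	err = ixgbevf_get_queues(hw, &num_tcs, &default_tc);
	if (err)
		return err;

	/* Ask the PF to accept frames up to max_frame bytes. */
	ixgbevf_rlpml_set_vf(hw, max_frame);
	return 0;
}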

View File

@ -33,11 +33,15 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
#ident "$Id: ixgbe_vf.h,v 1.34 2012/08/09 17:04:18 cmwyborn Exp $"
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
/* DCB define */
#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
@ -49,30 +53,30 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
/* define IXGBE_VFPBACL still says TBD in EAS */
#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
@ -108,5 +112,27 @@ struct ixgbevf_hw_stats {
u64 saved_reset_vfmprc;
};
#endif /* __IXGBE_VF_H__ */
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete);
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */

View File

@ -37,32 +37,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg, bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
STATIC s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
@ -72,7 +46,7 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
* ixgbe_init_ops_X540 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
* Initialize the function pointers and assign the MAC type for 82599.
* Initialize the function pointers and assign the MAC type for X540.
* Does not touch the hardware.
**/
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
@ -118,13 +92,17 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
@ -191,7 +169,6 @@ s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
**/
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
UNREFERENCED_1PARAMETER(hw);
return ixgbe_media_type_copper;
}
@ -285,6 +262,9 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@ -935,18 +915,22 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
ixgbe_link_speed speed;
bool link_up;
DEBUGFUNC("ixgbe_blink_led_start_X540");
/*
* In order for the blink bit in the LED control register
* to work, link and speed must be forced in the MAC. We
* will reverse this when we stop the blinking.
* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
* This will be reversed when we stop the blinking.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up == false) {
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
}
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);

View File

@ -35,6 +35,30 @@ POSSIBILITY OF SUCH DAMAGE.
#define _IXGBE_X540_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_x540.h,v 1.6 2012/08/09 20:43:58 cmwyborn Exp $"
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);

File diff suppressed because it is too large

View File

@ -1,430 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXV_H_
#define _IXV_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include "ixgbe_api.h"
#include "ixgbe_vf.h"
/* Tunables */
/*
* TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of transmit descriptors allocated by the driver. Increasing this
* value allows the driver to queue more transmits. Each descriptor is 16
* bytes. Performance tests have shown the 2K value to be optimal for top
* performance.
*/
#define DEFAULT_TXD 1024
#define PERFORM_TXD 2048
#define MAX_TXD 4096
#define MIN_TXD 64
/*
* RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of receive descriptors allocated for each RX queue. Increasing this
* value allows the driver to buffer more incoming packets. Each descriptor
* is 16 bytes. A receive buffer is also allocated for each descriptor.
*
* Note: with 8 rings and a dual port card, it is possible to bump up
* against the system mbuf pool limit; you can tune nmbclusters
* to adjust for this.
*/
#define DEFAULT_RXD 1024
#define PERFORM_RXD 2048
#define MAX_RXD 4096
#define MIN_RXD 64
/* Alignment for rings */
#define DBA_ALIGN 128
/*
* This parameter controls the maximum no of times the driver will loop in
* the isr. Minimum Value = 1
*/
#define MAX_LOOP 10
/*
* This is the max watchdog interval, i.e. the time that can
* pass between any two TX clean operations; such cleaning only
* happens when the TX hardware is functioning.
*/
#define IXV_WATCHDOG (10 * hz)
/*
* These parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
#define IXV_MAX_FRAME_SIZE 0x3F00
/* Flow control constants */
#define IXV_FC_PAUSE 0xFFFF
#define IXV_FC_HI 0x20000
#define IXV_FC_LO 0x10000
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define IXV_EITR_DEFAULT 128
#define IXV_SCATTER 32
#define IXV_RX_HDR 128
#define MSIX_BAR 3
#define IXV_TSO_SIZE 65535
#define IXV_BR_SIZE 4096
#define IXV_LINK_ITR 2000
#define TX_BUFFER_SIZE ((u32) 1514)
#define VFTA_SIZE 128
/* Offload bits in mbuf flag */
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
/*
*****************************************************************************
* vendor_info_array
*
* This array contains the list of Subvendor/Subdevice IDs on which the driver
* should load.
*
*****************************************************************************
*/
typedef struct _ixv_vendor_info_t {
unsigned int vendor_id;
unsigned int device_id;
unsigned int subvendor_id;
unsigned int subdevice_id;
unsigned int index;
} ixv_vendor_info_t;
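/*
 * Illustrative sketch only: a matching table built from this type is
 * terminated by an all-zero entry.  IXGBE_INTEL_VENDOR_ID and
 * IXGBE_DEV_ID_82599_VF are assumed to come from ixgbe_type.h, not
 * from this header.
 *
 *	static ixv_vendor_info_t ixv_vendor_info_array[] = {
 *		{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
 *		{0, 0, 0, 0, 0}
 *	};
 */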
struct ixv_tx_buf {
u32 eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
};
struct ixv_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t hmap;
bus_dmamap_t pmap;
};
/*
* Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
*/
struct ixv_dma_alloc {
bus_addr_t dma_paddr;
caddr_t dma_vaddr;
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
bus_dma_segment_t dma_seg;
bus_size_t dma_size;
int dma_nseg;
};
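/*
 * Illustrative sketch only, assuming the companion .c file provides
 * ixv_dma_malloc()/ixv_dma_free() helpers that fill in and tear down
 * this structure:
 *
 *	struct ixv_dma_alloc dma;
 *
 *	if (ixv_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT) == 0) {
 *		descs = (void *)dma.dma_vaddr;
 *		...
 *		ixv_dma_free(adapter, &dma);
 *	}
 */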
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
u32 eitr; /* cached reg */
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
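/*
 * Illustrative sketch only: a per-queue MSIX interrupt handler would
 * typically count the interrupt and hand the TX/RX work to the
 * taskqueue held in this structure ("que" is a hypothetical pointer to
 * the interrupting queue).
 *
 *	que->irqs++;
 *	taskqueue_enqueue(que->tq, &que->que_task);
 */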
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
bool watchdog_check;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
struct ixv_dma_alloc txdma;
u32 next_avail_desc;
u32 next_to_clean;
struct ixv_tx_buf *tx_buffers;
volatile u16 tx_avail;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
struct buf_ring *br;
/* Soft Stats */
u32 bytes;
u32 packets;
u64 no_desc_avail;
u64 total_packets;
};
/*
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
union ixgbe_adv_rx_desc *rx_base;
struct ixv_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool discard;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
struct ixv_rx_buf *rx_buffers;
bus_dma_tag_t htag;
bus_dma_tag_t ptag;
u32 bytes; /* Used for AIM calc */
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
u64 rx_discarded;
};
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
/*
 * Interrupt resources: this set is
 * either used for the legacy interrupt,
 * or for the Link interrupt when doing MSIX
 */
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
u16 num_queues;
/* Info about the board itself */
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 mbxvec;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
/* Mailbox interrupt servicing */
struct task mbx_task; /* Mailbox tasklet */
struct taskqueue *tq;
/*
** Queues:
** This is the irq holder, it has
** an RX/TX pair of rings associated
** with it.
*/
struct ix_queue *queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u64 que_mask;
u32 rx_process_limit;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long mbx_irq;
struct ixgbevf_hw_stats stats;
};
#define IXV_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
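/*
 * Usage sketch: the CORE macros take the adapter, while the TX/RX
 * macros take the individual ring (each holds its own mutex), e.g.
 *
 *	IXV_CORE_LOCK(adapter);
 *	...
 *	IXV_CORE_UNLOCK(adapter);
 *
 *	IXV_TX_LOCK(txr);
 *	...
 *	IXV_TX_UNLOCK(txr);
 */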
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version < 800504
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
#endif
return (!buf_ring_empty(br));
}
#endif
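/*
 * Illustrative sketch only: a multiqueue start routine might use the
 * helper above to decide between queueing and transmitting directly.
 * drbr_enqueue() is the stock buf_ring API; ixv_mq_start_locked() is an
 * assumed name for the locked transmit routine in the .c file.
 *
 *	if (drbr_needs_enqueue(ifp, txr->br))
 *		err = drbr_enqueue(ifp, txr->br, m);
 *	else
 *		err = ixv_mq_start_locked(ifp, txr, m);
 */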
/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
ixv_rx_unrefreshed(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
if (rxr->next_to_check > rxr->next_to_refresh)
return (rxr->next_to_check - rxr->next_to_refresh - 1);
else
return ((adapter->num_rx_desc + rxr->next_to_check) -
rxr->next_to_refresh - 1);
}
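/*
 * Usage sketch only: after harvesting received packets, the RX cleanup
 * path can use the helper above to decide whether enough descriptors
 * have been consumed to be worth refreshing.  ixv_refresh_mbufs() is an
 * assumed name for the refresh routine in the .c file, and the batch
 * threshold of 8 is chosen purely for illustration.
 *
 *	if (ixv_rx_unrefreshed(rxr) > 8)
 *		ixv_refresh_mbufs(rxr, rxr->next_to_check);
 */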
#endif /* _IXV_H_ */