kni: remove ethtool support
The current design requires kernel drivers, and those drivers need to be probed by Linux up to some level before DPDK can use them for ethtool support; that in turn requires maintaining copies of the Linux drivers inside DPDK. The ethtool support is also limited and hard, if not impossible, to extend to other PMDs. Since KNI ethtool support is rarely, if ever, used, remove it for the sake of simplicity and maintainability.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
parent 221a07fae7
commit ea6b39b5b8
@@ -942,7 +942,6 @@ CONFIG_RTE_PIPELINE_STATS_COLLECT=n
 CONFIG_RTE_LIBRTE_KNI=n
 CONFIG_RTE_LIBRTE_PMD_KNI=n
 CONFIG_RTE_KNI_KMOD=n
-CONFIG_RTE_KNI_KMOD_ETHTOOL=n
 CONFIG_RTE_KNI_PREEMPT_DEFAULT=y

 #

@@ -62,15 +62,6 @@ Deprecation Notices
   Target release for removal of the legacy API will be defined once most
   PMDs have switched to rte_flow.

-* kni: remove KNI ethtool support. To clarify, this is not to remove the KNI,
-  but only to remove ethtool support of it that is disabled by default and
-  can be enabled via ``CONFIG_RTE_KNI_KMOD_ETHTOOL`` config option.
-  Existing KNI ethtool implementation is only supported by ``igb`` & ``ixgbe``
-  drivers, by using a copy of kernel drivers in DPDK. This model cannot be
-  extended to all drivers in DPDK and it is too much effort to maintain
-  kernel modules in DPDK. As a result users won't be able to use ``ethtool``
-  via ``igb`` & ``ixgbe`` anymore.
-
 * cryptodev: New member in ``rte_cryptodev_config`` to allow applications to
   disable features supported by the crypto device. Only the following features
   would be allowed to be disabled this way,

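For context, the option named in the deprecation notice above is the same one dropped in the first hunk of this commit; before this change, a user who wanted KNI ethtool support had to switch it on (together with the KNI kernel module itself) in the build configuration, roughly:

    CONFIG_RTE_KNI_KMOD=y
    CONFIG_RTE_KNI_KMOD_ETHTOOL=y
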
@@ -67,6 +67,8 @@ Removed Items
    Also, make sure to start the actual text at the margin.
    =========================================================

+* Removed KNI ethtool, CONFIG_RTE_KNI_KMOD_ETHTOOL, support.
+

 API Changes
 -----------

@@ -262,16 +262,6 @@ Change the MTU size:

         # ifconfig vEth0_0 mtu 1450

-If DPDK is compiled with ``CONFIG_RTE_KNI_KMOD_ETHTOOL=y`` and an Intel
-NIC is used, the user can use ``ethtool`` on the KNI interface as if it
-were a normal Linux kernel interface.
-
-Displaying the NIC registers:
-
-.. code-block:: console
-
-        # ethtool -d vEth0_0
-
 When the ``kni`` application is closed, all the KNI interfaces are deleted
 from the Linux kernel.

@@ -897,19 +897,10 @@ kni_alloc(uint16_t port_id)
 	if (i == 0) {
 		struct rte_kni_ops ops;
 		struct rte_eth_dev_info dev_info;
-		const struct rte_pci_device *pci_dev;
-		const struct rte_bus *bus = NULL;

 		memset(&dev_info, 0, sizeof(dev_info));
 		rte_eth_dev_info_get(port_id, &dev_info);

-		if (dev_info.device)
-			bus = rte_bus_find_by_device(dev_info.device);
-		if (bus && !strcmp(bus->name, "pci")) {
-			pci_dev = RTE_DEV_TO_PCI(dev_info.device);
-			conf.addr = pci_dev->addr;
-			conf.id = pci_dev->id;
-		}
 		/* Get the interface default mac address */
 		rte_eth_macaddr_get(port_id,
 			(struct rte_ether_addr *)&conf.mac_addr);

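The hunk above drops the PCI address/ID lookup from the sample application, since KNI no longer consumes those fields once ethtool support is gone. For reference only, an application that still wants the PCI identity of a port can keep an equivalent lookup on its own side; the sketch below mirrors the removed lines, and the helper name is hypothetical:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_bus.h>
#include <rte_bus_pci.h>

/* Hypothetical helper modelled on the removed lines: resolve the PCI
 * address and device ID backing an ethdev port, if it is PCI-backed.
 */
static int
port_pci_identity(uint16_t port_id, struct rte_pci_addr *addr,
		  struct rte_pci_id *id)
{
	struct rte_eth_dev_info dev_info;
	const struct rte_bus *bus = NULL;
	const struct rte_pci_device *pci_dev;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);

	if (dev_info.device)
		bus = rte_bus_find_by_device(dev_info.device);
	if (bus == NULL || strcmp(bus->name, "pci") != 0)
		return -1; /* port is not backed by a PCI device */

	pci_dev = RTE_DEV_TO_PCI(dev_info.device);
	*addr = pci_dev->addr;
	*id = pci_dev->id;
	return 0;
}
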
@@ -3,6 +3,4 @@

 ccflags-y := $(MODULE_CFLAGS)
 obj-m := rte_kni.o
-rte_kni-y := $(patsubst $(src)/%.c,%.o,$(wildcard $(src)/*.c)) \
-	$(patsubst $(src)/%.c,%.o,$(wildcard $(src)/ethtool/ixgbe/*.c)) \
-	$(patsubst $(src)/%.c,%.o,$(wildcard $(src)/ethtool/igb/*.c))
+rte_kni-y := $(patsubst $(src)/%.c,%.o,$(wildcard $(src)/*.c))

@@ -12,7 +12,7 @@ MODULE = rte_kni
 # CFLAGS
 #
 MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
-MODULE_CFLAGS += -I$(RTE_OUTPUT)/include -I$(SRCDIR)/ethtool/ixgbe -I$(SRCDIR)/ethtool/igb
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
 MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
 MODULE_CFLAGS += -Wall -Werror

@@ -30,29 +30,5 @@ endif
 #
 SRCS-y := kni_misc.c
 SRCS-y += kni_net.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += kni_ethtool.c
-
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_main.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_api.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_common.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_ethtool.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_82599.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_82598.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_x540.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/ixgbe_phy.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/ixgbe/kcompat.c
-
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_82575.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_i210.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_api.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_mac.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_manage.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_mbx.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_nvm.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/e1000_phy.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_ethtool.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_main.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_param.c
-SRCS-$(CONFIG_RTE_KNI_KMOD_ETHTOOL) += ethtool/igb/igb_vmdq.c

 include $(RTE_SDK)/mk/rte.module.mk

@@ -1,71 +0,0 @@
-.. SPDX-License-Identifier: BSD-3-Clause
-   Copyright(c) 2010-2014 Intel Corporation.
-
-Description
-
-In order to support ethtool in Kernel NIC Interface, the standard Linux kernel
-drivers of ixgbe/igb are needed to be reused here. ixgbe-3.9.17 is the version
-modified from in kernel NIC interface kernel module to support ixgbe NIC, and
-igb-3.4.8 is the version modified from in kernel NIC interface kernel module to
-support igb NIC.
-
-The source code package of ixgbe can be downloaded from sourceforge.net as below.
-http://sourceforge.net/projects/e1000/files/ixgbe%20stable/
-Below source files are copied or modified from ixgbe.
-
-ixgbe_82598.h
-ixgbe_82599.c
-ixgbe_82599.h
-ixgbe_api.c
-ixgbe_api.h
-ixgbe_common.c
-ixgbe_common.h
-ixgbe_dcb.h
-ixgbe_ethtool.c
-ixgbe_fcoe.h
-ixgbe.h
-ixgbe_main.c
-ixgbe_mbx.h
-ixgbe_osdep.h
-ixgbe_phy.c
-ixgbe_phy.h
-ixgbe_sriov.h
-ixgbe_type.h
-kcompat.c
-kcompat.h
-
-The source code package of igb can be downloaded from sourceforge.net as below.
-http://sourceforge.net/projects/e1000/files/igb%20stable/
-Below source files are copied or modified from igb.
-
-e1000_82575.c
-e1000_82575.h
-e1000_api.c
-e1000_api.h
-e1000_defines.h
-e1000_hw.h
-e1000_mac.c
-e1000_mac.h
-e1000_manage.c
-e1000_manage.h
-e1000_mbx.c
-e1000_mbx.h
-e1000_nvm.c
-e1000_nvm.h
-e1000_osdep.h
-e1000_phy.c
-e1000_phy.h
-e1000_regs.h
-igb_ethtool.c
-igb.h
-igb_main.c
-igb_param.c
-igb_procfs.c
-igb_regtest.h
-igb_sysfs.c
-igb_vmdq.c
-igb_vmdq.h
-kcompat.c
-kcompat_ethtool.c
-kcompat.h
-
File diff suppressed because it is too large
@ -1,494 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _E1000_82575_H_
|
||||
#define _E1000_82575_H_
|
||||
|
||||
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
|
||||
(ID_LED_DEF1_DEF2 << 8) | \
|
||||
(ID_LED_DEF1_DEF2 << 4) | \
|
||||
(ID_LED_OFF1_ON2))
|
||||
/*
|
||||
* Receive Address Register Count
|
||||
* Number of high/low register pairs in the RAR. The RAR (Receive Address
|
||||
* Registers) holds the directed and multicast addresses that we monitor.
|
||||
* These entries are also used for MAC-based filtering.
|
||||
*/
|
||||
/*
|
||||
* For 82576, there are an additional set of RARs that begin at an offset
|
||||
* separate from the first set of RARs.
|
||||
*/
|
||||
#define E1000_RAR_ENTRIES_82575 16
|
||||
#define E1000_RAR_ENTRIES_82576 24
|
||||
#define E1000_RAR_ENTRIES_82580 24
|
||||
#define E1000_RAR_ENTRIES_I350 32
|
||||
#define E1000_SW_SYNCH_MB 0x00000100
|
||||
#define E1000_STAT_DEV_RST_SET 0x00100000
|
||||
#define E1000_CTRL_DEV_RST 0x20000000
|
||||
|
||||
struct e1000_adv_data_desc {
|
||||
__le64 buffer_addr; /* Address of the descriptor's data buffer */
|
||||
union {
|
||||
u32 data;
|
||||
struct {
|
||||
u32 datalen:16; /* Data buffer length */
|
||||
u32 rsvd:4;
|
||||
u32 dtyp:4; /* Descriptor type */
|
||||
u32 dcmd:8; /* Descriptor command */
|
||||
} config;
|
||||
} lower;
|
||||
union {
|
||||
u32 data;
|
||||
struct {
|
||||
u32 status:4; /* Descriptor status */
|
||||
u32 idx:4;
|
||||
u32 popts:6; /* Packet Options */
|
||||
u32 paylen:18; /* Payload length */
|
||||
} options;
|
||||
} upper;
|
||||
};
|
||||
|
||||
#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */
|
||||
#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */
|
||||
#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */
|
||||
#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */
|
||||
#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */
|
||||
#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */
|
||||
#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */
|
||||
#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */
|
||||
#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */
|
||||
#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */
|
||||
#define E1000_ADV_DCMD_RS 0x8 /* Report Status */
|
||||
#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */
|
||||
#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */
|
||||
/* Extended Device Control */
|
||||
#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */
|
||||
|
||||
struct e1000_adv_context_desc {
|
||||
union {
|
||||
u32 ip_config;
|
||||
struct {
|
||||
u32 iplen:9;
|
||||
u32 maclen:7;
|
||||
u32 vlan_tag:16;
|
||||
} fields;
|
||||
} ip_setup;
|
||||
u32 seq_num;
|
||||
union {
|
||||
u64 l4_config;
|
||||
struct {
|
||||
u32 mkrloc:9;
|
||||
u32 tucmd:11;
|
||||
u32 dtyp:4;
|
||||
u32 adv:8;
|
||||
u32 rsvd:4;
|
||||
u32 idx:4;
|
||||
u32 l4len:8;
|
||||
u32 mss:16;
|
||||
} fields;
|
||||
} l4_setup;
|
||||
};
|
||||
|
||||
/* SRRCTL bit definitions */
|
||||
#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
|
||||
#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00
|
||||
#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
|
||||
#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000
|
||||
#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
|
||||
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
|
||||
#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
|
||||
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
|
||||
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
|
||||
#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
|
||||
#define E1000_SRRCTL_TIMESTAMP 0x40000000
|
||||
#define E1000_SRRCTL_DROP_EN 0x80000000
|
||||
|
||||
#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
|
||||
#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
|
||||
|
||||
#define E1000_TX_HEAD_WB_ENABLE 0x1
|
||||
#define E1000_TX_SEQNUM_WB_ENABLE 0x2
|
||||
|
||||
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
|
||||
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
|
||||
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
|
||||
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
|
||||
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
|
||||
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
|
||||
#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002
|
||||
|
||||
#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8
|
||||
#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \
|
||||
E1000_VMRCTL_MIRROR_PORT_SHIFT)
|
||||
#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0)
|
||||
#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1)
|
||||
#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
|
||||
|
||||
#define E1000_EICR_TX_QUEUE ( \
|
||||
E1000_EICR_TX_QUEUE0 | \
|
||||
E1000_EICR_TX_QUEUE1 | \
|
||||
E1000_EICR_TX_QUEUE2 | \
|
||||
E1000_EICR_TX_QUEUE3)
|
||||
|
||||
#define E1000_EICR_RX_QUEUE ( \
|
||||
E1000_EICR_RX_QUEUE0 | \
|
||||
E1000_EICR_RX_QUEUE1 | \
|
||||
E1000_EICR_RX_QUEUE2 | \
|
||||
E1000_EICR_RX_QUEUE3)
|
||||
|
||||
#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
|
||||
#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
|
||||
|
||||
#define EIMS_ENABLE_MASK ( \
|
||||
E1000_EIMS_RX_QUEUE | \
|
||||
E1000_EIMS_TX_QUEUE | \
|
||||
E1000_EIMS_TCP_TIMER | \
|
||||
E1000_EIMS_OTHER)
|
||||
|
||||
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
|
||||
#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
|
||||
#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
|
||||
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
|
||||
#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */
|
||||
#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */
|
||||
#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */
|
||||
#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */
|
||||
#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */
|
||||
#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */
|
||||
#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
|
||||
|
||||
/* Receive Descriptor - Advanced */
|
||||
union e1000_adv_rx_desc {
|
||||
struct {
|
||||
__le64 pkt_addr; /* Packet buffer address */
|
||||
__le64 hdr_addr; /* Header buffer address */
|
||||
} read;
|
||||
struct {
|
||||
struct {
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
__le16 pkt_info; /*RSS type, Pkt type*/
|
||||
/* Split Header, header buffer len */
|
||||
__le16 hdr_info;
|
||||
} hs_rss;
|
||||
} lo_dword;
|
||||
union {
|
||||
__le32 rss; /* RSS Hash */
|
||||
struct {
|
||||
__le16 ip_id; /* IP id */
|
||||
__le16 csum; /* Packet Checksum */
|
||||
} csum_ip;
|
||||
} hi_dword;
|
||||
} lower;
|
||||
struct {
|
||||
__le32 status_error; /* ext status/error */
|
||||
__le16 length; /* Packet length */
|
||||
__le16 vlan; /* VLAN tag */
|
||||
} upper;
|
||||
} wb; /* writeback */
|
||||
};
|
||||
|
||||
#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F
|
||||
#define E1000_RXDADV_RSSTYPE_SHIFT 12
|
||||
#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
|
||||
#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
|
||||
#define E1000_RXDADV_SPLITHEADER_EN 0x00001000
|
||||
#define E1000_RXDADV_SPH 0x8000
|
||||
#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
|
||||
#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
|
||||
#define E1000_RXDADV_ERR_HBO 0x00800000
|
||||
|
||||
/* RSS Hash results */
|
||||
#define E1000_RXDADV_RSSTYPE_NONE 0x00000000
|
||||
#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001
|
||||
#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
|
||||
#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008
|
||||
#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
|
||||
|
||||
/* RSS Packet Types as indicated in the receive descriptor */
|
||||
#define E1000_RXDADV_PKTTYPE_NONE 0x00000000
|
||||
#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */
|
||||
#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */
|
||||
#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */
|
||||
#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */
|
||||
#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */
|
||||
#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
|
||||
#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
|
||||
#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
|
||||
|
||||
#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
|
||||
#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */
|
||||
#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */
|
||||
#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */
|
||||
#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */
|
||||
#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */
|
||||
|
||||
/* LinkSec results */
|
||||
/* Security Processing bit Indication */
|
||||
#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000
|
||||
#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000
|
||||
#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000
|
||||
#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000
|
||||
#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000
|
||||
|
||||
#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000
|
||||
#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000
|
||||
#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000
|
||||
#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000
|
||||
#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000
|
||||
|
||||
/* Transmit Descriptor - Advanced */
|
||||
union e1000_adv_tx_desc {
|
||||
struct {
|
||||
__le64 buffer_addr; /* Address of descriptor's data buf */
|
||||
__le32 cmd_type_len;
|
||||
__le32 olinfo_status;
|
||||
} read;
|
||||
struct {
|
||||
__le64 rsvd; /* Reserved */
|
||||
__le32 nxtseq_seed;
|
||||
__le32 status;
|
||||
} wb;
|
||||
};
|
||||
|
||||
/* Adv Transmit Descriptor Config Masks */
|
||||
#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */
|
||||
#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
|
||||
#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */
|
||||
#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
|
||||
#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */
|
||||
#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
|
||||
#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */
|
||||
#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */
|
||||
#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
|
||||
#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */
|
||||
#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */
|
||||
#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */
|
||||
#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
|
||||
#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
|
||||
#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
|
||||
#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
|
||||
/* 1st & Last TSO-full iSCSI PDU*/
|
||||
#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800
|
||||
#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */
|
||||
#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
|
||||
|
||||
/* Context descriptors */
|
||||
struct e1000_adv_tx_context_desc {
|
||||
__le32 vlan_macip_lens;
|
||||
__le32 seqnum_seed;
|
||||
__le32 type_tucmd_mlhl;
|
||||
__le32 mss_l4len_idx;
|
||||
};
|
||||
|
||||
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
|
||||
#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
|
||||
#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
|
||||
#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
|
||||
#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
|
||||
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
|
||||
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
|
||||
#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
|
||||
/* IPSec Encrypt Enable for ESP */
|
||||
#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
|
||||
/* Req requires Markers and CRC */
|
||||
#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000
|
||||
#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
|
||||
#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
|
||||
/* Adv ctxt IPSec SA IDX mask */
|
||||
#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF
|
||||
/* Adv ctxt IPSec ESP len mask */
|
||||
#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF
|
||||
|
||||
/* Additional Transmit Descriptor Control definitions */
|
||||
#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */
|
||||
#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */
|
||||
/* Tx Queue Arbitration Priority 0=low, 1=high */
|
||||
#define E1000_TXDCTL_PRIORITY 0x08000000
|
||||
|
||||
/* Additional Receive Descriptor Control definitions */
|
||||
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */
|
||||
#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */
|
||||
|
||||
/* Direct Cache Access (DCA) definitions */
|
||||
#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */
|
||||
#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
|
||||
|
||||
#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
|
||||
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
|
||||
|
||||
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
|
||||
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
|
||||
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */
|
||||
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */
|
||||
#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */
|
||||
|
||||
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
|
||||
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
|
||||
#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
|
||||
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
|
||||
#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
|
||||
|
||||
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
|
||||
#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
|
||||
#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
|
||||
#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
|
||||
|
||||
/* Additional interrupt register bit definitions */
|
||||
#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */
|
||||
#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
|
||||
#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */
|
||||
|
||||
/* ETQF register bit definitions */
|
||||
#define E1000_ETQF_FILTER_ENABLE (1 << 26)
|
||||
#define E1000_ETQF_IMM_INT (1 << 29)
|
||||
#define E1000_ETQF_1588 (1 << 30)
|
||||
#define E1000_ETQF_QUEUE_ENABLE (1 << 31)
|
||||
/*
|
||||
* ETQF filter list: one static filter per filter consumer. This is
|
||||
* to avoid filter collisions later. Add new filters
|
||||
* here!!
|
||||
*
|
||||
* Current filters:
|
||||
* EAPOL 802.1x (0x888e): Filter 0
|
||||
*/
|
||||
#define E1000_ETQF_FILTER_EAPOL 0
|
||||
|
||||
#define E1000_FTQF_VF_BP 0x00008000
|
||||
#define E1000_FTQF_1588_TIME_STAMP 0x08000000
|
||||
#define E1000_FTQF_MASK 0xF0000000
|
||||
#define E1000_FTQF_MASK_PROTO_BP 0x10000000
|
||||
#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
|
||||
#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000
|
||||
#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
|
||||
|
||||
#define E1000_NVM_APME_82575 0x0400
|
||||
#define MAX_NUM_VFS 7
|
||||
|
||||
#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */
|
||||
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */
|
||||
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
|
||||
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
|
||||
#define E1000_DTXSWC_LLE_SHIFT 16
|
||||
#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
|
||||
|
||||
/* Easy defines for setting default pool, would normally be left a zero */
|
||||
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
|
||||
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
|
||||
|
||||
/* Other useful VMD_CTL register defines */
|
||||
#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
|
||||
#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
|
||||
#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
|
||||
|
||||
/* Per VM Offload register setup */
|
||||
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
|
||||
#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
|
||||
#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
|
||||
#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
|
||||
#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
|
||||
#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
|
||||
#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
|
||||
#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
|
||||
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
|
||||
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
|
||||
|
||||
#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */
|
||||
#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */
|
||||
#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */
|
||||
#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
|
||||
#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */
|
||||
|
||||
#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */
|
||||
#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */
|
||||
|
||||
#define E1000_VLVF_ARRAY_SIZE 32
|
||||
#define E1000_VLVF_VLANID_MASK 0x00000FFF
|
||||
#define E1000_VLVF_POOLSEL_SHIFT 12
|
||||
#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
|
||||
#define E1000_VLVF_LVLAN 0x00100000
|
||||
#define E1000_VLVF_VLANID_ENABLE 0x80000000
|
||||
|
||||
#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
|
||||
#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
|
||||
|
||||
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
|
||||
|
||||
#define E1000_IOVCTL 0x05BBC
|
||||
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
|
||||
|
||||
#define E1000_RPLOLR_STRVLAN 0x40000000
|
||||
#define E1000_RPLOLR_STRCRC 0x80000000
|
||||
|
||||
#define E1000_TCTL_EXT_COLD 0x000FFC00
|
||||
#define E1000_TCTL_EXT_COLD_SHIFT 10
|
||||
|
||||
#define E1000_DTXCTL_8023LL 0x0004
|
||||
#define E1000_DTXCTL_VLAN_ADDED 0x0008
|
||||
#define E1000_DTXCTL_OOS_ENABLE 0x0010
|
||||
#define E1000_DTXCTL_MDP_EN 0x0020
|
||||
#define E1000_DTXCTL_SPOOF_INT 0x0040
|
||||
|
||||
#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
|
||||
|
||||
#define ALL_QUEUES 0xFFFF
|
||||
|
||||
/* Rx packet buffer size defines */
|
||||
#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
|
||||
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
|
||||
void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
|
||||
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
|
||||
s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
|
||||
|
||||
u16 e1000_rxpbs_adjust_82580(u32 data);
|
||||
s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
|
||||
s32 e1000_set_eee_i350(struct e1000_hw *);
|
||||
s32 e1000_set_eee_i354(struct e1000_hw *);
|
||||
s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *);
|
||||
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
|
||||
#define E1000_EMC_INTERNAL_DATA 0x00
|
||||
#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
|
||||
#define E1000_EMC_DIODE1_DATA 0x01
|
||||
#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
|
||||
#define E1000_EMC_DIODE2_DATA 0x23
|
||||
#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
|
||||
#define E1000_EMC_DIODE3_DATA 0x2A
|
||||
#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
|
||||
|
||||
s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw);
|
||||
s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw);
|
||||
|
||||
/* I2C SDA and SCL timing parameters for standard mode */
|
||||
#define E1000_I2C_T_HD_STA 4
|
||||
#define E1000_I2C_T_LOW 5
|
||||
#define E1000_I2C_T_HIGH 4
|
||||
#define E1000_I2C_T_SU_STA 5
|
||||
#define E1000_I2C_T_HD_DATA 5
|
||||
#define E1000_I2C_T_SU_DATA 1
|
||||
#define E1000_I2C_T_RISE 1
|
||||
#define E1000_I2C_T_FALL 1
|
||||
#define E1000_I2C_T_SU_STO 4
|
||||
#define E1000_I2C_T_BUF 5
|
||||
|
||||
s32 e1000_set_i2c_bb(struct e1000_hw *hw);
|
||||
s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 *data);
|
||||
s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 data);
|
||||
void e1000_i2c_bus_clear(struct e1000_hw *hw);
|
||||
#endif /* _E1000_82575_H_ */
|
File diff suppressed because it is too large
@ -1,142 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _E1000_API_H_
|
||||
#define _E1000_API_H_
|
||||
|
||||
#include "e1000_hw.h"
|
||||
|
||||
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
|
||||
extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
|
||||
extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
|
||||
extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
|
||||
extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
|
||||
extern void e1000_init_function_pointers_i210(struct e1000_hw *hw);
|
||||
|
||||
s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr);
|
||||
s32 e1000_set_mac_type(struct e1000_hw *hw);
|
||||
s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
|
||||
s32 e1000_init_mac_params(struct e1000_hw *hw);
|
||||
s32 e1000_init_nvm_params(struct e1000_hw *hw);
|
||||
s32 e1000_init_phy_params(struct e1000_hw *hw);
|
||||
s32 e1000_init_mbx_params(struct e1000_hw *hw);
|
||||
s32 e1000_get_bus_info(struct e1000_hw *hw);
|
||||
void e1000_clear_vfta(struct e1000_hw *hw);
|
||||
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
|
||||
s32 e1000_force_mac_fc(struct e1000_hw *hw);
|
||||
s32 e1000_check_for_link(struct e1000_hw *hw);
|
||||
s32 e1000_reset_hw(struct e1000_hw *hw);
|
||||
s32 e1000_init_hw(struct e1000_hw *hw);
|
||||
s32 e1000_setup_link(struct e1000_hw *hw);
|
||||
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
|
||||
s32 e1000_disable_pcie_master(struct e1000_hw *hw);
|
||||
void e1000_config_collision_dist(struct e1000_hw *hw);
|
||||
void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
|
||||
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
|
||||
void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
|
||||
u32 mc_addr_count);
|
||||
s32 e1000_setup_led(struct e1000_hw *hw);
|
||||
s32 e1000_cleanup_led(struct e1000_hw *hw);
|
||||
s32 e1000_check_reset_block(struct e1000_hw *hw);
|
||||
s32 e1000_blink_led(struct e1000_hw *hw);
|
||||
s32 e1000_led_on(struct e1000_hw *hw);
|
||||
s32 e1000_led_off(struct e1000_hw *hw);
|
||||
s32 e1000_id_led_init(struct e1000_hw *hw);
|
||||
void e1000_reset_adaptive(struct e1000_hw *hw);
|
||||
void e1000_update_adaptive(struct e1000_hw *hw);
|
||||
s32 e1000_get_cable_length(struct e1000_hw *hw);
|
||||
s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
|
||||
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
|
||||
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
|
||||
s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
|
||||
u8 data);
|
||||
s32 e1000_get_phy_info(struct e1000_hw *hw);
|
||||
void e1000_release_phy(struct e1000_hw *hw);
|
||||
s32 e1000_acquire_phy(struct e1000_hw *hw);
|
||||
s32 e1000_phy_hw_reset(struct e1000_hw *hw);
|
||||
s32 e1000_phy_commit(struct e1000_hw *hw);
|
||||
void e1000_power_up_phy(struct e1000_hw *hw);
|
||||
void e1000_power_down_phy(struct e1000_hw *hw);
|
||||
s32 e1000_read_mac_addr(struct e1000_hw *hw);
|
||||
s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size);
|
||||
s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
|
||||
void e1000_reload_nvm(struct e1000_hw *hw);
|
||||
s32 e1000_update_nvm_checksum(struct e1000_hw *hw);
|
||||
s32 e1000_validate_nvm_checksum(struct e1000_hw *hw);
|
||||
s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
|
||||
s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
|
||||
s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
|
||||
s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
|
||||
s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
|
||||
s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
|
||||
bool e1000_check_mng_mode(struct e1000_hw *hw);
|
||||
bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
|
||||
s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
|
||||
s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
|
||||
u16 offset, u8 *sum);
|
||||
s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
|
||||
struct e1000_host_mng_command_header *hdr);
|
||||
s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
|
||||
s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw);
|
||||
s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw);
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* TBI_ACCEPT macro definition:
|
||||
*
|
||||
* This macro requires:
|
||||
* adapter = a pointer to struct e1000_hw
|
||||
* status = the 8 bit status field of the Rx descriptor with EOP set
|
||||
* error = the 8 bit error field of the Rx descriptor with EOP set
|
||||
* length = the sum of all the length fields of the Rx descriptors that
|
||||
* make up the current frame
|
||||
* last_byte = the last byte of the frame DMAed by the hardware
|
||||
* max_frame_length = the maximum frame length we want to accept.
|
||||
* min_frame_length = the minimum frame length we want to accept.
|
||||
*
|
||||
* This macro is a conditional that should be used in the interrupt
|
||||
* handler's Rx processing routine when RxErrors have been detected.
|
||||
*
|
||||
* Typical use:
|
||||
* ...
|
||||
* if (TBI_ACCEPT) {
|
||||
* accept_frame = true;
|
||||
* e1000_tbi_adjust_stats(adapter, MacAddress);
|
||||
* frame_length--;
|
||||
* } else {
|
||||
* accept_frame = false;
|
||||
* }
|
||||
* ...
|
||||
*/
|
||||
|
||||
/* The carrier extension symbol, as received by the NIC. */
|
||||
#define CARRIER_EXTENSION 0x0F
|
||||
|
||||
#define TBI_ACCEPT(a, status, errors, length, last_byte, \
|
||||
min_frame_size, max_frame_size) \
|
||||
(e1000_tbi_sbp_enabled_82543(a) && \
|
||||
(((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
|
||||
((last_byte) == CARRIER_EXTENSION) && \
|
||||
(((status) & E1000_RXD_STAT_VP) ? \
|
||||
(((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
|
||||
((length) <= (max_frame_size + 1))) : \
|
||||
(((length) > min_frame_size) && \
|
||||
((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
|
||||
|
||||
#ifndef E1000_MAX
|
||||
#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
#endif
|
||||
#ifndef E1000_DIVIDE_ROUND_UP
|
||||
#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */
|
||||
#endif
|
||||
#endif /* _E1000_API_H_ */
|
File diff suppressed because it is too large
@ -1,778 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _E1000_HW_H_
|
||||
#define _E1000_HW_H_
|
||||
|
||||
#include "e1000_osdep.h"
|
||||
#include "e1000_regs.h"
|
||||
#include "e1000_defines.h"
|
||||
|
||||
struct e1000_hw;
|
||||
|
||||
#define E1000_DEV_ID_82576 0x10C9
|
||||
#define E1000_DEV_ID_82576_FIBER 0x10E6
|
||||
#define E1000_DEV_ID_82576_SERDES 0x10E7
|
||||
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
|
||||
#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526
|
||||
#define E1000_DEV_ID_82576_NS 0x150A
|
||||
#define E1000_DEV_ID_82576_NS_SERDES 0x1518
|
||||
#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
|
||||
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
|
||||
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
|
||||
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
|
||||
#define E1000_DEV_ID_82580_COPPER 0x150E
|
||||
#define E1000_DEV_ID_82580_FIBER 0x150F
|
||||
#define E1000_DEV_ID_82580_SERDES 0x1510
|
||||
#define E1000_DEV_ID_82580_SGMII 0x1511
|
||||
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
|
||||
#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
|
||||
#define E1000_DEV_ID_I350_COPPER 0x1521
|
||||
#define E1000_DEV_ID_I350_FIBER 0x1522
|
||||
#define E1000_DEV_ID_I350_SERDES 0x1523
|
||||
#define E1000_DEV_ID_I350_SGMII 0x1524
|
||||
#define E1000_DEV_ID_I350_DA4 0x1546
|
||||
#define E1000_DEV_ID_I210_COPPER 0x1533
|
||||
#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534
|
||||
#define E1000_DEV_ID_I210_COPPER_IT 0x1535
|
||||
#define E1000_DEV_ID_I210_FIBER 0x1536
|
||||
#define E1000_DEV_ID_I210_SERDES 0x1537
|
||||
#define E1000_DEV_ID_I210_SGMII 0x1538
|
||||
#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
|
||||
#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
|
||||
#define E1000_DEV_ID_I211_COPPER 0x1539
|
||||
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
|
||||
#define E1000_DEV_ID_I354_SGMII 0x1F41
|
||||
#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45
|
||||
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
|
||||
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
|
||||
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
|
||||
#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
|
||||
|
||||
#define E1000_REVISION_0 0
|
||||
#define E1000_REVISION_1 1
|
||||
#define E1000_REVISION_2 2
|
||||
#define E1000_REVISION_3 3
|
||||
#define E1000_REVISION_4 4
|
||||
|
||||
#define E1000_FUNC_0 0
|
||||
#define E1000_FUNC_1 1
|
||||
#define E1000_FUNC_2 2
|
||||
#define E1000_FUNC_3 3
|
||||
|
||||
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
|
||||
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
|
||||
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
|
||||
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
|
||||
|
||||
enum e1000_mac_type {
|
||||
e1000_undefined = 0,
|
||||
e1000_82575,
|
||||
e1000_82576,
|
||||
e1000_82580,
|
||||
e1000_i350,
|
||||
e1000_i354,
|
||||
e1000_i210,
|
||||
e1000_i211,
|
||||
e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
|
||||
};
|
||||
|
||||
enum e1000_media_type {
|
||||
e1000_media_type_unknown = 0,
|
||||
e1000_media_type_copper = 1,
|
||||
e1000_media_type_fiber = 2,
|
||||
e1000_media_type_internal_serdes = 3,
|
||||
e1000_num_media_types
|
||||
};
|
||||
|
||||
enum e1000_nvm_type {
|
||||
e1000_nvm_unknown = 0,
|
||||
e1000_nvm_none,
|
||||
e1000_nvm_eeprom_spi,
|
||||
e1000_nvm_flash_hw,
|
||||
e1000_nvm_invm,
|
||||
e1000_nvm_flash_sw
|
||||
};
|
||||
|
||||
enum e1000_nvm_override {
|
||||
e1000_nvm_override_none = 0,
|
||||
e1000_nvm_override_spi_small,
|
||||
e1000_nvm_override_spi_large,
|
||||
};
|
||||
|
||||
enum e1000_phy_type {
|
||||
e1000_phy_unknown = 0,
|
||||
e1000_phy_none,
|
||||
e1000_phy_m88,
|
||||
e1000_phy_igp,
|
||||
e1000_phy_igp_2,
|
||||
e1000_phy_gg82563,
|
||||
e1000_phy_igp_3,
|
||||
e1000_phy_ife,
|
||||
e1000_phy_82580,
|
||||
e1000_phy_vf,
|
||||
e1000_phy_i210,
|
||||
};
|
||||
|
||||
enum e1000_bus_type {
|
||||
e1000_bus_type_unknown = 0,
|
||||
e1000_bus_type_pci,
|
||||
e1000_bus_type_pcix,
|
||||
e1000_bus_type_pci_express,
|
||||
e1000_bus_type_reserved
|
||||
};
|
||||
|
||||
enum e1000_bus_speed {
|
||||
e1000_bus_speed_unknown = 0,
|
||||
e1000_bus_speed_33,
|
||||
e1000_bus_speed_66,
|
||||
e1000_bus_speed_100,
|
||||
e1000_bus_speed_120,
|
||||
e1000_bus_speed_133,
|
||||
e1000_bus_speed_2500,
|
||||
e1000_bus_speed_5000,
|
||||
e1000_bus_speed_reserved
|
||||
};
|
||||
|
||||
enum e1000_bus_width {
|
||||
e1000_bus_width_unknown = 0,
|
||||
e1000_bus_width_pcie_x1,
|
||||
e1000_bus_width_pcie_x2,
|
||||
e1000_bus_width_pcie_x4 = 4,
|
||||
e1000_bus_width_pcie_x8 = 8,
|
||||
e1000_bus_width_32,
|
||||
e1000_bus_width_64,
|
||||
e1000_bus_width_reserved
|
||||
};
|
||||
|
||||
enum e1000_1000t_rx_status {
|
||||
e1000_1000t_rx_status_not_ok = 0,
|
||||
e1000_1000t_rx_status_ok,
|
||||
e1000_1000t_rx_status_undefined = 0xFF
|
||||
};
|
||||
|
||||
enum e1000_rev_polarity {
|
||||
e1000_rev_polarity_normal = 0,
|
||||
e1000_rev_polarity_reversed,
|
||||
e1000_rev_polarity_undefined = 0xFF
|
||||
};
|
||||
|
||||
enum e1000_fc_mode {
|
||||
e1000_fc_none = 0,
|
||||
e1000_fc_rx_pause,
|
||||
e1000_fc_tx_pause,
|
||||
e1000_fc_full,
|
||||
e1000_fc_default = 0xFF
|
||||
};
|
||||
|
||||
enum e1000_ms_type {
|
||||
e1000_ms_hw_default = 0,
|
||||
e1000_ms_force_master,
|
||||
e1000_ms_force_slave,
|
||||
e1000_ms_auto
|
||||
};
|
||||
|
||||
enum e1000_smart_speed {
|
||||
e1000_smart_speed_default = 0,
|
||||
e1000_smart_speed_on,
|
||||
e1000_smart_speed_off
|
||||
};
|
||||
|
||||
enum e1000_serdes_link_state {
|
||||
e1000_serdes_link_down = 0,
|
||||
e1000_serdes_link_autoneg_progress,
|
||||
e1000_serdes_link_autoneg_complete,
|
||||
e1000_serdes_link_forced_up
|
||||
};
|
||||
|
||||
#ifndef __le16
|
||||
#define __le16 u16
|
||||
#endif
|
||||
#ifndef __le32
|
||||
#define __le32 u32
|
||||
#endif
|
||||
#ifndef __le64
|
||||
#define __le64 u64
|
||||
#endif
|
||||
/* Receive Descriptor */
|
||||
struct e1000_rx_desc {
|
||||
__le64 buffer_addr; /* Address of the descriptor's data buffer */
|
||||
__le16 length; /* Length of data DMAed into data buffer */
|
||||
__le16 csum; /* Packet checksum */
|
||||
u8 status; /* Descriptor status */
|
||||
u8 errors; /* Descriptor Errors */
|
||||
__le16 special;
|
||||
};
|
||||
|
||||
/* Receive Descriptor - Extended */
|
||||
union e1000_rx_desc_extended {
|
||||
struct {
|
||||
__le64 buffer_addr;
|
||||
__le64 reserved;
|
||||
} read;
|
||||
struct {
|
||||
struct {
|
||||
__le32 mrq; /* Multiple Rx Queues */
|
||||
union {
|
||||
__le32 rss; /* RSS Hash */
|
||||
struct {
|
||||
__le16 ip_id; /* IP id */
|
||||
__le16 csum; /* Packet Checksum */
|
||||
} csum_ip;
|
||||
} hi_dword;
|
||||
} lower;
|
||||
struct {
|
||||
__le32 status_error; /* ext status/error */
|
||||
__le16 length;
|
||||
__le16 vlan; /* VLAN tag */
|
||||
} upper;
|
||||
} wb; /* writeback */
|
||||
};
|
||||
|
||||
#define MAX_PS_BUFFERS 4
|
||||
|
||||
/* Number of packet split data buffers (not including the header buffer) */
|
||||
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
|
||||
|
||||
/* Receive Descriptor - Packet Split */
|
||||
union e1000_rx_desc_packet_split {
|
||||
struct {
|
||||
/* one buffer for protocol header(s), three data buffers */
|
||||
__le64 buffer_addr[MAX_PS_BUFFERS];
|
||||
} read;
|
||||
struct {
|
||||
struct {
|
||||
__le32 mrq; /* Multiple Rx Queues */
|
||||
union {
|
||||
__le32 rss; /* RSS Hash */
|
||||
struct {
|
||||
__le16 ip_id; /* IP id */
|
||||
__le16 csum; /* Packet Checksum */
|
||||
} csum_ip;
|
||||
} hi_dword;
|
||||
} lower;
|
||||
struct {
|
||||
__le32 status_error; /* ext status/error */
|
||||
__le16 length0; /* length of buffer 0 */
|
||||
__le16 vlan; /* VLAN tag */
|
||||
} middle;
|
||||
struct {
|
||||
__le16 header_status;
|
||||
/* length of buffers 1-3 */
|
||||
__le16 length[PS_PAGE_BUFFERS];
|
||||
} upper;
|
||||
__le64 reserved;
|
||||
} wb; /* writeback */
|
||||
};
|
||||
|
||||
/* Transmit Descriptor */
|
||||
struct e1000_tx_desc {
|
||||
__le64 buffer_addr; /* Address of the descriptor's data buffer */
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
__le16 length; /* Data buffer length */
|
||||
u8 cso; /* Checksum offset */
|
||||
u8 cmd; /* Descriptor control */
|
||||
} flags;
|
||||
} lower;
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
u8 status; /* Descriptor status */
|
||||
u8 css; /* Checksum start */
|
||||
__le16 special;
|
||||
} fields;
|
||||
} upper;
|
||||
};
|
||||
|
||||
/* Offload Context Descriptor */
|
||||
struct e1000_context_desc {
|
||||
union {
|
||||
__le32 ip_config;
|
||||
struct {
|
||||
u8 ipcss; /* IP checksum start */
|
||||
u8 ipcso; /* IP checksum offset */
|
||||
__le16 ipcse; /* IP checksum end */
|
||||
} ip_fields;
|
||||
} lower_setup;
|
||||
union {
|
||||
__le32 tcp_config;
|
||||
struct {
|
||||
u8 tucss; /* TCP checksum start */
|
||||
u8 tucso; /* TCP checksum offset */
|
||||
__le16 tucse; /* TCP checksum end */
|
||||
} tcp_fields;
|
||||
} upper_setup;
|
||||
__le32 cmd_and_length;
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
u8 status; /* Descriptor status */
|
||||
u8 hdr_len; /* Header length */
|
||||
__le16 mss; /* Maximum segment size */
|
||||
} fields;
|
||||
} tcp_seg_setup;
|
||||
};
|
||||
|
||||
/* Offload data descriptor */
|
||||
struct e1000_data_desc {
|
||||
__le64 buffer_addr; /* Address of the descriptor's buffer address */
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
__le16 length; /* Data buffer length */
|
||||
u8 typ_len_ext;
|
||||
u8 cmd;
|
||||
} flags;
|
||||
} lower;
|
||||
union {
|
||||
__le32 data;
|
||||
struct {
|
||||
u8 status; /* Descriptor status */
|
||||
u8 popts; /* Packet Options */
|
||||
__le16 special;
|
||||
} fields;
|
||||
} upper;
|
||||
};
|
||||
|
||||
/* Statistics counters collected by the MAC */
|
||||
struct e1000_hw_stats {
|
||||
u64 crcerrs;
|
||||
u64 algnerrc;
|
||||
u64 symerrs;
|
||||
u64 rxerrc;
|
||||
u64 mpc;
|
||||
u64 scc;
|
||||
u64 ecol;
|
||||
u64 mcc;
|
||||
u64 latecol;
|
||||
u64 colc;
|
||||
u64 dc;
|
||||
u64 tncrs;
|
||||
u64 sec;
|
||||
u64 cexterr;
|
||||
u64 rlec;
|
||||
u64 xonrxc;
|
||||
u64 xontxc;
|
||||
u64 xoffrxc;
|
||||
u64 xofftxc;
|
||||
u64 fcruc;
|
||||
u64 prc64;
|
||||
u64 prc127;
|
||||
u64 prc255;
|
||||
u64 prc511;
|
||||
u64 prc1023;
|
||||
u64 prc1522;
|
||||
u64 gprc;
|
||||
u64 bprc;
|
||||
u64 mprc;
|
||||
u64 gptc;
|
||||
u64 gorc;
|
||||
u64 gotc;
|
||||
u64 rnbc;
|
||||
u64 ruc;
|
||||
u64 rfc;
|
||||
u64 roc;
|
||||
u64 rjc;
|
||||
u64 mgprc;
|
||||
u64 mgpdc;
|
||||
u64 mgptc;
|
||||
u64 tor;
|
||||
u64 tot;
|
||||
u64 tpr;
|
||||
u64 tpt;
|
||||
u64 ptc64;
|
||||
u64 ptc127;
|
||||
u64 ptc255;
|
||||
u64 ptc511;
|
||||
u64 ptc1023;
|
||||
u64 ptc1522;
|
||||
u64 mptc;
|
||||
u64 bptc;
|
||||
u64 tsctc;
|
||||
u64 tsctfc;
|
||||
u64 iac;
|
||||
u64 icrxptc;
|
||||
u64 icrxatc;
|
||||
u64 ictxptc;
|
||||
u64 ictxatc;
|
||||
u64 ictxqec;
|
||||
u64 ictxqmtc;
|
||||
u64 icrxdmtc;
|
||||
u64 icrxoc;
|
||||
u64 cbtmpc;
|
||||
u64 htdpmc;
|
||||
u64 cbrdpc;
|
||||
u64 cbrmpc;
|
||||
u64 rpthc;
|
||||
u64 hgptc;
|
||||
u64 htcbdpc;
|
||||
u64 hgorc;
|
||||
u64 hgotc;
|
||||
u64 lenerrs;
|
||||
u64 scvpc;
|
||||
u64 hrmpc;
|
||||
u64 doosync;
|
||||
u64 o2bgptc;
|
||||
u64 o2bspc;
|
||||
u64 b2ospc;
|
||||
u64 b2ogprc;
|
||||
};
|
||||
|
||||
|
||||
struct e1000_phy_stats {
|
||||
u32 idle_errors;
|
||||
u32 receive_errors;
|
||||
};
|
||||
|
||||
struct e1000_host_mng_dhcp_cookie {
|
||||
u32 signature;
|
||||
u8 status;
|
||||
u8 reserved0;
|
||||
u16 vlan_id;
|
||||
u32 reserved1;
|
||||
u16 reserved2;
|
||||
u8 reserved3;
|
||||
u8 checksum;
|
||||
};
|
||||
|
||||
/* Host Interface "Rev 1" */
|
||||
struct e1000_host_command_header {
|
||||
u8 command_id;
|
||||
u8 command_length;
|
||||
u8 command_options;
|
||||
u8 checksum;
|
||||
};
|
||||
|
||||
#define E1000_HI_MAX_DATA_LENGTH 252
|
||||
struct e1000_host_command_info {
|
||||
struct e1000_host_command_header command_header;
|
||||
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
|
||||
};
|
||||
|
||||
/* Host Interface "Rev 2" */
|
||||
struct e1000_host_mng_command_header {
|
||||
u8 command_id;
|
||||
u8 checksum;
|
||||
u16 reserved1;
|
||||
u16 reserved2;
|
||||
u16 command_length;
|
||||
};
|
||||
|
||||
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
|
||||
struct e1000_host_mng_command_info {
|
||||
struct e1000_host_mng_command_header command_header;
|
||||
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
|
||||
};
|
||||
|
||||
#include "e1000_mac.h"
|
||||
#include "e1000_phy.h"
|
||||
#include "e1000_nvm.h"
|
||||
#include "e1000_manage.h"
|
||||
#include "e1000_mbx.h"
|
||||
|
||||
/* Function pointers for the MAC. */
|
||||
struct e1000_mac_operations {
|
||||
s32 (*init_params)(struct e1000_hw *);
|
||||
s32 (*id_led_init)(struct e1000_hw *);
|
||||
s32 (*blink_led)(struct e1000_hw *);
|
||||
bool (*check_mng_mode)(struct e1000_hw *);
|
||||
s32 (*check_for_link)(struct e1000_hw *);
|
||||
s32 (*cleanup_led)(struct e1000_hw *);
|
||||
void (*clear_hw_cntrs)(struct e1000_hw *);
|
||||
void (*clear_vfta)(struct e1000_hw *);
|
||||
s32 (*get_bus_info)(struct e1000_hw *);
|
||||
void (*set_lan_id)(struct e1000_hw *);
|
||||
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
|
||||
s32 (*led_on)(struct e1000_hw *);
|
||||
s32 (*led_off)(struct e1000_hw *);
|
||||
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
|
||||
s32 (*reset_hw)(struct e1000_hw *);
|
||||
s32 (*init_hw)(struct e1000_hw *);
|
||||
void (*shutdown_serdes)(struct e1000_hw *);
|
||||
void (*power_up_serdes)(struct e1000_hw *);
|
||||
s32 (*setup_link)(struct e1000_hw *);
|
||||
s32 (*setup_physical_interface)(struct e1000_hw *);
|
||||
s32 (*setup_led)(struct e1000_hw *);
|
||||
void (*write_vfta)(struct e1000_hw *, u32, u32);
|
||||
void (*config_collision_dist)(struct e1000_hw *);
|
||||
void (*rar_set)(struct e1000_hw *, u8*, u32);
|
||||
s32 (*read_mac_addr)(struct e1000_hw *);
|
||||
s32 (*validate_mdi_setting)(struct e1000_hw *);
|
||||
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
|
||||
s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
|
||||
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
|
||||
void (*release_swfw_sync)(struct e1000_hw *, u16);
|
||||
};
|
||||
|
||||
/* When to use various PHY register access functions:
|
||||
*
|
||||
* Func Caller
|
||||
* Function Does Does When to use
|
||||
* ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
* X_reg L,P,A n/a for simple PHY reg accesses
|
||||
* X_reg_locked P,A L for multiple accesses of different regs
|
||||
* on different pages
|
||||
* X_reg_page A L,P for multiple accesses of different regs
|
||||
* on the same page
|
||||
*
|
||||
* Where X=[read|write], L=locking, P=sets page, A=register access
|
||||
*
|
||||
*/
|
||||
struct e1000_phy_operations {
|
||||
s32 (*init_params)(struct e1000_hw *);
|
||||
s32 (*acquire)(struct e1000_hw *);
|
||||
s32 (*check_polarity)(struct e1000_hw *);
|
||||
s32 (*check_reset_block)(struct e1000_hw *);
|
||||
s32 (*commit)(struct e1000_hw *);
|
||||
s32 (*force_speed_duplex)(struct e1000_hw *);
|
||||
s32 (*get_cfg_done)(struct e1000_hw *hw);
|
||||
s32 (*get_cable_length)(struct e1000_hw *);
|
||||
s32 (*get_info)(struct e1000_hw *);
|
||||
s32 (*set_page)(struct e1000_hw *, u16);
|
||||
s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
|
||||
s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
|
||||
s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *);
|
||||
void (*release)(struct e1000_hw *);
|
||||
s32 (*reset)(struct e1000_hw *);
|
||||
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
|
||||
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
|
||||
s32 (*write_reg)(struct e1000_hw *, u32, u16);
|
||||
s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
|
||||
s32 (*write_reg_page)(struct e1000_hw *, u32, u16);
|
||||
void (*power_up)(struct e1000_hw *);
|
||||
void (*power_down)(struct e1000_hw *);
|
||||
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
|
||||
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
|
||||
};
|
||||
|
||||
/* Function pointers for the NVM. */
|
||||
struct e1000_nvm_operations {
|
||||
s32 (*init_params)(struct e1000_hw *);
|
||||
s32 (*acquire)(struct e1000_hw *);
|
||||
s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
|
||||
void (*release)(struct e1000_hw *);
|
||||
void (*reload)(struct e1000_hw *);
|
||||
s32 (*update)(struct e1000_hw *);
|
||||
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
|
||||
s32 (*validate)(struct e1000_hw *);
|
||||
s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
|
||||
};
|
||||
|
||||
#define E1000_MAX_SENSORS 3
|
||||
|
||||
struct e1000_thermal_diode_data {
|
||||
u8 location;
|
||||
u8 temp;
|
||||
u8 caution_thresh;
|
||||
u8 max_op_thresh;
|
||||
};
|
||||
|
||||
struct e1000_thermal_sensor_data {
|
||||
struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
|
||||
};
|
||||
|
||||
struct e1000_mac_info {
|
||||
struct e1000_mac_operations ops;
|
||||
u8 addr[ETH_ADDR_LEN];
|
||||
u8 perm_addr[ETH_ADDR_LEN];
|
||||
|
||||
enum e1000_mac_type type;
|
||||
|
||||
u32 collision_delta;
|
||||
u32 ledctl_default;
|
||||
u32 ledctl_mode1;
|
||||
u32 ledctl_mode2;
|
||||
u32 mc_filter_type;
|
||||
u32 tx_packet_delta;
|
||||
u32 txcw;
|
||||
|
||||
u16 current_ifs_val;
|
||||
u16 ifs_max_val;
|
||||
u16 ifs_min_val;
|
||||
u16 ifs_ratio;
|
||||
u16 ifs_step_size;
|
||||
u16 mta_reg_count;
|
||||
u16 uta_reg_count;
|
||||
|
||||
/* Maximum size of the MTA register table in all supported adapters */
|
||||
#define MAX_MTA_REG 128
|
||||
u32 mta_shadow[MAX_MTA_REG];
|
||||
u16 rar_entry_count;
|
||||
|
||||
u8 forced_speed_duplex;
|
||||
|
||||
bool adaptive_ifs;
|
||||
bool has_fwsm;
|
||||
bool arc_subsystem_valid;
|
||||
bool asf_firmware_present;
|
||||
bool autoneg;
|
||||
bool autoneg_failed;
|
||||
bool get_link_status;
|
||||
bool in_ifs_mode;
|
||||
enum e1000_serdes_link_state serdes_link_state;
|
||||
bool serdes_has_link;
|
||||
bool tx_pkt_filtering;
|
||||
struct e1000_thermal_sensor_data thermal_sensor_data;
|
||||
};
|
||||
|
||||
struct e1000_phy_info {
|
||||
struct e1000_phy_operations ops;
|
||||
enum e1000_phy_type type;
|
||||
|
||||
enum e1000_1000t_rx_status local_rx;
|
||||
enum e1000_1000t_rx_status remote_rx;
|
||||
enum e1000_ms_type ms_type;
|
||||
enum e1000_ms_type original_ms_type;
|
||||
enum e1000_rev_polarity cable_polarity;
|
||||
enum e1000_smart_speed smart_speed;
|
||||
|
||||
u32 addr;
|
||||
u32 id;
|
||||
u32 reset_delay_us; /* in usec */
|
||||
u32 revision;
|
||||
|
||||
enum e1000_media_type media_type;
|
||||
|
||||
u16 autoneg_advertised;
|
||||
u16 autoneg_mask;
|
||||
u16 cable_length;
|
||||
u16 max_cable_length;
|
||||
u16 min_cable_length;
|
||||
|
||||
u8 mdix;
|
||||
|
||||
bool disable_polarity_correction;
|
||||
bool is_mdix;
|
||||
bool polarity_correction;
|
||||
bool reset_disable;
|
||||
bool speed_downgraded;
|
||||
bool autoneg_wait_to_complete;
|
||||
};
|
||||
|
||||
struct e1000_nvm_info {
|
||||
struct e1000_nvm_operations ops;
|
||||
enum e1000_nvm_type type;
|
||||
enum e1000_nvm_override override;
|
||||
|
||||
u32 flash_bank_size;
|
||||
u32 flash_base_addr;
|
||||
|
||||
u16 word_size;
|
||||
u16 delay_usec;
|
||||
u16 address_bits;
|
||||
u16 opcode_bits;
|
||||
u16 page_size;
|
||||
};
|
||||
|
||||
struct e1000_bus_info {
|
||||
enum e1000_bus_type type;
|
||||
enum e1000_bus_speed speed;
|
||||
enum e1000_bus_width width;
|
||||
|
||||
u16 func;
|
||||
u16 pci_cmd_word;
|
||||
};
|
||||
|
||||
struct e1000_fc_info {
|
||||
u32 high_water; /* Flow control high-water mark */
|
||||
u32 low_water; /* Flow control low-water mark */
|
||||
u16 pause_time; /* Flow control pause timer */
|
||||
u16 refresh_time; /* Flow control refresh timer */
|
||||
bool send_xon; /* Flow control send XON */
|
||||
bool strict_ieee; /* Strict IEEE mode */
|
||||
enum e1000_fc_mode current_mode; /* FC mode in effect */
|
||||
enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
|
||||
};
|
||||
|
||||
struct e1000_mbx_operations {
|
||||
s32 (*init_params)(struct e1000_hw *hw);
|
||||
s32 (*read)(struct e1000_hw *, u32 *, u16, u16);
|
||||
s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
|
||||
s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16);
|
||||
s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
|
||||
s32 (*check_for_msg)(struct e1000_hw *, u16);
|
||||
s32 (*check_for_ack)(struct e1000_hw *, u16);
|
||||
s32 (*check_for_rst)(struct e1000_hw *, u16);
|
||||
};
|
||||
|
||||
struct e1000_mbx_stats {
|
||||
u32 msgs_tx;
|
||||
u32 msgs_rx;
|
||||
|
||||
u32 acks;
|
||||
u32 reqs;
|
||||
u32 rsts;
|
||||
};
|
||||
|
||||
struct e1000_mbx_info {
|
||||
struct e1000_mbx_operations ops;
|
||||
struct e1000_mbx_stats stats;
|
||||
u32 timeout;
|
||||
u32 usec_delay;
|
||||
u16 size;
|
||||
};
|
||||
|
||||
struct e1000_dev_spec_82575 {
|
||||
bool sgmii_active;
|
||||
bool global_device_reset;
|
||||
bool eee_disable;
|
||||
bool module_plugged;
|
||||
bool clear_semaphore_once;
|
||||
u32 mtu;
|
||||
struct sfp_e1000_flags eth_flags;
|
||||
u8 media_port;
|
||||
bool media_changed;
|
||||
};
|
||||
|
||||
struct e1000_dev_spec_vf {
|
||||
u32 vf_number;
|
||||
u32 v2p_mailbox;
|
||||
};
|
||||
|
||||
struct e1000_hw {
|
||||
void *back;
|
||||
|
||||
u8 __iomem *hw_addr;
|
||||
u8 __iomem *flash_address;
|
||||
unsigned long io_base;
|
||||
|
||||
struct e1000_mac_info mac;
|
||||
struct e1000_fc_info fc;
|
||||
struct e1000_phy_info phy;
|
||||
struct e1000_nvm_info nvm;
|
||||
struct e1000_bus_info bus;
|
||||
struct e1000_mbx_info mbx;
|
||||
struct e1000_host_mng_dhcp_cookie mng_cookie;
|
||||
|
||||
union {
|
||||
struct e1000_dev_spec_82575 _82575;
|
||||
struct e1000_dev_spec_vf vf;
|
||||
} dev_spec;
|
||||
|
||||
u16 device_id;
|
||||
u16 subsystem_vendor_id;
|
||||
u16 subsystem_device_id;
|
||||
u16 vendor_id;
|
||||
|
||||
u8 revision_id;
|
||||
};
|
||||
|
||||
#include "e1000_82575.h"
|
||||
#include "e1000_i210.h"
|
||||
|
||||
/* These functions must be implemented by drivers */
|
||||
s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
|
||||
s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
|
||||
|
||||
#endif
|
@ -1,894 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "e1000_api.h"
|
||||
|
||||
|
||||
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
|
||||
static void e1000_release_nvm_i210(struct e1000_hw *hw);
|
||||
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
|
||||
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data);
|
||||
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
|
||||
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
|
||||
|
||||
/**
|
||||
* e1000_acquire_nvm_i210 - Request for access to EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Acquire the necessary semaphores for exclusive access to the EEPROM.
|
||||
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
|
||||
* Return successful if access grant bit set, else clear the request for
|
||||
* EEPROM access and return -E1000_ERR_NVM (-1).
|
||||
**/
|
||||
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val;
|
||||
|
||||
DEBUGFUNC("e1000_acquire_nvm_i210");
|
||||
|
||||
ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_release_nvm_i210 - Release exclusive access to EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
|
||||
* then release the semaphores acquired.
|
||||
**/
|
||||
static void e1000_release_nvm_i210(struct e1000_hw *hw)
|
||||
{
|
||||
DEBUGFUNC("e1000_release_nvm_i210");
|
||||
|
||||
e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
|
||||
* @hw: pointer to the HW structure
|
||||
* @mask: specifies which semaphore to acquire
|
||||
*
|
||||
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
|
||||
* will also specify which port we're acquiring the lock for.
|
||||
**/
|
||||
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
|
||||
{
|
||||
u32 swfw_sync;
|
||||
u32 swmask = mask;
|
||||
u32 fwmask = mask << 16;
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
|
||||
|
||||
DEBUGFUNC("e1000_acquire_swfw_sync_i210");
|
||||
|
||||
while (i < timeout) {
|
||||
if (e1000_get_hw_semaphore_i210(hw)) {
|
||||
ret_val = -E1000_ERR_SWFW_SYNC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
|
||||
if (!(swfw_sync & (fwmask | swmask)))
|
||||
break;
|
||||
|
||||
/*
|
||||
* Firmware currently using resource (fwmask)
|
||||
* or other software thread using resource (swmask)
|
||||
*/
|
||||
e1000_put_hw_semaphore_generic(hw);
|
||||
msec_delay_irq(5);
|
||||
i++;
|
||||
}
|
||||
|
||||
if (i == timeout) {
|
||||
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
|
||||
ret_val = -E1000_ERR_SWFW_SYNC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
swfw_sync |= swmask;
|
||||
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
|
||||
|
||||
e1000_put_hw_semaphore_generic(hw);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_release_swfw_sync_i210 - Release SW/FW semaphore
|
||||
* @hw: pointer to the HW structure
|
||||
* @mask: specifies which semaphore to acquire
|
||||
*
|
||||
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
|
||||
* will also specify which port we're releasing the lock for.
|
||||
**/
|
||||
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
|
||||
{
|
||||
u32 swfw_sync;
|
||||
|
||||
DEBUGFUNC("e1000_release_swfw_sync_i210");
|
||||
|
||||
while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
|
||||
; /* Empty */
|
||||
|
||||
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
|
||||
swfw_sync &= ~mask;
|
||||
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
|
||||
|
||||
e1000_put_hw_semaphore_generic(hw);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Acquire the HW semaphore to access the PHY or NVM
|
||||
**/
|
||||
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
|
||||
{
|
||||
u32 swsm;
|
||||
s32 timeout = hw->nvm.word_size + 1;
|
||||
s32 i = 0;
|
||||
|
||||
DEBUGFUNC("e1000_get_hw_semaphore_i210");
|
||||
|
||||
/* Get the SW semaphore */
|
||||
while (i < timeout) {
|
||||
swsm = E1000_READ_REG(hw, E1000_SWSM);
|
||||
if (!(swsm & E1000_SWSM_SMBI))
|
||||
break;
|
||||
|
||||
usec_delay(50);
|
||||
i++;
|
||||
}
|
||||
|
||||
if (i == timeout) {
|
||||
/* In rare circumstances, the SW semaphore may already be held
|
||||
* unintentionally. Clear the semaphore once before giving up.
|
||||
*/
|
||||
if (hw->dev_spec._82575.clear_semaphore_once) {
|
||||
hw->dev_spec._82575.clear_semaphore_once = false;
|
||||
e1000_put_hw_semaphore_generic(hw);
|
||||
for (i = 0; i < timeout; i++) {
|
||||
swsm = E1000_READ_REG(hw, E1000_SWSM);
|
||||
if (!(swsm & E1000_SWSM_SMBI))
|
||||
break;
|
||||
|
||||
usec_delay(50);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we do not have the semaphore here, we have to give up. */
|
||||
if (i == timeout) {
|
||||
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
}
|
||||
|
||||
/* Get the FW semaphore. */
|
||||
for (i = 0; i < timeout; i++) {
|
||||
swsm = E1000_READ_REG(hw, E1000_SWSM);
|
||||
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
|
||||
|
||||
/* Semaphore acquired if bit latched */
|
||||
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
|
||||
break;
|
||||
|
||||
usec_delay(50);
|
||||
}
|
||||
|
||||
if (i == timeout) {
|
||||
/* Release semaphores */
|
||||
e1000_put_hw_semaphore_generic(hw);
|
||||
DEBUGOUT("Driver can't access the NVM\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset of word in the Shadow Ram to read
|
||||
* @words: number of words to read
|
||||
* @data: word read from the Shadow Ram
|
||||
*
|
||||
* Reads a 16 bit word from the Shadow Ram using the EERD register.
|
||||
* Uses necessary synchronization semaphores.
|
||||
**/
|
||||
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data)
|
||||
{
|
||||
s32 status = E1000_SUCCESS;
|
||||
u16 i, count;
|
||||
|
||||
DEBUGFUNC("e1000_read_nvm_srrd_i210");
|
||||
|
||||
/* We cannot hold synchronization semaphores for too long,
|
||||
* because of forceful takeover procedure. However it is more efficient
|
||||
* to read in bursts than synchronizing access for each word. */
|
||||
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
|
||||
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
|
||||
E1000_EERD_EEWR_MAX_COUNT : (words - i);
|
||||
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
|
||||
status = e1000_read_nvm_eerd(hw, offset, count,
|
||||
data + i);
|
||||
hw->nvm.ops.release(hw);
|
||||
} else {
|
||||
status = E1000_ERR_SWFW_SYNC;
|
||||
}
|
||||
|
||||
if (status != E1000_SUCCESS)
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset within the Shadow RAM to be written to
|
||||
* @words: number of words to write
|
||||
* @data: 16 bit word(s) to be written to the Shadow RAM
|
||||
*
|
||||
* Writes data to Shadow RAM at offset using EEWR register.
|
||||
*
|
||||
* If e1000_update_nvm_checksum is not called after this function , the
|
||||
* data will not be committed to FLASH and also Shadow RAM will most likely
|
||||
* contain an invalid checksum.
|
||||
*
|
||||
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
|
||||
* partially written.
|
||||
**/
|
||||
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data)
|
||||
{
|
||||
s32 status = E1000_SUCCESS;
|
||||
u16 i, count;
|
||||
|
||||
DEBUGFUNC("e1000_write_nvm_srwr_i210");
|
||||
|
||||
/* We cannot hold synchronization semaphores for too long,
|
||||
* because of forceful takeover procedure. However it is more efficient
|
||||
* to write in bursts than synchronizing access for each word. */
|
||||
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
|
||||
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
|
||||
E1000_EERD_EEWR_MAX_COUNT : (words - i);
|
||||
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
|
||||
status = e1000_write_nvm_srwr(hw, offset, count,
|
||||
data + i);
|
||||
hw->nvm.ops.release(hw);
|
||||
} else {
|
||||
status = E1000_ERR_SWFW_SYNC;
|
||||
}
|
||||
|
||||
if (status != E1000_SUCCESS)
|
||||
break;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset within the Shadow Ram to be written to
|
||||
* @words: number of words to write
|
||||
* @data: 16 bit word(s) to be written to the Shadow Ram
|
||||
*
|
||||
* Writes data to Shadow Ram at offset using EEWR register.
|
||||
*
|
||||
* If e1000_update_nvm_checksum is not called after this function , the
|
||||
* Shadow Ram will most likely contain an invalid checksum.
|
||||
**/
|
||||
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
|
||||
u16 *data)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 i, k, eewr = 0;
|
||||
u32 attempts = 100000;
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
|
||||
DEBUGFUNC("e1000_write_nvm_srwr");
|
||||
|
||||
/*
|
||||
* A check for invalid values: offset too large, too many words,
|
||||
* too many words for the offset, and not enough words.
|
||||
*/
|
||||
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
|
||||
(words == 0)) {
|
||||
DEBUGOUT("nvm parameter(s) out of bounds\n");
|
||||
ret_val = -E1000_ERR_NVM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
|
||||
(data[i] << E1000_NVM_RW_REG_DATA) |
|
||||
E1000_NVM_RW_REG_START;
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_SRWR, eewr);
|
||||
|
||||
for (k = 0; k < attempts; k++) {
|
||||
if (E1000_NVM_RW_REG_DONE &
|
||||
E1000_READ_REG(hw, E1000_SRWR)) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
usec_delay(5);
|
||||
}
|
||||
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
DEBUGOUT("Shadow RAM write EEWR timed out\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/** e1000_read_invm_word_i210 - Reads OTP
|
||||
* @hw: pointer to the HW structure
|
||||
* @address: the word address (aka eeprom offset) to read
|
||||
* @data: pointer to the data read
|
||||
*
|
||||
* Reads 16-bit words from the OTP. Return error when the word is not
|
||||
* stored in OTP.
|
||||
**/
|
||||
static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
|
||||
{
|
||||
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
|
||||
u32 invm_dword;
|
||||
u16 i;
|
||||
u8 record_type, word_address;
|
||||
|
||||
DEBUGFUNC("e1000_read_invm_word_i210");
|
||||
|
||||
for (i = 0; i < E1000_INVM_SIZE; i++) {
|
||||
invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
|
||||
/* Get record type */
|
||||
record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
|
||||
if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
|
||||
break;
|
||||
if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
|
||||
i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
|
||||
if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
|
||||
i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
|
||||
if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
|
||||
word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
|
||||
if (word_address == address) {
|
||||
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
|
||||
DEBUGOUT2("Read INVM Word 0x%02x = %x",
|
||||
address, *data);
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (status != E1000_SUCCESS)
|
||||
DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
|
||||
return status;
|
||||
}
|
||||
|
||||
/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211
|
||||
* @hw: pointer to the HW structure
|
||||
* @address: the word address (aka eeprom offset) to read
|
||||
* @data: pointer to the data read
|
||||
*
|
||||
* Wrapper function to return data formerly found in the NVM.
|
||||
**/
|
||||
static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset,
|
||||
u16 E1000_UNUSEDARG words, u16 *data)
|
||||
{
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
|
||||
DEBUGFUNC("e1000_read_invm_i210");
|
||||
|
||||
/* Only the MAC addr is required to be present in the iNVM */
|
||||
switch (offset) {
|
||||
case NVM_MAC_ADDR:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]);
|
||||
ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1,
|
||||
&data[1]);
|
||||
ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2,
|
||||
&data[2]);
|
||||
if (ret_val != E1000_SUCCESS)
|
||||
DEBUGOUT("MAC Addr not found in iNVM\n");
|
||||
break;
|
||||
case NVM_INIT_CTRL_2:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
|
||||
ret_val = E1000_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case NVM_INIT_CTRL_4:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
|
||||
ret_val = E1000_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case NVM_LED_1_CFG:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
*data = NVM_LED_1_CFG_DEFAULT_I211;
|
||||
ret_val = E1000_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case NVM_LED_0_2_CFG:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
|
||||
ret_val = E1000_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case NVM_ID_LED_SETTINGS:
|
||||
ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
*data = ID_LED_RESERVED_FFFF;
|
||||
ret_val = E1000_SUCCESS;
|
||||
}
|
||||
break;
|
||||
case NVM_SUB_DEV_ID:
|
||||
*data = hw->subsystem_device_id;
|
||||
break;
|
||||
case NVM_SUB_VEN_ID:
|
||||
*data = hw->subsystem_vendor_id;
|
||||
break;
|
||||
case NVM_DEV_ID:
|
||||
*data = hw->device_id;
|
||||
break;
|
||||
case NVM_VEN_ID:
|
||||
*data = hw->vendor_id;
|
||||
break;
|
||||
default:
|
||||
DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
|
||||
*data = NVM_RESERVED_WORD;
|
||||
break;
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_invm_version - Reads iNVM version and image type
|
||||
* @hw: pointer to the HW structure
|
||||
* @invm_ver: version structure for the version read
|
||||
*
|
||||
* Reads iNVM version and image type.
|
||||
**/
|
||||
s32 e1000_read_invm_version(struct e1000_hw *hw,
|
||||
struct e1000_fw_version *invm_ver)
|
||||
{
|
||||
u32 *record = NULL;
|
||||
u32 *next_record = NULL;
|
||||
u32 i = 0;
|
||||
u32 invm_dword = 0;
|
||||
u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
|
||||
E1000_INVM_RECORD_SIZE_IN_BYTES);
|
||||
u32 buffer[E1000_INVM_SIZE];
|
||||
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
|
||||
u16 version = 0;
|
||||
|
||||
DEBUGFUNC("e1000_read_invm_version");
|
||||
|
||||
/* Read iNVM memory */
|
||||
for (i = 0; i < E1000_INVM_SIZE; i++) {
|
||||
invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
|
||||
buffer[i] = invm_dword;
|
||||
}
|
||||
|
||||
/* Read version number */
|
||||
for (i = 1; i < invm_blocks; i++) {
|
||||
record = &buffer[invm_blocks - i];
|
||||
next_record = &buffer[invm_blocks - i + 1];
|
||||
|
||||
/* Check if we have first version location used */
|
||||
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
|
||||
version = 0;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
/* Check if we have second version location used */
|
||||
else if ((i == 1) &&
|
||||
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
|
||||
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Check if we have odd version location
|
||||
* used and it is the last one used
|
||||
*/
|
||||
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
|
||||
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
|
||||
(i != 1))) {
|
||||
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
|
||||
>> 13;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Check if we have even version location
|
||||
* used and it is the last one used
|
||||
*/
|
||||
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
|
||||
((*record & 0x3) == 0)) {
|
||||
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (status == E1000_SUCCESS) {
|
||||
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
|
||||
>> E1000_INVM_MAJOR_SHIFT;
|
||||
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
|
||||
}
|
||||
/* Read Image Type */
|
||||
for (i = 1; i < invm_blocks; i++) {
|
||||
record = &buffer[invm_blocks - i];
|
||||
next_record = &buffer[invm_blocks - i + 1];
|
||||
|
||||
/* Check if we have image type in first location used */
|
||||
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
|
||||
invm_ver->invm_img_type = 0;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
/* Check if we have image type in first location used */
|
||||
else if ((((*record & 0x3) == 0) &&
|
||||
((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
|
||||
((((*record & 0x3) != 0) && (i != 1)))) {
|
||||
invm_ver->invm_img_type =
|
||||
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
|
||||
status = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
|
||||
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
|
||||
**/
|
||||
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 status = E1000_SUCCESS;
|
||||
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
|
||||
|
||||
DEBUGFUNC("e1000_validate_nvm_checksum_i210");
|
||||
|
||||
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
|
||||
|
||||
/*
|
||||
* Replace the read function with semaphore grabbing with
|
||||
* the one that skips this for a while.
|
||||
* We have semaphore taken already here.
|
||||
*/
|
||||
read_op_ptr = hw->nvm.ops.read;
|
||||
hw->nvm.ops.read = e1000_read_nvm_eerd;
|
||||
|
||||
status = e1000_validate_nvm_checksum_generic(hw);
|
||||
|
||||
/* Revert original read operation. */
|
||||
hw->nvm.ops.read = read_op_ptr;
|
||||
|
||||
hw->nvm.ops.release(hw);
|
||||
} else {
|
||||
status = E1000_ERR_SWFW_SYNC;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* e1000_update_nvm_checksum_i210 - Update EEPROM checksum
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
|
||||
* up to the checksum. Then calculates the EEPROM checksum and writes the
|
||||
* value to the EEPROM. Next commit EEPROM data onto the Flash.
|
||||
**/
|
||||
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
u16 checksum = 0;
|
||||
u16 i, nvm_data;
|
||||
|
||||
DEBUGFUNC("e1000_update_nvm_checksum_i210");
|
||||
|
||||
/*
|
||||
* Read the first word from the EEPROM. If this times out or fails, do
|
||||
* not continue or we could be in for a very long wait while every
|
||||
* EEPROM read fails
|
||||
*/
|
||||
ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
DEBUGOUT("EEPROM read failed\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
|
||||
/*
|
||||
* Do not use hw->nvm.ops.write, hw->nvm.ops.read
|
||||
* because we do not want to take the synchronization
|
||||
* semaphores twice here.
|
||||
*/
|
||||
|
||||
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
|
||||
ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
hw->nvm.ops.release(hw);
|
||||
DEBUGOUT("NVM Read Error while updating checksum.\n");
|
||||
goto out;
|
||||
}
|
||||
checksum += nvm_data;
|
||||
}
|
||||
checksum = (u16) NVM_SUM - checksum;
|
||||
ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
|
||||
&checksum);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
hw->nvm.ops.release(hw);
|
||||
DEBUGOUT("NVM Write Error while updating checksum.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
hw->nvm.ops.release(hw);
|
||||
|
||||
ret_val = e1000_update_flash_i210(hw);
|
||||
} else {
|
||||
ret_val = E1000_ERR_SWFW_SYNC;
|
||||
}
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_flash_presence_i210 - Check if flash device is detected.
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
**/
|
||||
bool e1000_get_flash_presence_i210(struct e1000_hw *hw)
|
||||
{
|
||||
u32 eec = 0;
|
||||
bool ret_val = false;
|
||||
|
||||
DEBUGFUNC("e1000_get_flash_presence_i210");
|
||||
|
||||
eec = E1000_READ_REG(hw, E1000_EECD);
|
||||
|
||||
if (eec & E1000_EECD_FLASH_DETECTED_I210)
|
||||
ret_val = true;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_update_flash_i210 - Commit EEPROM to the flash
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
**/
|
||||
s32 e1000_update_flash_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
u32 flup;
|
||||
|
||||
DEBUGFUNC("e1000_update_flash_i210");
|
||||
|
||||
ret_val = e1000_pool_flash_update_done_i210(hw);
|
||||
if (ret_val == -E1000_ERR_NVM) {
|
||||
DEBUGOUT("Flash update time out\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, flup);
|
||||
|
||||
ret_val = e1000_pool_flash_update_done_i210(hw);
|
||||
if (ret_val == E1000_SUCCESS)
|
||||
DEBUGOUT("Flash update complete\n");
|
||||
else
|
||||
DEBUGOUT("Flash update time out\n");
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_pool_flash_update_done_i210 - Pool FLUDONE status.
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
**/
|
||||
s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = -E1000_ERR_NVM;
|
||||
u32 i, reg;
|
||||
|
||||
DEBUGFUNC("e1000_pool_flash_update_done_i210");
|
||||
|
||||
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
|
||||
reg = E1000_READ_REG(hw, E1000_EECD);
|
||||
if (reg & E1000_EECD_FLUDONE_I210) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
break;
|
||||
}
|
||||
usec_delay(5);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Initialize the i210/i211 NVM parameters and function pointers.
|
||||
**/
|
||||
static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
|
||||
DEBUGFUNC("e1000_init_nvm_params_i210");
|
||||
|
||||
ret_val = e1000_init_nvm_params_82575(hw);
|
||||
nvm->ops.acquire = e1000_acquire_nvm_i210;
|
||||
nvm->ops.release = e1000_release_nvm_i210;
|
||||
nvm->ops.valid_led_default = e1000_valid_led_default_i210;
|
||||
if (e1000_get_flash_presence_i210(hw)) {
|
||||
hw->nvm.type = e1000_nvm_flash_hw;
|
||||
nvm->ops.read = e1000_read_nvm_srrd_i210;
|
||||
nvm->ops.write = e1000_write_nvm_srwr_i210;
|
||||
nvm->ops.validate = e1000_validate_nvm_checksum_i210;
|
||||
nvm->ops.update = e1000_update_nvm_checksum_i210;
|
||||
} else {
|
||||
hw->nvm.type = e1000_nvm_invm;
|
||||
nvm->ops.read = e1000_read_invm_i210;
|
||||
nvm->ops.write = e1000_null_write_nvm;
|
||||
nvm->ops.validate = e1000_null_ops_generic;
|
||||
nvm->ops.update = e1000_null_ops_generic;
|
||||
}
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_init_function_pointers_i210 - Init func ptrs.
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Called to initialize all function pointers and parameters.
|
||||
**/
|
||||
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
|
||||
{
|
||||
e1000_init_function_pointers_82575(hw);
|
||||
hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_valid_led_default_i210 - Verify a valid default LED config
|
||||
* @hw: pointer to the HW structure
|
||||
* @data: pointer to the NVM (EEPROM)
|
||||
*
|
||||
* Read the EEPROM for the current default LED configuration. If the
|
||||
* LED configuration is not valid, set to a valid LED configuration.
|
||||
**/
|
||||
static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
|
||||
{
|
||||
s32 ret_val;
|
||||
|
||||
DEBUGFUNC("e1000_valid_led_default_i210");
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
|
||||
switch (hw->phy.media_type) {
|
||||
case e1000_media_type_internal_serdes:
|
||||
*data = ID_LED_DEFAULT_I210_SERDES;
|
||||
break;
|
||||
case e1000_media_type_copper:
|
||||
default:
|
||||
*data = ID_LED_DEFAULT_I210;
|
||||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* __e1000_access_xmdio_reg - Read/write XMDIO register
|
||||
* @hw: pointer to the HW structure
|
||||
* @address: XMDIO address to program
|
||||
* @dev_addr: device address to program
|
||||
* @data: pointer to value to read/write from/to the XMDIO address
|
||||
* @read: boolean flag to indicate read or write
|
||||
**/
|
||||
static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address,
|
||||
u8 dev_addr, u16 *data, bool read)
|
||||
{
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
|
||||
DEBUGFUNC("__e1000_access_xmdio_reg");
|
||||
|
||||
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
|
||||
dev_addr);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
if (read)
|
||||
ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
|
||||
else
|
||||
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Recalibrate the device back to 0 */
|
||||
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_xmdio_reg - Read XMDIO register
|
||||
* @hw: pointer to the HW structure
|
||||
* @addr: XMDIO address to program
|
||||
* @dev_addr: device address to program
|
||||
* @data: value to be read from the EMI address
|
||||
**/
|
||||
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
|
||||
{
|
||||
DEBUGFUNC("e1000_read_xmdio_reg");
|
||||
|
||||
return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_xmdio_reg - Write XMDIO register
|
||||
* @hw: pointer to the HW structure
|
||||
* @addr: XMDIO address to program
|
||||
* @dev_addr: device address to program
|
||||
* @data: value to be written to the XMDIO address
|
||||
**/
|
||||
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
|
||||
{
|
||||
DEBUGFUNC("e1000_read_xmdio_reg");
|
||||
|
||||
return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false);
|
||||
}
|
@ -1,76 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _E1000_I210_H_
|
||||
#define _E1000_I210_H_
|
||||
|
||||
bool e1000_get_flash_presence_i210(struct e1000_hw *hw);
|
||||
s32 e1000_update_flash_i210(struct e1000_hw *hw);
|
||||
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw);
|
||||
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw);
|
||||
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 e1000_read_invm_version(struct e1000_hw *hw,
|
||||
struct e1000_fw_version *invm_ver);
|
||||
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
|
||||
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
|
||||
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
|
||||
u16 *data);
|
||||
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
|
||||
u16 data);
|
||||
|
||||
#define E1000_STM_OPCODE 0xDB00
|
||||
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
|
||||
|
||||
#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
|
||||
(u8)((invm_dword) & 0x7)
|
||||
#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
|
||||
(u8)(((invm_dword) & 0x0000FE00) >> 9)
|
||||
#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
|
||||
(u16)(((invm_dword) & 0xFFFF0000) >> 16)
|
||||
|
||||
enum E1000_INVM_STRUCTURE_TYPE {
|
||||
E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00,
|
||||
E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01,
|
||||
E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02,
|
||||
E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03,
|
||||
E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04,
|
||||
E1000_INVM_INVALIDATED_STRUCTURE = 0x0F,
|
||||
};
|
||||
|
||||
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
|
||||
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
|
||||
#define E1000_INVM_ULT_BYTES_SIZE 8
|
||||
#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
|
||||
#define E1000_INVM_VER_FIELD_ONE 0x1FF8
|
||||
#define E1000_INVM_VER_FIELD_TWO 0x7FE000
|
||||
#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
|
||||
|
||||
#define E1000_INVM_MAJOR_MASK 0x3F0
|
||||
#define E1000_INVM_MINOR_MASK 0xF
|
||||
#define E1000_INVM_MAJOR_SHIFT 4
|
||||
|
||||
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
|
||||
(ID_LED_DEF1_DEF2 << 4) | \
|
||||
(ID_LED_OFF1_OFF2))
|
||||
#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \
|
||||
(ID_LED_DEF1_DEF2 << 4) | \
|
||||
(ID_LED_OFF1_ON2))
|
||||
|
||||
/* NVM offset defaults for I211 devices */
|
||||
#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
|
||||
#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
|
||||
#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
|
||||
#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
@ -1,65 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _E1000_MAC_H_
|
||||
#define _E1000_MAC_H_
|
||||
|
||||
void e1000_init_mac_ops_generic(struct e1000_hw *hw);
|
||||
void e1000_null_mac_generic(struct e1000_hw *hw);
|
||||
s32 e1000_null_ops_generic(struct e1000_hw *hw);
|
||||
s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
|
||||
bool e1000_null_mng_mode(struct e1000_hw *hw);
|
||||
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
|
||||
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
|
||||
void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
|
||||
s32 e1000_blink_led_generic(struct e1000_hw *hw);
|
||||
s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
|
||||
s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
|
||||
s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
|
||||
s32 e1000_cleanup_led_generic(struct e1000_hw *hw);
|
||||
s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
|
||||
s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw);
|
||||
s32 e1000_force_mac_fc_generic(struct e1000_hw *hw);
|
||||
s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
|
||||
s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
|
||||
void e1000_set_lan_id_single_port(struct e1000_hw *hw);
|
||||
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
|
||||
s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
|
||||
u16 *duplex);
|
||||
s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
|
||||
u16 *speed, u16 *duplex);
|
||||
s32 e1000_id_led_init_generic(struct e1000_hw *hw);
|
||||
s32 e1000_led_on_generic(struct e1000_hw *hw);
|
||||
s32 e1000_led_off_generic(struct e1000_hw *hw);
|
||||
void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
|
||||
u8 *mc_addr_list, u32 mc_addr_count);
|
||||
s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
|
||||
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
|
||||
s32 e1000_setup_led_generic(struct e1000_hw *hw);
|
||||
s32 e1000_setup_link_generic(struct e1000_hw *hw);
|
||||
s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
|
||||
s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
|
||||
u32 offset, u8 data);
|
||||
|
||||
u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
|
||||
|
||||
void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
|
||||
void e1000_clear_vfta_generic(struct e1000_hw *hw);
|
||||
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
|
||||
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
|
||||
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
|
||||
void e1000_reset_adaptive_generic(struct e1000_hw *hw);
|
||||
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
|
||||
void e1000_update_adaptive_generic(struct e1000_hw *hw);
|
||||
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
|
||||
|
||||
#endif
|
@ -1,539 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "e1000_api.h"
|
||||
|
||||
/**
|
||||
* e1000_calculate_checksum - Calculate checksum for buffer
|
||||
* @buffer: pointer to EEPROM
|
||||
* @length: size of EEPROM to calculate a checksum for
|
||||
*
|
||||
* Calculates the checksum for some buffer on a specified length. The
|
||||
* checksum calculated is returned.
|
||||
**/
|
||||
u8 e1000_calculate_checksum(u8 *buffer, u32 length)
|
||||
{
|
||||
u32 i;
|
||||
u8 sum = 0;
|
||||
|
||||
DEBUGFUNC("e1000_calculate_checksum");
|
||||
|
||||
if (!buffer)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < length; i++)
|
||||
sum += buffer[i];
|
||||
|
||||
return (u8) (0 - sum);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_mng_enable_host_if_generic - Checks host interface is enabled
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
|
||||
*
|
||||
* This function checks whether the HOST IF is enabled for command operation
|
||||
* and also checks whether the previous command is completed. It busy waits
|
||||
* in case of previous command is not completed.
|
||||
**/
|
||||
s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 hicr;
|
||||
u8 i;
|
||||
|
||||
DEBUGFUNC("e1000_mng_enable_host_if_generic");
|
||||
|
||||
if (!hw->mac.arc_subsystem_valid) {
|
||||
DEBUGOUT("ARC subsystem not valid.\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
||||
/* Check that the host interface is enabled. */
|
||||
hicr = E1000_READ_REG(hw, E1000_HICR);
|
||||
if (!(hicr & E1000_HICR_EN)) {
|
||||
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
/* check the previous command is completed */
|
||||
for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
|
||||
hicr = E1000_READ_REG(hw, E1000_HICR);
|
||||
if (!(hicr & E1000_HICR_C))
|
||||
break;
|
||||
msec_delay_irq(1);
|
||||
}
|
||||
|
||||
if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
|
||||
DEBUGOUT("Previous command timeout failed .\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_mng_mode_generic - Generic check management mode
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Reads the firmware semaphore register and returns true (>0) if
|
||||
* manageability is enabled, else false (0).
|
||||
**/
|
||||
bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
|
||||
|
||||
DEBUGFUNC("e1000_check_mng_mode_generic");
|
||||
|
||||
|
||||
return (fwsm & E1000_FWSM_MODE_MASK) ==
|
||||
(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Enables packet filtering on transmit packets if manageability is enabled
|
||||
* and host interface is enabled.
|
||||
**/
|
||||
bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
|
||||
u32 *buffer = (u32 *)&hw->mng_cookie;
|
||||
u32 offset;
|
||||
s32 ret_val, hdr_csum, csum;
|
||||
u8 i, len;
|
||||
|
||||
DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
|
||||
|
||||
hw->mac.tx_pkt_filtering = true;
|
||||
|
||||
/* No manageability, no filtering */
|
||||
if (!hw->mac.ops.check_mng_mode(hw)) {
|
||||
hw->mac.tx_pkt_filtering = false;
|
||||
return hw->mac.tx_pkt_filtering;
|
||||
}
|
||||
|
||||
/* If we can't read from the host interface for whatever
|
||||
* reason, disable filtering.
|
||||
*/
|
||||
ret_val = e1000_mng_enable_host_if_generic(hw);
|
||||
if (ret_val != E1000_SUCCESS) {
|
||||
hw->mac.tx_pkt_filtering = false;
|
||||
return hw->mac.tx_pkt_filtering;
|
||||
}
|
||||
|
||||
/* Read in the header. Length and offset are in dwords. */
|
||||
len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
|
||||
offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
|
||||
for (i = 0; i < len; i++)
|
||||
*(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
|
||||
offset + i);
|
||||
hdr_csum = hdr->checksum;
|
||||
hdr->checksum = 0;
|
||||
csum = e1000_calculate_checksum((u8 *)hdr,
|
||||
E1000_MNG_DHCP_COOKIE_LENGTH);
|
||||
/* If either the checksums or signature don't match, then
|
||||
* the cookie area isn't considered valid, in which case we
|
||||
* take the safe route of assuming Tx filtering is enabled.
|
||||
*/
|
||||
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
|
||||
hw->mac.tx_pkt_filtering = true;
|
||||
return hw->mac.tx_pkt_filtering;
|
||||
}
|
||||
|
||||
/* Cookie area is valid, make the final check for filtering. */
|
||||
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
|
||||
hw->mac.tx_pkt_filtering = false;
|
||||
|
||||
return hw->mac.tx_pkt_filtering;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_mng_write_cmd_header_generic - Writes manageability command header
|
||||
* @hw: pointer to the HW structure
|
||||
* @hdr: pointer to the host interface command header
|
||||
*
|
||||
* Writes the command header after does the checksum calculation.
|
||||
**/
|
||||
s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
|
||||
struct e1000_host_mng_command_header *hdr)
|
||||
{
|
||||
u16 i, length = sizeof(struct e1000_host_mng_command_header);
|
||||
|
||||
DEBUGFUNC("e1000_mng_write_cmd_header_generic");
|
||||
|
||||
/* Write the whole command header structure with new checksum. */
|
||||
|
||||
hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
|
||||
|
||||
length >>= 2;
|
||||
/* Write the relevant command block into the ram area. */
|
||||
for (i = 0; i < length; i++) {
|
||||
E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
|
||||
*((u32 *) hdr + i));
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_mng_host_if_write_generic - Write to the manageability host interface
|
||||
* @hw: pointer to the HW structure
|
||||
* @buffer: pointer to the host interface buffer
|
||||
* @length: size of the buffer
|
||||
* @offset: location in the buffer to write to
|
||||
* @sum: sum of the data (not checksum)
|
||||
*
|
||||
* This function writes the buffer content at the offset given on the host if.
|
||||
* It also does alignment considerations to do the writes in most efficient
|
||||
* way. Also fills up the sum of the buffer in *buffer parameter.
|
||||
**/
|
||||
s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
|
||||
u16 length, u16 offset, u8 *sum)
|
||||
{
|
||||
u8 *tmp;
|
||||
u8 *bufptr = buffer;
|
||||
u32 data = 0;
|
||||
u16 remaining, i, j, prev_bytes;
|
||||
|
||||
DEBUGFUNC("e1000_mng_host_if_write_generic");
|
||||
|
||||
/* sum = only sum of the data and it is not checksum */
|
||||
|
||||
if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
|
||||
return -E1000_ERR_PARAM;
|
||||
|
||||
tmp = (u8 *)&data;
|
||||
prev_bytes = offset & 0x3;
|
||||
offset >>= 2;
|
||||
|
||||
if (prev_bytes) {
|
||||
data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
|
||||
for (j = prev_bytes; j < sizeof(u32); j++) {
|
||||
*(tmp + j) = *bufptr++;
|
||||
*sum += *(tmp + j);
|
||||
}
|
||||
E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
|
||||
length -= j - prev_bytes;
|
||||
offset++;
|
||||
}
|
||||
|
||||
remaining = length & 0x3;
|
||||
length -= remaining;
|
||||
|
||||
/* Calculate length in DWORDs */
|
||||
length >>= 2;
|
||||
|
||||
/* The device driver writes the relevant command block into the
|
||||
* ram area.
|
||||
*/
|
||||
for (i = 0; i < length; i++) {
|
||||
for (j = 0; j < sizeof(u32); j++) {
|
||||
*(tmp + j) = *bufptr++;
|
||||
*sum += *(tmp + j);
|
||||
}
|
||||
|
||||
E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
|
||||
data);
|
||||
}
|
||||
if (remaining) {
|
||||
for (j = 0; j < sizeof(u32); j++) {
|
||||
if (j < remaining)
|
||||
*(tmp + j) = *bufptr++;
|
||||
else
|
||||
*(tmp + j) = 0;
|
||||
|
||||
*sum += *(tmp + j);
|
||||
}
|
||||
E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
|
||||
data);
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
|
||||
* @hw: pointer to the HW structure
|
||||
* @buffer: pointer to the host interface
|
||||
* @length: size of the buffer
|
||||
*
|
||||
* Writes the DHCP information to the host interface.
|
||||
**/
|
||||
s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
|
||||
u16 length)
|
||||
{
|
||||
struct e1000_host_mng_command_header hdr;
|
||||
s32 ret_val;
|
||||
u32 hicr;
|
||||
|
||||
DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
|
||||
|
||||
hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
|
||||
hdr.command_length = length;
|
||||
hdr.reserved1 = 0;
|
||||
hdr.reserved2 = 0;
|
||||
hdr.checksum = 0;
|
||||
|
||||
/* Enable the host interface */
|
||||
ret_val = e1000_mng_enable_host_if_generic(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Populate the host interface with the contents of "buffer". */
|
||||
ret_val = e1000_mng_host_if_write_generic(hw, buffer, length,
|
||||
sizeof(hdr), &(hdr.checksum));
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Write the manageability command header */
|
||||
ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
/* Tell the ARC a new command is pending. */
|
||||
hicr = E1000_READ_REG(hw, E1000_HICR);
|
||||
E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_enable_mng_pass_thru - Check if management passthrough is needed
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Verifies the hardware needs to leave interface enabled so that frames can
|
||||
* be directed to and from the management interface.
|
||||
**/
|
||||
bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
|
||||
{
|
||||
u32 manc;
|
||||
u32 fwsm, factps;
|
||||
|
||||
DEBUGFUNC("e1000_enable_mng_pass_thru");
|
||||
|
||||
if (!hw->mac.asf_firmware_present)
|
||||
return false;
|
||||
|
||||
manc = E1000_READ_REG(hw, E1000_MANC);
|
||||
|
||||
if (!(manc & E1000_MANC_RCV_TCO_EN))
|
||||
return false;
|
||||
|
||||
if (hw->mac.has_fwsm) {
|
||||
fwsm = E1000_READ_REG(hw, E1000_FWSM);
|
||||
factps = E1000_READ_REG(hw, E1000_FACTPS);
|
||||
|
||||
if (!(factps & E1000_FACTPS_MNGCG) &&
|
||||
((fwsm & E1000_FWSM_MODE_MASK) ==
|
||||
(e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)))
|
||||
return true;
|
||||
} else if ((manc & E1000_MANC_SMBUS_EN) &&
|
||||
!(manc & E1000_MANC_ASF_EN)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_host_interface_command - Writes buffer to host interface
|
||||
* @hw: pointer to the HW structure
|
||||
* @buffer: contains a command to write
|
||||
* @length: the byte length of the buffer, must be multiple of 4 bytes
|
||||
*
|
||||
* Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS
|
||||
* else returns E1000_ERR_HOST_INTERFACE_COMMAND.
|
||||
**/
|
||||
s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
|
||||
{
|
||||
u32 hicr, i;
|
||||
|
||||
DEBUGFUNC("e1000_host_interface_command");
|
||||
|
||||
if (!(hw->mac.arc_subsystem_valid)) {
|
||||
DEBUGOUT("Hardware doesn't support host interface command.\n");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
if (!hw->mac.asf_firmware_present) {
|
||||
DEBUGOUT("Firmware is not present.\n");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
if (length == 0 || length & 0x3 ||
|
||||
length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
|
||||
DEBUGOUT("Buffer length failure.\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
||||
/* Check that the host interface is enabled. */
|
||||
hicr = E1000_READ_REG(hw, E1000_HICR);
|
||||
if (!(hicr & E1000_HICR_EN)) {
|
||||
DEBUGOUT("E1000_HOST_EN bit disabled.\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
||||
/* Calculate length in DWORDs */
|
||||
length >>= 2;
|
||||
|
||||
/* The device driver writes the relevant command block
|
||||
* into the ram area.
|
||||
*/
|
||||
for (i = 0; i < length; i++)
|
||||
E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
|
||||
*((u32 *)buffer + i));
|
||||
|
||||
/* Setting this bit tells the ARC that a new command is pending. */
|
||||
E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
|
||||
|
||||
for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
|
||||
hicr = E1000_READ_REG(hw, E1000_HICR);
|
||||
if (!(hicr & E1000_HICR_C))
|
||||
break;
|
||||
msec_delay(1);
|
||||
}
|
||||
|
||||
/* Check command successful completion. */
|
||||
if (i == E1000_HI_COMMAND_TIMEOUT ||
|
||||
(!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
|
||||
DEBUGOUT("Command has failed with no status valid.\n");
|
||||
return -E1000_ERR_HOST_INTERFACE_COMMAND;
|
||||
}
|
||||
|
||||
for (i = 0; i < length; i++)
|
||||
*((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
|
||||
E1000_HOST_IF,
|
||||
i);
|

	return E1000_SUCCESS;
}

/**
 *  e1000_load_firmware - Writes proxy FW code buffer to host interface
 *  and execute.
 *  @hw: pointer to the HW structure
 *  @buffer: contains a firmware to write
 *  @length: the byte length of the buffer, must be multiple of 4 bytes
 *
 *  Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled
 *  in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND.
 **/
s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length)
{
	u32 hicr, hibba, fwsm, icr, i;

	DEBUGFUNC("e1000_load_firmware");

	if (hw->mac.type < e1000_i210) {
		DEBUGOUT("Hardware doesn't support loading FW by the driver\n");
		return -E1000_ERR_CONFIG;
	}

	/* Check that the host interface is enabled. */
	hicr = E1000_READ_REG(hw, E1000_HICR);
	if (!(hicr & E1000_HICR_EN)) {
		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_CONFIG;
	}
	if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) {
		DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n");
		return -E1000_ERR_CONFIG;
	}

	if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) {
		DEBUGOUT("Buffer length failure.\n");
		return -E1000_ERR_INVALID_ARGUMENT;
	}

	/* Clear notification from ROM-FW by reading ICR register */
	icr = E1000_READ_REG(hw, E1000_ICR_V2);

	/* Reset ROM-FW */
	hicr = E1000_READ_REG(hw, E1000_HICR);
	hicr |= E1000_HICR_FW_RESET_ENABLE;
	E1000_WRITE_REG(hw, E1000_HICR, hicr);
	hicr |= E1000_HICR_FW_RESET;
	E1000_WRITE_REG(hw, E1000_HICR, hicr);
	E1000_WRITE_FLUSH(hw);

	/* Wait till MAC notifies about its readiness after ROM-FW reset */
	for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) {
		icr = E1000_READ_REG(hw, E1000_ICR_V2);
		if (icr & E1000_ICR_MNG)
			break;
		msec_delay(1);
	}

	/* Check for timeout */
	if (i == E1000_HI_COMMAND_TIMEOUT) {
		DEBUGOUT("FW reset failed.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Wait till MAC is ready to accept new FW code */
	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
		fwsm = E1000_READ_REG(hw, E1000_FWSM);
		if ((fwsm & E1000_FWSM_FW_VALID) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT ==
		    E1000_FWSM_HI_EN_ONLY_MODE))
			break;
		msec_delay(1);
	}

	/* Check for timeout */
	if (i == E1000_HI_COMMAND_TIMEOUT) {
		DEBUGOUT("FW reset failed.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Calculate length in DWORDs */
	length >>= 2;

	/* The device driver writes the relevant FW code block
	 * into the ram area in DWORDs via 1kB ram addressing window.
	 */
	for (i = 0; i < length; i++) {
		if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) {
			/* Point to correct 1kB ram window */
			hibba = E1000_HI_FW_BASE_ADDRESS +
				((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) *
				(i / E1000_HI_FW_BLOCK_DWORD_LENGTH));

			E1000_WRITE_REG(hw, E1000_HIBBA, hibba);
		}

		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
					    i % E1000_HI_FW_BLOCK_DWORD_LENGTH,
					    *((u32 *)buffer + i));
	}

	/* Setting this bit tells the ARC that a new FW is ready to execute. */
	hicr = E1000_READ_REG(hw, E1000_HICR);
	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);

	for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
		hicr = E1000_READ_REG(hw, E1000_HICR);
		if (!(hicr & E1000_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check for successful FW start. */
	if (i == E1000_HI_COMMAND_TIMEOUT) {
		DEBUGOUT("New FW did not start within timeout period.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return E1000_SUCCESS;
}
@ -1,74 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _E1000_MANAGE_H_
#define _E1000_MANAGE_H_

bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
				    u16 length, u16 offset, u8 *sum);
s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
				       struct e1000_host_mng_command_header *hdr);
s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
				      u8 *buffer, u16 length);
bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
u8 e1000_calculate_checksum(u8 *buffer, u32 length);
s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length);

enum e1000_mng_mode {
	e1000_mng_mode_none = 0,
	e1000_mng_mode_asf,
	e1000_mng_mode_pt,
	e1000_mng_mode_ipmi,
	e1000_mng_mode_host_if_only
};

#define E1000_FACTPS_MNGCG	0x20000000

#define E1000_FWSM_MODE_MASK	0xE
#define E1000_FWSM_MODE_SHIFT	1
#define E1000_FWSM_FW_VALID	0x00008000
#define E1000_FWSM_HI_EN_ONLY_MODE	0x4

#define E1000_MNG_IAMT_MODE	0x3
#define E1000_MNG_DHCP_COOKIE_LENGTH	0x10
#define E1000_MNG_DHCP_COOKIE_OFFSET	0x6F0
#define E1000_MNG_DHCP_COMMAND_TIMEOUT	10
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD	64
#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING	0x1
#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2

#define E1000_VFTA_ENTRY_SHIFT	5
#define E1000_VFTA_ENTRY_MASK	0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F

#define E1000_HI_MAX_BLOCK_BYTE_LENGTH	1792 /* Num of bytes in range */
#define E1000_HI_MAX_BLOCK_DWORD_LENGTH	448 /* Num of dwords in range */
#define E1000_HI_COMMAND_TIMEOUT	500 /* Process HI cmd limit */
#define E1000_HI_FW_BASE_ADDRESS	0x10000
#define E1000_HI_FW_MAX_LENGTH	(64 * 1024) /* Num of bytes */
#define E1000_HI_FW_BLOCK_DWORD_LENGTH	256 /* Num of DWORDs per page */
#define E1000_HICR_MEMORY_BASE_EN	0x200 /* MB Enable bit - RO */
#define E1000_HICR_EN	0x01 /* Enable bit - RO */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C	0x02
#define E1000_HICR_SV	0x04 /* Status Validity */
#define E1000_HICR_FW_RESET_ENABLE	0x40
#define E1000_HICR_FW_RESET	0x80

/* Intel(R) Active Management Technology signature */
#define E1000_IAMT_SIGNATURE	0x544D4149

#endif
@ -1,510 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "e1000_mbx.h"
|
||||
|
||||
/**
|
||||
* e1000_null_mbx_check_for_flag - No-op function, return 0
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw,
|
||||
u16 E1000_UNUSEDARG mbx_id)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_mbx_check_flag");
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_null_mbx_transact - No-op function, return 0
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw,
|
||||
u32 E1000_UNUSEDARG *msg,
|
||||
u16 E1000_UNUSEDARG size,
|
||||
u16 E1000_UNUSEDARG mbx_id)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_mbx_rw_msg");
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_mbx - Reads a message from the mailbox
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @mbx_id: id of mailbox to read
|
||||
*
|
||||
* returns SUCCESS if it successfully read message from buffer
|
||||
**/
|
||||
s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_read_mbx");
|
||||
|
||||
/* limit read to size of mailbox */
|
||||
if (size > mbx->size)
|
||||
size = mbx->size;
|
||||
|
||||
if (mbx->ops.read)
|
||||
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_mbx - Write a message to the mailbox
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @mbx_id: id of mailbox to write
|
||||
*
|
||||
* returns SUCCESS if it successfully copied message into the buffer
|
||||
**/
|
||||
s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
|
||||
DEBUGFUNC("e1000_write_mbx");
|
||||
|
||||
if (size > mbx->size)
|
||||
ret_val = -E1000_ERR_MBX;
|
||||
|
||||
else if (mbx->ops.write)
|
||||
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_msg - checks to see if someone sent us mail
|
||||
* @hw: pointer to the HW structure
|
||||
* @mbx_id: id of mailbox to check
|
||||
*
|
||||
* returns SUCCESS if the Status bit was found or else ERR_MBX
|
||||
**/
|
||||
s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_msg");
|
||||
|
||||
if (mbx->ops.check_for_msg)
|
||||
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_ack - checks to see if someone sent us ACK
|
||||
* @hw: pointer to the HW structure
|
||||
* @mbx_id: id of mailbox to check
|
||||
*
|
||||
* returns SUCCESS if the Status bit was found or else ERR_MBX
|
||||
**/
|
||||
s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_ack");
|
||||
|
||||
if (mbx->ops.check_for_ack)
|
||||
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_rst - checks to see if other side has reset
|
||||
* @hw: pointer to the HW structure
|
||||
* @mbx_id: id of mailbox to check
|
||||
*
|
||||
* returns SUCCESS if the Status bit was found or else ERR_MBX
|
||||
**/
|
||||
s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_rst");
|
||||
|
||||
if (mbx->ops.check_for_rst)
|
||||
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_poll_for_msg - Wait for message notification
|
||||
* @hw: pointer to the HW structure
|
||||
* @mbx_id: id of mailbox to write
|
||||
*
|
||||
* returns SUCCESS if it successfully received a message notification
|
||||
**/
|
||||
static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
int countdown = mbx->timeout;
|
||||
|
||||
DEBUGFUNC("e1000_poll_for_msg");
|
||||
|
||||
if (!countdown || !mbx->ops.check_for_msg)
|
||||
goto out;
|
||||
|
||||
while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
|
||||
countdown--;
|
||||
if (!countdown)
|
||||
break;
|
||||
usec_delay(mbx->usec_delay);
|
||||
}
|
||||
|
||||
/* if we failed, all future posted messages fail until reset */
|
||||
if (!countdown)
|
||||
mbx->timeout = 0;
|
||||
out:
|
||||
return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_poll_for_ack - Wait for message acknowledgement
|
||||
* @hw: pointer to the HW structure
|
||||
* @mbx_id: id of mailbox to write
|
||||
*
|
||||
* returns SUCCESS if it successfully received a message acknowledgement
|
||||
**/
|
||||
static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
int countdown = mbx->timeout;
|
||||
|
||||
DEBUGFUNC("e1000_poll_for_ack");
|
||||
|
||||
if (!countdown || !mbx->ops.check_for_ack)
|
||||
goto out;
|
||||
|
||||
while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
|
||||
countdown--;
|
||||
if (!countdown)
|
||||
break;
|
||||
usec_delay(mbx->usec_delay);
|
||||
}
|
||||
|
||||
/* if we failed, all future posted messages fail until reset */
|
||||
if (!countdown)
|
||||
mbx->timeout = 0;
|
||||
out:
|
||||
return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_posted_mbx - Wait for message notification and receive message
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @mbx_id: id of mailbox to write
|
||||
*
|
||||
* returns SUCCESS if it successfully received a message notification and
|
||||
* copied it into the receive buffer.
|
||||
**/
|
||||
s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_read_posted_mbx");
|
||||
|
||||
if (!mbx->ops.read)
|
||||
goto out;
|
||||
|
||||
ret_val = e1000_poll_for_msg(hw, mbx_id);
|
||||
|
||||
/* if ack received read message, otherwise we timed out */
|
||||
if (!ret_val)
|
||||
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @mbx_id: id of mailbox to write
|
||||
*
|
||||
* returns SUCCESS if it successfully copied message into the buffer and
|
||||
* received an ack to that message within delay * timeout period
|
||||
**/
|
||||
s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_write_posted_mbx");
|
||||
|
||||
/* exit if either we can't write or there isn't a defined timeout */
|
||||
if (!mbx->ops.write || !mbx->timeout)
|
||||
goto out;
|
||||
|
||||
/* send msg */
|
||||
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
|
||||
|
||||
/* if msg sent wait until we receive an ack */
|
||||
if (!ret_val)
|
||||
ret_val = e1000_poll_for_ack(hw, mbx_id);
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_init_mbx_ops_generic - Initialize mbx function pointers
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Sets the function pointers to no-op functions
|
||||
**/
|
||||
void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
mbx->ops.init_params = e1000_null_ops_generic;
|
||||
mbx->ops.read = e1000_null_mbx_transact;
|
||||
mbx->ops.write = e1000_null_mbx_transact;
|
||||
mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
|
||||
mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
|
||||
mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
|
||||
mbx->ops.read_posted = e1000_read_posted_mbx;
|
||||
mbx->ops.write_posted = e1000_write_posted_mbx;
|
||||
}
|
||||
|
||||
static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
|
||||
{
|
||||
u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
if (mbvficr & mask) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_msg_pf - checks to see if the VF has sent mail
|
||||
* @hw: pointer to the HW structure
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
|
||||
**/
|
||||
static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
|
||||
{
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_msg_pf");
|
||||
|
||||
if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
hw->mbx.stats.reqs++;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_ack_pf - checks to see if the VF has ACKed
|
||||
* @hw: pointer to the HW structure
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
|
||||
**/
|
||||
static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
|
||||
{
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_ack_pf");
|
||||
|
||||
if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
hw->mbx.stats.acks++;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_check_for_rst_pf - checks to see if the VF has reset
|
||||
* @hw: pointer to the HW structure
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
|
||||
**/
|
||||
static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
|
||||
{
|
||||
u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
|
||||
DEBUGFUNC("e1000_check_for_rst_pf");
|
||||
|
||||
if (vflre & (1 << vf_number)) {
|
||||
ret_val = E1000_SUCCESS;
|
||||
E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
|
||||
hw->mbx.stats.rsts++;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_obtain_mbx_lock_pf - obtain mailbox lock
|
||||
* @hw: pointer to the HW structure
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* return SUCCESS if we obtained the mailbox lock
|
||||
**/
|
||||
static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
|
||||
{
|
||||
s32 ret_val = -E1000_ERR_MBX;
|
||||
u32 p2v_mailbox;
|
||||
|
||||
DEBUGFUNC("e1000_obtain_mbx_lock_pf");
|
||||
|
||||
/* Take ownership of the buffer */
|
||||
E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
|
||||
|
||||
/* reserve mailbox for vf use */
|
||||
p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
|
||||
if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
|
||||
ret_val = E1000_SUCCESS;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_mbx_pf - Places a message in the mailbox
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* returns SUCCESS if it successfully copied message into the buffer
|
||||
**/
|
||||
static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
|
||||
u16 vf_number)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 i;
|
||||
|
||||
DEBUGFUNC("e1000_write_mbx_pf");
|
||||
|
||||
/* lock the mailbox to prevent pf/vf race condition */
|
||||
ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
|
||||
if (ret_val)
|
||||
goto out_no_write;
|
||||
|
||||
/* flush msg and acks as we are overwriting the message buffer */
|
||||
e1000_check_for_msg_pf(hw, vf_number);
|
||||
e1000_check_for_ack_pf(hw, vf_number);
|
||||
|
||||
/* copy the caller specified message to the mailbox memory buffer */
|
||||
for (i = 0; i < size; i++)
|
||||
E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
|
||||
|
||||
/* Interrupt VF to tell it a message has been sent and release buffer*/
|
||||
E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
|
||||
|
||||
/* update stats */
|
||||
hw->mbx.stats.msgs_tx++;
|
||||
|
||||
out_no_write:
|
||||
return ret_val;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_mbx_pf - Read a message from the mailbox
|
||||
* @hw: pointer to the HW structure
|
||||
* @msg: The message buffer
|
||||
* @size: Length of buffer
|
||||
* @vf_number: the VF index
|
||||
*
|
||||
* This function copies a message from the mailbox buffer to the caller's
|
||||
* memory buffer. The presumption is that the caller knows that there was
|
||||
* a message due to a VF request so no polling for message is needed.
|
||||
**/
|
||||
static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
|
||||
u16 vf_number)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 i;
|
||||
|
||||
DEBUGFUNC("e1000_read_mbx_pf");
|
||||
|
||||
/* lock the mailbox to prevent pf/vf race condition */
|
||||
ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
|
||||
if (ret_val)
|
||||
goto out_no_read;
|
||||
|
||||
/* copy the message to the mailbox memory buffer */
|
||||
for (i = 0; i < size; i++)
|
||||
msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
|
||||
|
||||
/* Acknowledge the message and release buffer */
|
||||
E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
|
||||
|
||||
/* update stats */
|
||||
hw->mbx.stats.msgs_rx++;
|
||||
|
||||
out_no_read:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_init_mbx_params_pf - set initial values for pf mailbox
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Initializes the hw->mbx struct to correct values for pf mailbox
|
||||
*/
|
||||
s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_mbx_info *mbx = &hw->mbx;
|
||||
|
||||
switch (hw->mac.type) {
|
||||
case e1000_82576:
|
||||
case e1000_i350:
|
||||
case e1000_i354:
|
||||
mbx->timeout = 0;
|
||||
mbx->usec_delay = 0;
|
||||
|
||||
mbx->size = E1000_VFMAILBOX_SIZE;
|
||||
|
||||
mbx->ops.read = e1000_read_mbx_pf;
|
||||
mbx->ops.write = e1000_write_mbx_pf;
|
||||
mbx->ops.read_posted = e1000_read_posted_mbx;
|
||||
mbx->ops.write_posted = e1000_write_posted_mbx;
|
||||
mbx->ops.check_for_msg = e1000_check_for_msg_pf;
|
||||
mbx->ops.check_for_ack = e1000_check_for_ack_pf;
|
||||
mbx->ops.check_for_rst = e1000_check_for_rst_pf;
|
||||
|
||||
mbx->stats.msgs_tx = 0;
|
||||
mbx->stats.msgs_rx = 0;
|
||||
mbx->stats.reqs = 0;
|
||||
mbx->stats.acks = 0;
|
||||
mbx->stats.rsts = 0;
|
||||
default:
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
}
|
@ -1,72 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_

#include "e1000_api.h"

#define E1000_P2VMAILBOX_STS	0x00000001 /* Initiate message send to VF */
#define E1000_P2VMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
#define E1000_P2VMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
#define E1000_P2VMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
#define E1000_P2VMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */

#define E1000_MBVFICR_VFREQ_MASK	0x000000FF /* bits for VF messages */
#define E1000_MBVFICR_VFREQ_VF1	0x00000001 /* bit for VF 1 message */
#define E1000_MBVFICR_VFACK_MASK	0x00FF0000 /* bits for VF acks */
#define E1000_MBVFICR_VFACK_VF1	0x00010000 /* bit for VF 1 ack */

#define E1000_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */

/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
 * PF. The reverse is true if it is E1000_PF_*.
 * Message ACK's are the value or'd with 0xF0000000
 */
/* Msgs below or'd with this are the ACK */
#define E1000_VT_MSGTYPE_ACK	0x80000000
/* Msgs below or'd with this are the NACK */
#define E1000_VT_MSGTYPE_NACK	0x40000000
/* Indicates that VF is still clear to send requests */
#define E1000_VT_MSGTYPE_CTS	0x20000000
#define E1000_VT_MSGINFO_SHIFT	16
/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK	(0xFF << E1000_VT_MSGINFO_SHIFT)

#define E1000_VF_RESET	0x01 /* VF requests reset */
#define E1000_VF_SET_MAC_ADDR	0x02 /* VF requests to set MAC addr */
#define E1000_VF_SET_MULTICAST	0x03 /* VF requests to set MC addr */
#define E1000_VF_SET_MULTICAST_COUNT_MASK	(0x1F << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_MULTICAST_OVERFLOW	(0x80 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_VLAN	0x04 /* VF requests to set VLAN */
#define E1000_VF_SET_VLAN_ADD	(0x01 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_LPE	0x05 /* reqs to set VMOLR.LPE */
#define E1000_VF_SET_PROMISC	0x06 /* reqs to clear VMOLR.ROPE/MPME*/
#define E1000_VF_SET_PROMISC_UNICAST	(0x01 << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_SET_PROMISC_MULTICAST	(0x02 << E1000_VT_MSGINFO_SHIFT)

#define E1000_PF_CONTROL_MSG	0x0100 /* PF control message */

#define E1000_VF_MBX_INIT_TIMEOUT	2000 /* number of retries on mailbox */
#define E1000_VF_MBX_INIT_DELAY	500 /* microseconds between retries */

s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
s32 e1000_check_for_msg(struct e1000_hw *, u16);
s32 e1000_check_for_ack(struct e1000_hw *, u16);
s32 e1000_check_for_rst(struct e1000_hw *, u16);
void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
s32 e1000_init_mbx_params_pf(struct e1000_hw *);

#endif /* _E1000_MBX_H_ */
@ -1,950 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "e1000_api.h"
|
||||
|
||||
static void e1000_reload_nvm_generic(struct e1000_hw *hw);
|
||||
|
||||
/**
|
||||
* e1000_init_nvm_ops_generic - Initialize NVM function pointers
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Setups up the function pointers to no-op functions
|
||||
**/
|
||||
void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
DEBUGFUNC("e1000_init_nvm_ops_generic");
|
||||
|
||||
/* Initialize function pointers */
|
||||
nvm->ops.init_params = e1000_null_ops_generic;
|
||||
nvm->ops.acquire = e1000_null_ops_generic;
|
||||
nvm->ops.read = e1000_null_read_nvm;
|
||||
nvm->ops.release = e1000_null_nvm_generic;
|
||||
nvm->ops.reload = e1000_reload_nvm_generic;
|
||||
nvm->ops.update = e1000_null_ops_generic;
|
||||
nvm->ops.valid_led_default = e1000_null_led_default;
|
||||
nvm->ops.validate = e1000_null_ops_generic;
|
||||
nvm->ops.write = e1000_null_write_nvm;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_null_nvm_read - No-op function, return 0
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
|
||||
u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
|
||||
u16 E1000_UNUSEDARG *c)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_read_nvm");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_null_nvm_generic - No-op function, return void
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_nvm_generic");
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_null_led_default - No-op function, return 0
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw,
|
||||
u16 E1000_UNUSEDARG *data)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_led_default");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_null_write_nvm - No-op function, return 0
|
||||
* @hw: pointer to the HW structure
|
||||
**/
|
||||
s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw,
|
||||
u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b,
|
||||
u16 E1000_UNUSEDARG *c)
|
||||
{
|
||||
DEBUGFUNC("e1000_null_write_nvm");
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_raise_eec_clk - Raise EEPROM clock
|
||||
* @hw: pointer to the HW structure
|
||||
* @eecd: pointer to the EEPROM
|
||||
*
|
||||
* Enable/Raise the EEPROM clock bit.
|
||||
**/
|
||||
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
|
||||
{
|
||||
*eecd = *eecd | E1000_EECD_SK;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, *eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
usec_delay(hw->nvm.delay_usec);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_lower_eec_clk - Lower EEPROM clock
|
||||
* @hw: pointer to the HW structure
|
||||
* @eecd: pointer to the EEPROM
|
||||
*
|
||||
* Clear/Lower the EEPROM clock bit.
|
||||
**/
|
||||
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
|
||||
{
|
||||
*eecd = *eecd & ~E1000_EECD_SK;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, *eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
usec_delay(hw->nvm.delay_usec);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_shift_out_eec_bits - Shift data bits our to the EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
* @data: data to send to the EEPROM
|
||||
* @count: number of bits to shift out
|
||||
*
|
||||
* We need to shift 'count' bits out to the EEPROM. So, the value in the
|
||||
* "data" parameter will be shifted out to the EEPROM one bit at a time.
|
||||
* In order to do this, "data" must be broken down into bits.
|
||||
**/
|
||||
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
u32 mask;
|
||||
|
||||
DEBUGFUNC("e1000_shift_out_eec_bits");
|
||||
|
||||
mask = 0x01 << (count - 1);
|
||||
if (nvm->type == e1000_nvm_eeprom_spi)
|
||||
eecd |= E1000_EECD_DO;
|
||||
|
||||
do {
|
||||
eecd &= ~E1000_EECD_DI;
|
||||
|
||||
if (data & mask)
|
||||
eecd |= E1000_EECD_DI;
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
|
||||
usec_delay(nvm->delay_usec);
|
||||
|
||||
e1000_raise_eec_clk(hw, &eecd);
|
||||
e1000_lower_eec_clk(hw, &eecd);
|
||||
|
||||
mask >>= 1;
|
||||
} while (mask);
|
||||
|
||||
eecd &= ~E1000_EECD_DI;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
* @count: number of bits to shift in
|
||||
*
|
||||
* In order to read a register from the EEPROM, we need to shift 'count' bits
|
||||
* in from the EEPROM. Bits are "shifted in" by raising the clock input to
|
||||
* the EEPROM (setting the SK bit), and then reading the value of the data out
|
||||
* "DO" bit. During this "shifting in" process the data in "DI" bit should
|
||||
* always be clear.
|
||||
**/
|
||||
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
|
||||
{
|
||||
u32 eecd;
|
||||
u32 i;
|
||||
u16 data;
|
||||
|
||||
DEBUGFUNC("e1000_shift_in_eec_bits");
|
||||
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
|
||||
eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
|
||||
data = 0;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
data <<= 1;
|
||||
e1000_raise_eec_clk(hw, &eecd);
|
||||
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
|
||||
eecd &= ~E1000_EECD_DI;
|
||||
if (eecd & E1000_EECD_DO)
|
||||
data |= 1;
|
||||
|
||||
e1000_lower_eec_clk(hw, &eecd);
|
||||
}
|
||||
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
|
||||
* @hw: pointer to the HW structure
|
||||
* @ee_reg: EEPROM flag for polling
|
||||
*
|
||||
* Polls the EEPROM status bit for either read or write completion based
|
||||
* upon the value of 'ee_reg'.
|
||||
**/
|
||||
s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
|
||||
{
|
||||
u32 attempts = 100000;
|
||||
u32 i, reg = 0;
|
||||
|
||||
DEBUGFUNC("e1000_poll_eerd_eewr_done");
|
||||
|
||||
for (i = 0; i < attempts; i++) {
|
||||
if (ee_reg == E1000_NVM_POLL_READ)
|
||||
reg = E1000_READ_REG(hw, E1000_EERD);
|
||||
else
|
||||
reg = E1000_READ_REG(hw, E1000_EEWR);
|
||||
|
||||
if (reg & E1000_NVM_RW_REG_DONE)
|
||||
return E1000_SUCCESS;
|
||||
|
||||
usec_delay(5);
|
||||
}
|
||||
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_acquire_nvm_generic - Generic request for access to EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Set the EEPROM access request bit and wait for EEPROM access grant bit.
|
||||
* Return successful if access grant bit set, else clear the request for
|
||||
* EEPROM access and return -E1000_ERR_NVM (-1).
|
||||
**/
|
||||
s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
|
||||
|
||||
DEBUGFUNC("e1000_acquire_nvm_generic");
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
|
||||
while (timeout) {
|
||||
if (eecd & E1000_EECD_GNT)
|
||||
break;
|
||||
usec_delay(5);
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
timeout--;
|
||||
}
|
||||
|
||||
if (!timeout) {
|
||||
eecd &= ~E1000_EECD_REQ;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
DEBUGOUT("Could not acquire NVM grant\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_standby_nvm - Return EEPROM to standby state
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Return the EEPROM to a standby state.
|
||||
**/
|
||||
static void e1000_standby_nvm(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
|
||||
DEBUGFUNC("e1000_standby_nvm");
|
||||
|
||||
if (nvm->type == e1000_nvm_eeprom_spi) {
|
||||
/* Toggle CS to flush commands */
|
||||
eecd |= E1000_EECD_CS;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
usec_delay(nvm->delay_usec);
|
||||
eecd &= ~E1000_EECD_CS;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
usec_delay(nvm->delay_usec);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_stop_nvm - Terminate EEPROM command
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Terminates the current command by inverting the EEPROM's chip select pin.
|
||||
**/
|
||||
static void e1000_stop_nvm(struct e1000_hw *hw)
|
||||
{
|
||||
u32 eecd;
|
||||
|
||||
DEBUGFUNC("e1000_stop_nvm");
|
||||
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
|
||||
/* Pull CS high */
|
||||
eecd |= E1000_EECD_CS;
|
||||
e1000_lower_eec_clk(hw, &eecd);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_release_nvm_generic - Release exclusive access to EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
|
||||
**/
|
||||
void e1000_release_nvm_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 eecd;
|
||||
|
||||
DEBUGFUNC("e1000_release_nvm_generic");
|
||||
|
||||
e1000_stop_nvm(hw);
|
||||
|
||||
eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
eecd &= ~E1000_EECD_REQ;
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Setups the EEPROM for reading and writing.
|
||||
**/
|
||||
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 eecd = E1000_READ_REG(hw, E1000_EECD);
|
||||
u8 spi_stat_reg;
|
||||
|
||||
DEBUGFUNC("e1000_ready_nvm_eeprom");
|
||||
|
||||
if (nvm->type == e1000_nvm_eeprom_spi) {
|
||||
u16 timeout = NVM_MAX_RETRY_SPI;
|
||||
|
||||
/* Clear SK and CS */
|
||||
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
|
||||
E1000_WRITE_REG(hw, E1000_EECD, eecd);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
usec_delay(1);
|
||||
|
||||
/* Read "Status Register" repeatedly until the LSB is cleared.
|
||||
* The EEPROM will signal that the command has been completed
|
||||
* by clearing bit 0 of the internal status register. If it's
|
||||
* not cleared within 'timeout', then error out.
|
||||
*/
|
||||
while (timeout) {
|
||||
e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
|
||||
hw->nvm.opcode_bits);
|
||||
spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
|
||||
if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
|
||||
break;
|
||||
|
||||
usec_delay(5);
|
||||
e1000_standby_nvm(hw);
|
||||
timeout--;
|
||||
}
|
||||
|
||||
if (!timeout) {
|
||||
DEBUGOUT("SPI NVM Status error\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_nvm_spi - Read EEPROM's using SPI
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset of word in the EEPROM to read
|
||||
* @words: number of words to read
|
||||
* @data: word read from the EEPROM
|
||||
*
|
||||
* Reads a 16 bit word from the EEPROM.
|
||||
**/
|
||||
s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 i = 0;
|
||||
s32 ret_val;
|
||||
u16 word_in;
|
||||
u8 read_opcode = NVM_READ_OPCODE_SPI;
|
||||
|
||||
DEBUGFUNC("e1000_read_nvm_spi");
|
||||
|
||||
/* A check for invalid values: offset too large, too many words,
|
||||
* and not enough words.
|
||||
*/
|
||||
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
|
||||
(words == 0)) {
|
||||
DEBUGOUT("nvm parameter(s) out of bounds\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
ret_val = nvm->ops.acquire(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val = e1000_ready_nvm_eeprom(hw);
|
||||
if (ret_val)
|
||||
goto release;
|
||||
|
||||
e1000_standby_nvm(hw);
|
||||
|
||||
if ((nvm->address_bits == 8) && (offset >= 128))
|
||||
read_opcode |= NVM_A8_OPCODE_SPI;
|
||||
|
||||
/* Send the READ command (opcode + addr) */
|
||||
e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
|
||||
e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
|
||||
|
||||
/* Read the data. SPI NVMs increment the address with each byte
|
||||
* read and will roll over if reading beyond the end. This allows
|
||||
* us to read the whole NVM from any offset
|
||||
*/
|
||||
for (i = 0; i < words; i++) {
|
||||
word_in = e1000_shift_in_eec_bits(hw, 16);
|
||||
data[i] = (word_in >> 8) | (word_in << 8);
|
||||
}
|
||||
|
||||
release:
|
||||
nvm->ops.release(hw);
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_nvm_eerd - Reads EEPROM using EERD register
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset of word in the EEPROM to read
|
||||
* @words: number of words to read
|
||||
* @data: word read from the EEPROM
|
||||
*
|
||||
* Reads a 16 bit word from the EEPROM using the EERD register.
|
||||
**/
|
||||
s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
u32 i, eerd = 0;
|
||||
s32 ret_val = E1000_SUCCESS;
|
||||
|
||||
DEBUGFUNC("e1000_read_nvm_eerd");
|
||||
|
||||
/* A check for invalid values: offset too large, too many words,
|
||||
* too many words for the offset, and not enough words.
|
||||
*/
|
||||
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
|
||||
(words == 0)) {
|
||||
DEBUGOUT("nvm parameter(s) out of bounds\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
for (i = 0; i < words; i++) {
|
||||
eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
|
||||
E1000_NVM_RW_REG_START;
|
||||
|
||||
E1000_WRITE_REG(hw, E1000_EERD, eerd);
|
||||
ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
|
||||
if (ret_val)
|
||||
break;
|
||||
|
||||
data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
|
||||
E1000_NVM_RW_REG_DATA);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_write_nvm_spi - Write to EEPROM using SPI
|
||||
* @hw: pointer to the HW structure
|
||||
* @offset: offset within the EEPROM to be written to
|
||||
* @words: number of words to write
|
||||
* @data: 16 bit word(s) to be written to the EEPROM
|
||||
*
|
||||
* Writes data to EEPROM at offset using SPI interface.
|
||||
*
|
||||
* If e1000_update_nvm_checksum is not called after this function , the
|
||||
* EEPROM will most likely contain an invalid checksum.
|
||||
**/
|
||||
s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
|
||||
{
|
||||
struct e1000_nvm_info *nvm = &hw->nvm;
|
||||
s32 ret_val = -E1000_ERR_NVM;
|
||||
u16 widx = 0;
|
||||
|
||||
DEBUGFUNC("e1000_write_nvm_spi");
|
||||
|
||||
/* A check for invalid values: offset too large, too many words,
|
||||
* and not enough words.
|
||||
*/
|
||||
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
|
||||
(words == 0)) {
|
||||
DEBUGOUT("nvm parameter(s) out of bounds\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
while (widx < words) {
|
||||
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
|
||||
|
||||
ret_val = nvm->ops.acquire(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
||||
|
||||
ret_val = e1000_ready_nvm_eeprom(hw);
|
||||
if (ret_val) {
|
||||
nvm->ops.release(hw);
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
e1000_standby_nvm(hw);
|
||||
|
||||
/* Send the WRITE ENABLE command (8 bit opcode) */
|
||||
e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
|
||||
nvm->opcode_bits);
|
||||
|
||||
e1000_standby_nvm(hw);
|
||||
|
||||
/* Some SPI eeproms use the 8th address bit embedded in the
|
||||
* opcode
|
||||
*/
|
||||
if ((nvm->address_bits == 8) && (offset >= 128))
|
||||
write_opcode |= NVM_A8_OPCODE_SPI;
|
||||
|
||||
/* Send the Write command (8-bit opcode + addr) */
|
||||
e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
|
||||
e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
|
||||
nvm->address_bits);
|
||||
|
||||
/* Loop to allow for up to whole page write of eeprom */
|
||||
while (widx < words) {
|
||||
u16 word_out = data[widx];
|
||||
word_out = (word_out >> 8) | (word_out << 8);
|
||||
e1000_shift_out_eec_bits(hw, word_out, 16);
|
||||
widx++;
|
||||
|
||||
if ((((offset + widx) * 2) % nvm->page_size) == 0) {
|
||||
e1000_standby_nvm(hw);
|
||||
break;
|
||||
}
|
||||
}
|
||||
msec_delay(10);
|
||||
nvm->ops.release(hw);
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_pba_string_generic - Read device part number
|
||||
* @hw: pointer to the HW structure
|
||||
* @pba_num: pointer to device part number
|
||||
* @pba_num_size: size of part number buffer
|
||||
*
|
||||
* Reads the product board assembly (PBA) number from the EEPROM and stores
|
||||
* the value in pba_num.
|
||||
**/
|
||||
s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
|
||||
u32 pba_num_size)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 nvm_data;
|
||||
u16 pba_ptr;
|
||||
u16 offset;
|
||||
u16 length;
|
||||
|
||||
DEBUGFUNC("e1000_read_pba_string_generic");
|
||||
|
||||
if (pba_num == NULL) {
|
||||
DEBUGOUT("PBA string buffer was null\n");
|
||||
return -E1000_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* if nvm_data is not ptr guard the PBA must be in legacy format which
|
||||
* means pba_ptr is actually our second data word for the PBA number
|
||||
* and we can decode it into an ascii string
|
||||
*/
|
||||
if (nvm_data != NVM_PBA_PTR_GUARD) {
|
||||
DEBUGOUT("NVM PBA number is not stored as string\n");
|
||||
|
||||
/* make sure callers buffer is big enough to store the PBA */
|
||||
if (pba_num_size < E1000_PBANUM_LENGTH) {
|
||||
DEBUGOUT("PBA string buffer too small\n");
|
||||
return E1000_ERR_NO_SPACE;
|
||||
}
|
||||
|
||||
/* extract hex string from data and pba_ptr */
|
||||
pba_num[0] = (nvm_data >> 12) & 0xF;
|
||||
pba_num[1] = (nvm_data >> 8) & 0xF;
|
||||
pba_num[2] = (nvm_data >> 4) & 0xF;
|
||||
pba_num[3] = nvm_data & 0xF;
|
||||
pba_num[4] = (pba_ptr >> 12) & 0xF;
|
||||
pba_num[5] = (pba_ptr >> 8) & 0xF;
|
||||
pba_num[6] = '-';
|
||||
pba_num[7] = 0;
|
||||
pba_num[8] = (pba_ptr >> 4) & 0xF;
|
||||
pba_num[9] = pba_ptr & 0xF;
|
||||
|
||||
/* put a null character on the end of our string */
|
||||
pba_num[10] = '\0';
|
||||
|
||||
/* switch all the data but the '-' to hex char */
|
||||
for (offset = 0; offset < 10; offset++) {
|
||||
if (pba_num[offset] < 0xA)
|
||||
pba_num[offset] += '0';
|
||||
else if (pba_num[offset] < 0x10)
|
||||
pba_num[offset] += 'A' - 0xA;
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
if (length == 0xFFFF || length == 0) {
|
||||
DEBUGOUT("NVM PBA number section invalid length\n");
|
||||
return -E1000_ERR_NVM_PBA_SECTION;
|
||||
}
|
||||
/* check if pba_num buffer is big enough */
|
||||
if (pba_num_size < (((u32)length * 2) - 1)) {
|
||||
DEBUGOUT("PBA string buffer too small\n");
|
||||
return -E1000_ERR_NO_SPACE;
|
||||
}
|
||||
|
||||
/* trim pba length from start of string */
|
||||
pba_ptr++;
|
||||
length--;
|
||||
|
||||
for (offset = 0; offset < length; offset++) {
|
||||
ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
pba_num[offset * 2] = (u8)(nvm_data >> 8);
|
||||
pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
|
||||
}
|
||||
pba_num[offset * 2] = '\0';
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_read_pba_length_generic - Read device part number length
|
||||
* @hw: pointer to the HW structure
|
||||
* @pba_num_size: size of part number buffer
|
||||
*
|
||||
* Reads the product board assembly (PBA) number length from the EEPROM and
|
||||
* stores the value in pba_num_size.
|
||||
**/
|
||||
s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 nvm_data;
|
||||
u16 pba_ptr;
|
||||
u16 length;
|
||||
|
||||
DEBUGFUNC("e1000_read_pba_length_generic");
|
||||
|
||||
if (pba_num_size == NULL) {
|
||||
DEBUGOUT("PBA buffer size was null\n");
|
||||
return -E1000_ERR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/* if data is not ptr guard the PBA must be in legacy format */
|
||||
if (nvm_data != NVM_PBA_PTR_GUARD) {
|
||||
*pba_num_size = E1000_PBANUM_LENGTH;
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
if (length == 0xFFFF || length == 0) {
|
||||
DEBUGOUT("NVM PBA number section invalid length\n");
|
||||
return -E1000_ERR_NVM_PBA_SECTION;
|
||||
}
|
||||
|
||||
/* Convert from length in u16 values to u8 chars, add 1 for NULL,
|
||||
* and subtract 2 because length field is included in length.
|
||||
*/
|
||||
*pba_num_size = ((u32)length * 2) - 1;
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* e1000_read_mac_addr_generic - Read device MAC address
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Reads the device MAC address from the EEPROM and stores the value.
|
||||
* Since devices with two ports use the same EEPROM, we increment the
|
||||
* last bit in the MAC address for the second port.
|
||||
**/
|
||||
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 rar_high;
|
||||
u32 rar_low;
|
||||
u16 i;
|
||||
|
||||
rar_high = E1000_READ_REG(hw, E1000_RAH(0));
|
||||
rar_low = E1000_READ_REG(hw, E1000_RAL(0));
|
||||
|
||||
for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
|
||||
hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
|
||||
|
||||
for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
|
||||
hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
|
||||
|
||||
for (i = 0; i < ETH_ADDR_LEN; i++)
|
||||
hw->mac.addr[i] = hw->mac.perm_addr[i];
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
|
||||
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
|
||||
**/
|
||||
s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 checksum = 0;
|
||||
u16 i, nvm_data;
|
||||
|
||||
DEBUGFUNC("e1000_validate_nvm_checksum_generic");
|
||||
|
||||
for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
|
||||
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error\n");
|
||||
return ret_val;
|
||||
}
|
||||
checksum += nvm_data;
|
||||
}
|
||||
|
||||
if (checksum != (u16) NVM_SUM) {
|
||||
DEBUGOUT("NVM Checksum Invalid\n");
|
||||
return -E1000_ERR_NVM;
|
||||
}
|
||||
|
||||
return E1000_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_update_nvm_checksum_generic - Update EEPROM checksum
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
|
||||
* up to the checksum. Then calculates the EEPROM checksum and writes the
|
||||
* value to the EEPROM.
|
||||
**/
|
||||
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
|
||||
{
|
||||
s32 ret_val;
|
||||
u16 checksum = 0;
|
||||
u16 i, nvm_data;
|
||||
|
||||
DEBUGFUNC("e1000_update_nvm_checksum");
|
||||
|
||||
for (i = 0; i < NVM_CHECKSUM_REG; i++) {
|
||||
ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
|
||||
if (ret_val) {
|
||||
DEBUGOUT("NVM Read Error while updating checksum.\n");
|
||||
return ret_val;
|
||||
}
|
||||
checksum += nvm_data;
|
||||
}
|
||||
checksum = (u16) NVM_SUM - checksum;
|
||||
ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
|
||||
if (ret_val)
|
||||
DEBUGOUT("NVM Write Error while updating checksum.\n");
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_reload_nvm_generic - Reloads EEPROM
|
||||
* @hw: pointer to the HW structure
|
||||
*
|
||||
* Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
|
||||
* extended control register.
|
||||
**/
|
||||
static void e1000_reload_nvm_generic(struct e1000_hw *hw)
|
||||
{
|
||||
u32 ctrl_ext;
|
||||
|
||||
DEBUGFUNC("e1000_reload_nvm_generic");
|
||||
|
||||
usec_delay(10);
|
||||
ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
|
||||
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
|
||||
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
}
|
||||
|
||||
/**
|
||||
* e1000_get_fw_version - Get firmware version information
|
||||
* @hw: pointer to the HW structure
|
||||
* @fw_vers: pointer to output version structure
|
||||
*
|
||||
* unsupported/not present features return 0 in version structure
|
||||
**/
|
||||
void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
|
||||
{
|
||||
u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
|
||||
u8 q, hval, rem, result;
|
||||
u16 comb_verh, comb_verl, comb_offset;
|
||||
|
||||
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
|
||||
|
||||
/* basic eeprom version numbers, bits used vary by part and by tool
|
||||
* used to create the nvm images */
|
||||
/* Check which data format we have */
|
||||
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
|
||||
switch (hw->mac.type) {
|
||||
case e1000_i211:
|
||||
e1000_read_invm_version(hw, fw_vers);
|
||||
return;
|
||||
case e1000_82575:
|
||||
case e1000_82576:
|
||||
case e1000_82580:
|
||||
/* Use this format, unless EETRACK ID exists,
|
||||
* then use alternate format
|
||||
*/
|
||||
if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
|
||||
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
|
||||
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
|
||||
>> NVM_MAJOR_SHIFT;
|
||||
fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
|
||||
>> NVM_MINOR_SHIFT;
|
||||
fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
|
||||
goto etrack_id;
|
||||
}
|
||||
break;
|
||||
case e1000_i210:
|
||||
if (!(e1000_get_flash_presence_i210(hw))) {
|
||||
e1000_read_invm_version(hw, fw_vers);
|
||||
return;
|
||||
}
|
||||
/* fall through */
|
||||
case e1000_i350:
|
||||
case e1000_i354:
|
||||
/* find combo image version */
|
||||
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
|
||||
if ((comb_offset != 0x0) &&
|
||||
(comb_offset != NVM_VER_INVALID)) {
|
||||
|
||||
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
|
||||
+ 1), 1, &comb_verh);
|
||||
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
|
||||
1, &comb_verl);
|
||||
|
||||
/* get Option Rom version if it exists and is valid */
|
||||
if ((comb_verh && comb_verl) &&
|
||||
((comb_verh != NVM_VER_INVALID) &&
|
||||
(comb_verl != NVM_VER_INVALID))) {
|
||||
|
||||
fw_vers->or_valid = true;
|
||||
fw_vers->or_major =
|
||||
comb_verl >> NVM_COMB_VER_SHFT;
|
||||
fw_vers->or_build =
|
||||
(comb_verl << NVM_COMB_VER_SHFT)
|
||||
| (comb_verh >> NVM_COMB_VER_SHFT);
|
||||
fw_vers->or_patch =
|
||||
comb_verh & NVM_COMB_VER_MASK;
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return;
|
||||
}
|
||||
hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
|
||||
fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
|
||||
>> NVM_MAJOR_SHIFT;
|
||||
|
||||
/* check for old style version format in newer images*/
|
||||
if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
|
||||
eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
|
||||
} else {
|
||||
eeprom_verl = (fw_version & NVM_MINOR_MASK)
|
||||
>> NVM_MINOR_SHIFT;
|
||||
}
|
||||
/* Convert minor value to hex before assigning to output struct
|
||||
* Val to be converted will not be higher than 99, per tool output
|
||||
*/
|
||||
q = eeprom_verl / NVM_HEX_CONV;
|
||||
hval = q * NVM_HEX_TENS;
|
||||
rem = eeprom_verl % NVM_HEX_CONV;
|
||||
result = hval + rem;
|
||||
fw_vers->eep_minor = result;
|
||||
|
||||
etrack_id:
|
||||
if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
|
||||
hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
|
||||
hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
|
||||
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
|
||||
| eeprom_verl;
|
||||
}
|
||||
return;
|
||||
}
|
@ -1,60 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_


struct e1000_fw_version {
	u32 etrack_id;
	u16 eep_major;
	u16 eep_minor;
	u16 eep_build;

	u8 invm_major;
	u8 invm_minor;
	u8 invm_img_type;

	bool or_valid;
	u16 or_major;
	u16 or_build;
	u16 or_patch;
};


void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
void e1000_null_nvm_generic(struct e1000_hw *hw);
s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data);
s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
s32 e1000_acquire_nvm_generic(struct e1000_hw *hw);

s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
				  u32 pba_num_size);
s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
			u16 *data);
s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
			u16 *data);
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000_release_nvm_generic(struct e1000_hw *hw);
void e1000_get_fw_version(struct e1000_hw *hw,
			  struct e1000_fw_version *fw_vers);

#define E1000_STM_OPCODE	0xDB00

#endif
@ -1,121 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/* glue for the OS independent part of e1000
 * includes register access macros
 */

#ifndef _E1000_OSDEP_H_
#define _E1000_OSDEP_H_

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/sched.h>
#include "kcompat.h"

#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wunused-function"
#endif

#define usec_delay(x) udelay(x)
#define usec_delay_irq(x) udelay(x)
#ifndef msec_delay
#define msec_delay(x) do { \
	/* Don't mdelay in interrupt context! */ \
	if (in_interrupt()) \
		BUG(); \
	else \
		msleep(x); \
} while (0)

/* Some workarounds require millisecond delays and are run during interrupt
 * context. Most notably, when establishing link, the phy may need tweaking
 * but cannot process phy register reads/writes faster than millisecond
 * intervals...and we establish link due to a "link status change" interrupt.
 */
#define msec_delay_irq(x) mdelay(x)
#endif

#define PCI_COMMAND_REGISTER	PCI_COMMAND
#define CMD_MEM_WRT_INVALIDATE	PCI_COMMAND_INVALIDATE
#define ETH_ADDR_LEN		ETH_ALEN

#ifdef __BIG_ENDIAN
#define E1000_BIG_ENDIAN __BIG_ENDIAN
#endif


#ifdef DEBUG
#define DEBUGOUT(S) printk(KERN_DEBUG S)
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A)
#else
#define DEBUGOUT(S)
#define DEBUGOUT1(S, A...)
#endif

#ifdef DEBUG_FUNC
#define DEBUGFUNC(F) DEBUGOUT(F "\n")
#else
#define DEBUGFUNC(F)
#endif
#define DEBUGOUT2 DEBUGOUT1
#define DEBUGOUT3 DEBUGOUT2
#define DEBUGOUT7 DEBUGOUT3

#define E1000_REGISTER(a, reg) reg

#define E1000_WRITE_REG(a, reg, value) ( \
	writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))

#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))

#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
	writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))

#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
	readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))

#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY

#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
	writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))

#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
	readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))

#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
	writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))

#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
	readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))

#define E1000_WRITE_REG_IO(a, reg, offset) do { \
	outl(reg, ((a)->io_base)); \
	outl(offset, ((a)->io_base + 4)); } while (0)

#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)

#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
	writel((value), ((a)->flash_address + reg)))

#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
	writew((value), ((a)->flash_address + reg)))

#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))

#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))

#endif /* _E1000_OSDEP_H_ */
File diff suppressed because it is too large
@ -1,241 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Intel(R) Gigabit Ethernet Linux driver, Copyright(c) 2007-2013 Intel Corporation. */

#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_

[... 241 lines removed with the file: PHY function prototypes (copper/SerDes link setup,
MDIC and I2C register access, cable length and polarity checks) and IGP/M88/82577/GS40G/IFE
PHY register definitions ...]

#endif
@ -1,631 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Intel(R) Gigabit Ethernet Linux driver, Copyright(c) 2007-2013 Intel Corporation. */

#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_

[... 631 lines removed with the file: E1000_* register offset definitions (device control and
status, interrupts, Rx/Tx queues, statistics counters, LinkSec/IPsec, RSS, VT/SR-IOV mailboxes,
time sync, flow control, DMA coalescing, thermal, EEE and OS2BMC registers) ...]

#endif
@ -1,844 +0,0 @@
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,832 +0,0 @@
File diff suppressed because it is too large
@ -1,234 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool register test data */
struct igb_reg_test {
        u16 reg;
        u16 reg_offset;
        u16 array_len;
        u16 test_type;
        u32 mask;
        u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x100 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define WRITE_NO_TEST 3
#define TABLE32_TEST 4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6

/* i210 reg test */
static struct igb_reg_test reg_test_i210[] = {
        { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* RDH is read-only for i210, only test RDT. */
        { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0003FFF0, 0x0003FFF0 },
        { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
        { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA, 0, 16, TABLE64_TEST_LO,
          0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA, 0, 16, TABLE64_TEST_HI,
          0x900FFFFF, 0xFFFFFFFF },
        { E1000_MTA, 0, 128, TABLE32_TEST,
          0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
};

/* i350 reg test */
static struct igb_reg_test reg_test_i350[] = {
        { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        /* VET is readonly on i350 */
        { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* RDH is read-only for i350, only test RDT. */
        { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
        { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
        { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA, 0, 16, TABLE64_TEST_LO,
          0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA, 0, 16, TABLE64_TEST_HI,
          0xC3FFFFFF, 0xFFFFFFFF },
        { E1000_RA2, 0, 16, TABLE64_TEST_LO,
          0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA2, 0, 16, TABLE64_TEST_HI,
          0xC3FFFFFF, 0xFFFFFFFF },
        { E1000_MTA, 0, 128, TABLE32_TEST,
          0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
};

/* 82580 reg test */
static struct igb_reg_test reg_test_82580[] = {
        { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        /* RDH is read-only for 82580, only test RDT. */
        { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
        { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
        { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
        { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
        { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RA, 0, 16, TABLE64_TEST_LO,
          0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA, 0, 16, TABLE64_TEST_HI,
          0x83FFFFFF, 0xFFFFFFFF },
        { E1000_RA2, 0, 8, TABLE64_TEST_LO,
          0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA2, 0, 8, TABLE64_TEST_HI,
          0x83FFFFFF, 0xFFFFFFFF },
        { E1000_MTA, 0, 128, TABLE32_TEST,
          0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
};

/* 82576 reg test */
static struct igb_reg_test reg_test_82576[] = {
        { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        { E1000_RDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
        /* Enable all queues before testing. */
        { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
        { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82576, only test RDT. */
        { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
        { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, 0 },
        { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
        { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
|
||||
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
{ E1000_TDBAL(4), 0x40, 12, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
|
||||
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
|
||||
0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_HI,
|
||||
0x83FFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA2, 0, 8, TABLE64_TEST_LO,
|
||||
0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA2, 0, 8, TABLE64_TEST_HI,
|
||||
0x83FFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_MTA, 0, 128, TABLE32_TEST,
|
||||
0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
||||
|
||||
/* 82575 register test */
|
||||
static struct igb_reg_test reg_test_82575[] = {
|
||||
{ E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
|
||||
{ E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
/* Enable all four RX queues before testing. */
|
||||
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
|
||||
/* RDH is read-only for 82575, only test RDT. */
|
||||
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
|
||||
{ E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
|
||||
{ E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
|
||||
{ E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
|
||||
{ E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
|
||||
{ E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
|
||||
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
|
||||
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
|
||||
{ E1000_TXCW, 0x100, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
|
||||
0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ E1000_RA, 0, 16, TABLE64_TEST_HI,
|
||||
0x800FFFFF, 0xFFFFFFFF },
|
||||
{ E1000_MTA, 0, 128, TABLE32_TEST,
|
||||
0xFFFFFFFF, 0xFFFFFFFF },
|
||||
{ 0, 0, 0, 0 }
|
||||
};
|
@ -1,421 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel(R) Gigabit Ethernet Linux driver
|
||||
Copyright(c) 2007-2013 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
|
||||
#include <linux/tcp.h>
|
||||
|
||||
#include "igb.h"
|
||||
#include "igb_vmdq.h"
|
||||
#include <linux/if_vlan.h>
|
||||
|
||||
#ifdef CONFIG_IGB_VMDQ_NETDEV
|
||||
int igb_vmdq_open(struct net_device *dev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct net_device *main_netdev = adapter->netdev;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
if (test_bit(__IGB_DOWN, &adapter->state)) {
|
||||
DPRINTK(DRV, WARNING,
|
||||
"Open %s before opening this device.\n",
|
||||
main_netdev->name);
|
||||
return -EAGAIN;
|
||||
}
|
||||
netif_carrier_off(dev);
|
||||
vadapter->tx_ring->vmdq_netdev = dev;
|
||||
vadapter->rx_ring->vmdq_netdev = dev;
|
||||
if (is_valid_ether_addr(dev->dev_addr)) {
|
||||
igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
|
||||
igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
|
||||
}
|
||||
netif_carrier_on(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int igb_vmdq_close(struct net_device *dev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
netif_carrier_off(dev);
|
||||
igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
|
||||
|
||||
vadapter->tx_ring->vmdq_netdev = NULL;
|
||||
vadapter->rx_ring->vmdq_netdev = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
|
||||
return igb_xmit_frame_ring(skb, vadapter->tx_ring);
|
||||
}
|
||||
|
||||
struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
vadapter->net_stats.rx_packets +=
|
||||
E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
|
||||
E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
|
||||
vadapter->net_stats.tx_packets +=
|
||||
E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
|
||||
E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
|
||||
vadapter->net_stats.rx_bytes +=
|
||||
E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
|
||||
E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
|
||||
vadapter->net_stats.tx_bytes +=
|
||||
E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
|
||||
E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
|
||||
vadapter->net_stats.multicast +=
|
||||
E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
|
||||
E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
|
||||
/* only return the current stats */
|
||||
return &vadapter->net_stats;
|
||||
}
|
||||
|
||||
/**
|
||||
* igb_write_vm_addr_list - write unicast addresses to RAR table
|
||||
* @netdev: network interface device structure
|
||||
*
|
||||
* Writes unicast address list to the RAR table.
|
||||
* Returns: -ENOMEM on failure/insufficient address space
|
||||
* 0 on no addresses written
|
||||
* X on writing X addresses to the RAR table
|
||||
**/
|
||||
static int igb_write_vm_addr_list(struct net_device *netdev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
int count = 0;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
/* return ENOMEM indicating insufficient memory for addresses */
|
||||
if (netdev_uc_count(netdev) > igb_available_rars(adapter))
|
||||
return -ENOMEM;
|
||||
|
||||
if (!netdev_uc_empty(netdev)) {
|
||||
#ifdef NETDEV_HW_ADDR_T_UNICAST
|
||||
struct netdev_hw_addr *ha;
|
||||
#else
|
||||
struct dev_mc_list *ha;
|
||||
#endif
|
||||
netdev_for_each_uc_addr(ha, netdev) {
|
||||
#ifdef NETDEV_HW_ADDR_T_UNICAST
|
||||
igb_del_mac_filter(adapter, ha->addr, hw_queue);
|
||||
igb_add_mac_filter(adapter, ha->addr, hw_queue);
|
||||
#else
|
||||
igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
|
||||
igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
|
||||
#endif
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
|
||||
#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
|
||||
void igb_vmdq_set_rx_mode(struct net_device *dev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
u32 vmolr, rctl;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
/* Check for Promiscuous and All Multicast modes */
|
||||
vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
|
||||
|
||||
/* clear the affected bits */
|
||||
vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
|
||||
E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
|
||||
|
||||
if (dev->flags & IFF_PROMISC) {
|
||||
vmolr |= E1000_VMOLR_UPE;
|
||||
rctl = E1000_READ_REG(hw, E1000_RCTL);
|
||||
rctl |= E1000_RCTL_UPE;
|
||||
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
|
||||
} else {
|
||||
rctl = E1000_READ_REG(hw, E1000_RCTL);
|
||||
rctl &= ~E1000_RCTL_UPE;
|
||||
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
|
||||
if (dev->flags & IFF_ALLMULTI) {
|
||||
vmolr |= E1000_VMOLR_MPME;
|
||||
} else {
|
||||
/*
|
||||
* Write addresses to the MTA, if the attempt fails
|
||||
* then we should just turn on promiscuous mode so
|
||||
* that we can at least receive multicast traffic
|
||||
*/
|
||||
if (igb_write_mc_addr_list(adapter->netdev) != 0)
|
||||
vmolr |= E1000_VMOLR_ROMPE;
|
||||
}
|
||||
#ifdef HAVE_SET_RX_MODE
|
||||
/*
|
||||
* Write addresses to available RAR registers, if there is not
|
||||
* sufficient space to store all the addresses then enable
|
||||
* unicast promiscuous mode
|
||||
*/
|
||||
if (igb_write_vm_addr_list(dev) < 0)
|
||||
vmolr |= E1000_VMOLR_UPE;
|
||||
#endif
|
||||
}
|
||||
E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
int igb_vmdq_set_mac(struct net_device *dev, void *p)
|
||||
{
|
||||
struct sockaddr *addr = p;
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
|
||||
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
||||
return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
|
||||
}
|
||||
|
||||
int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
|
||||
if (adapter->netdev->mtu < new_mtu) {
|
||||
DPRINTK(PROBE, INFO,
|
||||
"Set MTU on %s to >= %d "
|
||||
"before changing MTU on %s\n",
|
||||
adapter->netdev->name, new_mtu, dev->name);
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->mtu = new_mtu;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void igb_vmdq_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
vadapter->vlgrp = grp;
|
||||
|
||||
igb_enable_vlan_tags(adapter);
|
||||
E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
|
||||
|
||||
return;
|
||||
}
|
||||
void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
#ifndef HAVE_NETDEV_VLAN_FEATURES
|
||||
struct net_device *v_netdev;
|
||||
#endif
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
/* attempt to add filter to vlvf array */
|
||||
igb_vlvf_set(adapter, vid, TRUE, hw_queue);
|
||||
|
||||
#ifndef HAVE_NETDEV_VLAN_FEATURES
|
||||
|
||||
/* Copy feature flags from netdev to the vlan netdev for this vid.
|
||||
* This allows things like TSO to bubble down to our vlan device.
|
||||
*/
|
||||
v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
|
||||
v_netdev->features |= adapter->netdev->features;
|
||||
vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
||||
void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
int hw_queue = vadapter->rx_ring->queue_index +
|
||||
adapter->vfs_allocated_count;
|
||||
|
||||
vlan_group_set_device(vadapter->vlgrp, vid, NULL);
|
||||
/* remove vlan from VLVF table array */
|
||||
igb_vlvf_set(adapter, vid, FALSE, hw_queue);
|
||||
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int igb_vmdq_get_settings(struct net_device *netdev,
|
||||
struct ethtool_cmd *ecmd)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct e1000_hw *hw = &adapter->hw;
|
||||
u32 status;
|
||||
|
||||
if (hw->phy.media_type == e1000_media_type_copper) {
|
||||
|
||||
ecmd->supported = (SUPPORTED_10baseT_Half |
|
||||
SUPPORTED_10baseT_Full |
|
||||
SUPPORTED_100baseT_Half |
|
||||
SUPPORTED_100baseT_Full |
|
||||
SUPPORTED_1000baseT_Full|
|
||||
SUPPORTED_Autoneg |
|
||||
SUPPORTED_TP);
|
||||
ecmd->advertising = ADVERTISED_TP;
|
||||
|
||||
if (hw->mac.autoneg == 1) {
|
||||
ecmd->advertising |= ADVERTISED_Autoneg;
|
||||
/* the e1000 autoneg seems to match ethtool nicely */
|
||||
ecmd->advertising |= hw->phy.autoneg_advertised;
|
||||
}
|
||||
|
||||
ecmd->port = PORT_TP;
|
||||
ecmd->phy_address = hw->phy.addr;
|
||||
} else {
|
||||
ecmd->supported = (SUPPORTED_1000baseT_Full |
|
||||
SUPPORTED_FIBRE |
|
||||
SUPPORTED_Autoneg);
|
||||
|
||||
ecmd->advertising = (ADVERTISED_1000baseT_Full |
|
||||
ADVERTISED_FIBRE |
|
||||
ADVERTISED_Autoneg);
|
||||
|
||||
ecmd->port = PORT_FIBRE;
|
||||
}
|
||||
|
||||
ecmd->transceiver = XCVR_INTERNAL;
|
||||
|
||||
status = E1000_READ_REG(hw, E1000_STATUS);
|
||||
|
||||
if (status & E1000_STATUS_LU) {
|
||||
|
||||
if ((status & E1000_STATUS_SPEED_1000) ||
|
||||
hw->phy.media_type != e1000_media_type_copper)
|
||||
ecmd->speed = SPEED_1000;
|
||||
else if (status & E1000_STATUS_SPEED_100)
|
||||
ecmd->speed = SPEED_100;
|
||||
else
|
||||
ecmd->speed = SPEED_10;
|
||||
|
||||
if ((status & E1000_STATUS_FD) ||
|
||||
hw->phy.media_type != e1000_media_type_copper)
|
||||
ecmd->duplex = DUPLEX_FULL;
|
||||
else
|
||||
ecmd->duplex = DUPLEX_HALF;
|
||||
} else {
|
||||
ecmd->speed = -1;
|
||||
ecmd->duplex = -1;
|
||||
}
|
||||
|
||||
ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
return adapter->msg_enable;
|
||||
}
|
||||
|
||||
static void igb_vmdq_get_drvinfo(struct net_device *netdev,
|
||||
struct ethtool_drvinfo *drvinfo)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
struct net_device *main_netdev = adapter->netdev;
|
||||
|
||||
strncpy(drvinfo->driver, igb_driver_name, 32);
|
||||
strncpy(drvinfo->version, igb_driver_version, 32);
|
||||
|
||||
strncpy(drvinfo->fw_version, "N/A", 4);
|
||||
snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
|
||||
vadapter->rx_ring->queue_index);
|
||||
drvinfo->n_stats = 0;
|
||||
drvinfo->testinfo_len = 0;
|
||||
drvinfo->regdump_len = 0;
|
||||
}
|
||||
|
||||
static void igb_vmdq_get_ringparam(struct net_device *netdev,
|
||||
struct ethtool_ringparam *ring)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
|
||||
struct igb_ring *tx_ring = vadapter->tx_ring;
|
||||
struct igb_ring *rx_ring = vadapter->rx_ring;
|
||||
|
||||
ring->rx_max_pending = IGB_MAX_RXD;
|
||||
ring->tx_max_pending = IGB_MAX_TXD;
|
||||
ring->rx_mini_max_pending = 0;
|
||||
ring->rx_jumbo_max_pending = 0;
|
||||
ring->rx_pending = rx_ring->count;
|
||||
ring->tx_pending = tx_ring->count;
|
||||
ring->rx_mini_pending = 0;
|
||||
ring->rx_jumbo_pending = 0;
|
||||
}
|
||||
static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
|
||||
{
|
||||
struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
|
||||
struct igb_adapter *adapter = vadapter->real_adapter;
|
||||
|
||||
return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
|
||||
}
|
||||
|
||||
|
||||
static struct ethtool_ops igb_vmdq_ethtool_ops = {
|
||||
.get_settings = igb_vmdq_get_settings,
|
||||
.get_drvinfo = igb_vmdq_get_drvinfo,
|
||||
.get_link = ethtool_op_get_link,
|
||||
.get_ringparam = igb_vmdq_get_ringparam,
|
||||
.get_rx_csum = igb_vmdq_get_rx_csum,
|
||||
.get_tx_csum = ethtool_op_get_tx_csum,
|
||||
.get_sg = ethtool_op_get_sg,
|
||||
.set_sg = ethtool_op_set_sg,
|
||||
.get_msglevel = igb_vmdq_get_msglevel,
|
||||
#ifdef NETIF_F_TSO
|
||||
.get_tso = ethtool_op_get_tso,
|
||||
#endif
|
||||
#ifdef HAVE_ETHTOOL_GET_PERM_ADDR
|
||||
.get_perm_addr = ethtool_op_get_perm_addr,
|
||||
#endif
|
||||
};
|
||||
|
||||
void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
|
||||
{
|
||||
SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
|
||||
}
|
||||
|
||||
|
||||
#endif /* CONFIG_IGB_VMDQ_NETDEV */
|
@ -1,31 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

Intel(R) Gigabit Ethernet Linux driver
Copyright(c) 2007-2013 Intel Corporation.

Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IGB_VMDQ_H_
#define _IGB_VMDQ_H_

#ifdef CONFIG_IGB_VMDQ_NETDEV
int igb_vmdq_open(struct net_device *dev);
int igb_vmdq_close(struct net_device *dev);
netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev);
struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev);
void igb_vmdq_set_rx_mode(struct net_device *dev);
int igb_vmdq_set_mac(struct net_device *dev, void *addr);
int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu);
void igb_vmdq_tx_timeout(struct net_device *dev);
void igb_vmdq_vlan_rx_register(struct net_device *dev,
struct vlan_group *grp);
void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
void igb_vmdq_set_ethtool_ops(struct net_device *netdev);
#endif /* CONFIG_IGB_VMDQ_NETDEV */
#endif /* _IGB_VMDQ_H_ */
File diff suppressed because it is too large
@ -1,16 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>

kni_igb_sources = files(
'e1000_82575.c',
'e1000_api.c',
'e1000_i210.c',
'e1000_mac.c',
'e1000_manage.c',
'e1000_mbx.c',
'e1000_nvm.c',
'e1000_phy.c',
'igb_ethtool.c',
'igb_main.c',
'igb_param.c',
'igb_vmdq.c')
@ -1,912 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_H_
|
||||
#define _IXGBE_H_
|
||||
|
||||
#ifndef IXGBE_NO_LRO
|
||||
#include <net/tcp.h>
|
||||
#endif
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/netdevice.h>
|
||||
#ifdef HAVE_IRQ_AFFINITY_HINT
|
||||
#include <linux/cpumask.h>
|
||||
#endif /* HAVE_IRQ_AFFINITY_HINT */
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#ifdef SIOCETHTOOL
|
||||
#include <linux/ethtool.h>
|
||||
#endif
|
||||
#ifdef NETIF_F_HW_VLAN_TX
|
||||
#include <linux/if_vlan.h>
|
||||
#endif
|
||||
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
|
||||
#define IXGBE_DCA
|
||||
#include <linux/dca.h>
|
||||
#endif
|
||||
#include "ixgbe_dcb.h"
|
||||
|
||||
#include "kcompat.h"
|
||||
|
||||
#ifdef HAVE_SCTP
|
||||
#include <linux/sctp.h>
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
|
||||
#define IXGBE_FCOE
|
||||
#include "ixgbe_fcoe.h"
|
||||
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
|
||||
|
||||
#if defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE)
|
||||
#define HAVE_IXGBE_PTP
|
||||
#endif
|
||||
|
||||
#include "ixgbe_api.h"
|
||||
|
||||
#define PFX "ixgbe: "
|
||||
#define DPRINTK(nlevel, klevel, fmt, args...) \
|
||||
((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
|
||||
printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
|
||||
__func__ , ## args)))
|
||||
|
||||
/* TX/RX descriptor defines */
|
||||
#define IXGBE_DEFAULT_TXD 512
|
||||
#define IXGBE_DEFAULT_TX_WORK 256
|
||||
#define IXGBE_MAX_TXD 4096
|
||||
#define IXGBE_MIN_TXD 64
|
||||
|
||||
#define IXGBE_DEFAULT_RXD 512
|
||||
#define IXGBE_DEFAULT_RX_WORK 256
|
||||
#define IXGBE_MAX_RXD 4096
|
||||
#define IXGBE_MIN_RXD 64
|
||||
|
||||
|
||||
/* flow control */
|
||||
#define IXGBE_MIN_FCRTL 0x40
|
||||
#define IXGBE_MAX_FCRTL 0x7FF80
|
||||
#define IXGBE_MIN_FCRTH 0x600
|
||||
#define IXGBE_MAX_FCRTH 0x7FFF0
|
||||
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
|
||||
#define IXGBE_MIN_FCPAUSE 0
|
||||
#define IXGBE_MAX_FCPAUSE 0xFFFF
|
||||
|
||||
/* Supported Rx Buffer Sizes */
|
||||
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
|
||||
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
#define IXGBE_RXBUFFER_1536 1536
|
||||
#define IXGBE_RXBUFFER_2K 2048
|
||||
#define IXGBE_RXBUFFER_3K 3072
|
||||
#define IXGBE_RXBUFFER_4K 4096
|
||||
#define IXGBE_RXBUFFER_7K 7168
|
||||
#define IXGBE_RXBUFFER_8K 8192
|
||||
#define IXGBE_RXBUFFER_15K 15360
|
||||
#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
|
||||
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */
|
||||
|
||||
/*
|
||||
* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
|
||||
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
|
||||
* this adds up to 512 bytes of extra data meaning the smallest allocation
|
||||
* we could have is 1K.
|
||||
* i.e. RXBUFFER_512 --> size-1024 slab
|
||||
*/
|
||||
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
|
||||
|
||||
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
|
||||
|
||||
/* How many Rx Buffers do we bundle into one write to the hardware ? */
|
||||
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
|
||||
|
||||
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
|
||||
#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
|
||||
#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
|
||||
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
|
||||
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
|
||||
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
|
||||
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
|
||||
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
|
||||
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
|
||||
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
|
||||
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
|
||||
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
|
||||
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
|
||||
|
||||
#define IXGBE_MAX_RX_DESC_POLL 10
|
||||
|
||||
#define IXGBE_MAX_VF_MC_ENTRIES 30
|
||||
#define IXGBE_MAX_VF_FUNCTIONS 64
|
||||
#define IXGBE_MAX_VFTA_ENTRIES 128
|
||||
#define MAX_EMULATION_MAC_ADDRS 16
|
||||
#define IXGBE_MAX_PF_MACVLANS 15
|
||||
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
|
||||
#define IXGBE_X540_VF_DEVICE_ID 0x1515
|
||||
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
#define VMDQ_P(p) ((p) + adapter->num_vfs)
|
||||
#else
|
||||
#define VMDQ_P(p) (p)
|
||||
#endif
|
||||
|
||||
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
|
||||
{ \
|
||||
u32 current_counter = IXGBE_READ_REG(hw, reg); \
|
||||
if (current_counter < last_counter) \
|
||||
counter += 0x100000000LL; \
|
||||
last_counter = current_counter; \
|
||||
counter &= 0xFFFFFFFF00000000LL; \
|
||||
counter |= current_counter; \
|
||||
}
|
||||
|
||||
#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
|
||||
{ \
|
||||
u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
|
||||
u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
|
||||
u64 current_counter = (current_counter_msb << 32) | \
|
||||
current_counter_lsb; \
|
||||
if (current_counter < last_counter) \
|
||||
counter += 0x1000000000LL; \
|
||||
last_counter = current_counter; \
|
||||
counter &= 0xFFFFFFF000000000LL; \
|
||||
counter |= current_counter; \
|
||||
}
|
||||
|
||||
struct vf_stats {
|
||||
u64 gprc;
|
||||
u64 gorc;
|
||||
u64 gptc;
|
||||
u64 gotc;
|
||||
u64 mprc;
|
||||
};
|
||||
|
||||
struct vf_data_storage {
|
||||
unsigned char vf_mac_addresses[ETH_ALEN];
|
||||
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
|
||||
u16 num_vf_mc_hashes;
|
||||
u16 default_vf_vlan_id;
|
||||
u16 vlans_enabled;
|
||||
bool clear_to_send;
|
||||
struct vf_stats vfstats;
|
||||
struct vf_stats last_vfstats;
|
||||
struct vf_stats saved_rst_vfstats;
|
||||
bool pf_set_mac;
|
||||
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
|
||||
u16 pf_qos;
|
||||
u16 tx_rate;
|
||||
u16 vlan_count;
|
||||
u8 spoofchk_enabled;
|
||||
struct pci_dev *vfdev;
|
||||
};
|
||||
|
||||
struct vf_macvlans {
|
||||
struct list_head l;
|
||||
int vf;
|
||||
bool free;
|
||||
bool is_macvlan;
|
||||
u8 vf_macvlan[ETH_ALEN];
|
||||
};
|
||||
|
||||
#ifndef IXGBE_NO_LRO
|
||||
#define IXGBE_LRO_MAX 32 /*Maximum number of LRO descriptors*/
|
||||
#define IXGBE_LRO_GLOBAL 10
|
||||
|
||||
struct ixgbe_lro_stats {
|
||||
u32 flushed;
|
||||
u32 coal;
|
||||
};
|
||||
|
||||
/*
|
||||
* ixgbe_lro_header - header format to be aggregated by LRO
|
||||
* @iph: IP header without options
|
||||
* @tcp: TCP header
|
||||
* @ts: Optional TCP timestamp data in TCP options
|
||||
*
|
||||
* This structure relies on the check above that verifies that the header
|
||||
* is IPv4 and does not contain any options.
|
||||
*/
|
||||
struct ixgbe_lrohdr {
|
||||
struct iphdr iph;
|
||||
struct tcphdr th;
|
||||
__be32 ts[0];
|
||||
};
|
||||
|
||||
struct ixgbe_lro_list {
|
||||
struct sk_buff_head active;
|
||||
struct ixgbe_lro_stats stats;
|
||||
};
|
||||
|
||||
#endif /* IXGBE_NO_LRO */
|
||||
#define IXGBE_MAX_TXD_PWR 14
|
||||
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
|
||||
|
||||
/* Tx Descriptors needed, worst case */
|
||||
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
|
||||
#ifdef MAX_SKB_FRAGS
|
||||
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
|
||||
#else
|
||||
#define DESC_NEEDED 4
|
||||
#endif
|
||||
|
||||
/* wrapper around a pointer to a socket buffer,
|
||||
* so a DMA handle can be stored along with the buffer */
|
||||
struct ixgbe_tx_buffer {
|
||||
union ixgbe_adv_tx_desc *next_to_watch;
|
||||
unsigned long time_stamp;
|
||||
struct sk_buff *skb;
|
||||
unsigned int bytecount;
|
||||
unsigned short gso_segs;
|
||||
__be16 protocol;
|
||||
DEFINE_DMA_UNMAP_ADDR(dma);
|
||||
DEFINE_DMA_UNMAP_LEN(len);
|
||||
u32 tx_flags;
|
||||
};
|
||||
|
||||
struct ixgbe_rx_buffer {
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t dma;
|
||||
#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
struct page *page;
|
||||
unsigned int page_offset;
|
||||
#endif
|
||||
};
|
||||
|
||||
struct ixgbe_queue_stats {
|
||||
u64 packets;
|
||||
u64 bytes;
|
||||
};
|
||||
|
||||
struct ixgbe_tx_queue_stats {
|
||||
u64 restart_queue;
|
||||
u64 tx_busy;
|
||||
u64 tx_done_old;
|
||||
};
|
||||
|
||||
struct ixgbe_rx_queue_stats {
|
||||
u64 rsc_count;
|
||||
u64 rsc_flush;
|
||||
u64 non_eop_descs;
|
||||
u64 alloc_rx_page_failed;
|
||||
u64 alloc_rx_buff_failed;
|
||||
u64 csum_err;
|
||||
};
|
||||
|
||||
enum ixgbe_ring_state_t {
|
||||
__IXGBE_TX_FDIR_INIT_DONE,
|
||||
__IXGBE_TX_DETECT_HANG,
|
||||
__IXGBE_HANG_CHECK_ARMED,
|
||||
__IXGBE_RX_RSC_ENABLED,
|
||||
#ifndef HAVE_NDO_SET_FEATURES
|
||||
__IXGBE_RX_CSUM_ENABLED,
|
||||
#endif
|
||||
__IXGBE_RX_CSUM_UDP_ZERO_ERR,
|
||||
#ifdef IXGBE_FCOE
|
||||
__IXGBE_RX_FCOE_BUFSZ,
|
||||
#endif
|
||||
};
|
||||
|
||||
#define check_for_tx_hang(ring) \
|
||||
test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
|
||||
#define set_check_for_tx_hang(ring) \
|
||||
set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
|
||||
#define clear_check_for_tx_hang(ring) \
|
||||
clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
|
||||
#ifndef IXGBE_NO_HW_RSC
|
||||
#define ring_is_rsc_enabled(ring) \
|
||||
test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
|
||||
#else
|
||||
#define ring_is_rsc_enabled(ring) false
|
||||
#endif
|
||||
#define set_ring_rsc_enabled(ring) \
|
||||
set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
|
||||
#define clear_ring_rsc_enabled(ring) \
|
||||
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
|
||||
#define netdev_ring(ring) (ring->netdev)
|
||||
#define ring_queue_index(ring) (ring->queue_index)
|
||||
|
||||
|
||||
struct ixgbe_ring {
|
||||
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
|
||||
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
|
||||
struct net_device *netdev; /* netdev ring belongs to */
|
||||
struct device *dev; /* device for DMA mapping */
|
||||
void *desc; /* descriptor ring memory */
|
||||
union {
|
||||
struct ixgbe_tx_buffer *tx_buffer_info;
|
||||
struct ixgbe_rx_buffer *rx_buffer_info;
|
||||
};
|
||||
unsigned long state;
|
||||
u8 __iomem *tail;
|
||||
dma_addr_t dma; /* phys. address of descriptor ring */
|
||||
unsigned int size; /* length in bytes */
|
||||
|
||||
u16 count; /* amount of descriptors */
|
||||
|
||||
u8 queue_index; /* needed for multiqueue queue management */
|
||||
u8 reg_idx; /* holds the special value that gets
|
||||
* the hardware register offset
|
||||
* associated with this ring, which is
|
||||
* different for DCB and RSS modes
|
||||
*/
|
||||
u16 next_to_use;
|
||||
u16 next_to_clean;
|
||||
|
||||
union {
|
||||
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
u16 rx_buf_len;
|
||||
#else
|
||||
u16 next_to_alloc;
|
||||
#endif
|
||||
struct {
|
||||
u8 atr_sample_rate;
|
||||
u8 atr_count;
|
||||
};
|
||||
};
|
||||
|
||||
u8 dcb_tc;
|
||||
struct ixgbe_queue_stats stats;
|
||||
union {
|
||||
struct ixgbe_tx_queue_stats tx_stats;
|
||||
struct ixgbe_rx_queue_stats rx_stats;
|
||||
};
|
||||
} ____cacheline_internodealigned_in_smp;
|
||||
|
||||
enum ixgbe_ring_f_enum {
|
||||
RING_F_NONE = 0,
|
||||
RING_F_VMDQ, /* SR-IOV uses the same ring feature */
|
||||
RING_F_RSS,
|
||||
RING_F_FDIR,
|
||||
#ifdef IXGBE_FCOE
|
||||
RING_F_FCOE,
|
||||
#endif /* IXGBE_FCOE */
|
||||
RING_F_ARRAY_SIZE /* must be last in enum set */
|
||||
};
|
||||
|
||||
#define IXGBE_MAX_DCB_INDICES 8
|
||||
#define IXGBE_MAX_RSS_INDICES 16
|
||||
#define IXGBE_MAX_VMDQ_INDICES 64
|
||||
#define IXGBE_MAX_FDIR_INDICES 64
|
||||
#ifdef IXGBE_FCOE
|
||||
#define IXGBE_MAX_FCOE_INDICES 8
|
||||
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
|
||||
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
|
||||
#else
|
||||
#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
|
||||
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
|
||||
#endif /* IXGBE_FCOE */
|
||||
struct ixgbe_ring_feature {
|
||||
int indices;
|
||||
int mask;
|
||||
};
|
||||
|
||||
#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
/*
|
||||
* FCoE requires that all Rx buffers be over 2200 bytes in length. Since
|
||||
* this is twice the size of a half page we need to double the page order
|
||||
* for FCoE enabled Rx queues.
|
||||
*/
|
||||
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
|
||||
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
|
||||
{
|
||||
return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
|
||||
}
|
||||
#else
|
||||
#define ixgbe_rx_pg_order(_ring) 0
|
||||
#endif
|
||||
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
|
||||
#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
|
||||
|
||||
#endif
|
||||
struct ixgbe_ring_container {
|
||||
struct ixgbe_ring *ring; /* pointer to linked list of rings */
|
||||
unsigned int total_bytes; /* total bytes processed this int */
|
||||
unsigned int total_packets; /* total packets processed this int */
|
||||
u16 work_limit; /* total work allowed per interrupt */
|
||||
u8 count; /* total number of rings in vector */
|
||||
u8 itr; /* current ITR setting for ring */
|
||||
};
|
||||
|
||||
/* iterator for handling rings in ring container */
|
||||
#define ixgbe_for_each_ring(pos, head) \
|
||||
for (pos = (head).ring; pos != NULL; pos = pos->next)
|
||||
|
||||
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
|
||||
? 8 : 1)
|
||||
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
|
||||
|
||||
/* MAX_MSIX_Q_VECTORS of these are allocated,
|
||||
* but we only use one per queue-specific vector.
|
||||
*/
|
||||
struct ixgbe_q_vector {
|
||||
struct ixgbe_adapter *adapter;
|
||||
int cpu; /* CPU for DCA */
|
||||
u16 v_idx; /* index of q_vector within array, also used for
|
||||
* finding the bit in EICR and friends that
|
||||
* represents the vector for this ring */
|
||||
u16 itr; /* Interrupt throttle rate written to EITR */
|
||||
struct ixgbe_ring_container rx, tx;
|
||||
|
||||
#ifdef CONFIG_IXGBE_NAPI
|
||||
struct napi_struct napi;
|
||||
#endif
|
||||
#ifndef HAVE_NETDEV_NAPI_LIST
|
||||
struct net_device poll_dev;
|
||||
#endif
|
||||
#ifdef HAVE_IRQ_AFFINITY_HINT
|
||||
cpumask_t affinity_mask;
|
||||
#endif
|
||||
#ifndef IXGBE_NO_LRO
|
||||
struct ixgbe_lro_list lrolist; /* LRO list for queue vector*/
|
||||
#endif
|
||||
int numa_node;
|
||||
char name[IFNAMSIZ + 9];
|
||||
|
||||
/* for dynamic allocation of rings associated with this q_vector */
|
||||
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
|
||||
};
|
||||
|
||||
/*
|
||||
* microsecond values for various ITR rates shifted by 2 to fit itr register
|
||||
* with the first 3 bits reserved 0
|
||||
*/
|
||||
#define IXGBE_MIN_RSC_ITR 24
|
||||
#define IXGBE_100K_ITR 40
|
||||
#define IXGBE_20K_ITR 200
|
||||
#define IXGBE_16K_ITR 248
|
||||
#define IXGBE_10K_ITR 400
|
||||
#define IXGBE_8K_ITR 500
|
||||
|
||||
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
|
||||
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
|
||||
const u32 stat_err_bits)
|
||||
{
|
||||
return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
|
||||
}
|
||||
|
||||
/* ixgbe_desc_unused - calculate if we have unused descriptors */
|
||||
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
|
||||
{
|
||||
u16 ntc = ring->next_to_clean;
|
||||
u16 ntu = ring->next_to_use;
|
||||
|
||||
return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
|
||||
}
|
||||
|
||||
#define IXGBE_RX_DESC(R, i) \
|
||||
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
|
||||
#define IXGBE_TX_DESC(R, i) \
|
||||
(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
|
||||
#define IXGBE_TX_CTXTDESC(R, i) \
|
||||
(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
|
||||
|
||||
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
|
||||
#ifdef IXGBE_FCOE
|
||||
/* use 3K as the baby jumbo frame size for FCoE */
|
||||
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
|
||||
#endif /* IXGBE_FCOE */
|
||||
|
||||
#define TCP_TIMER_VECTOR 0
|
||||
#define OTHER_VECTOR 1
|
||||
#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)
|
||||
|
||||
#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
|
||||
#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16
|
||||
|
||||
struct ixgbe_mac_addr {
|
||||
u8 addr[ETH_ALEN];
|
||||
u16 queue;
|
||||
u16 state; /* bitmask */
|
||||
};
|
||||
#define IXGBE_MAC_STATE_DEFAULT 0x1
|
||||
#define IXGBE_MAC_STATE_MODIFIED 0x2
|
||||
#define IXGBE_MAC_STATE_IN_USE 0x4
|
||||
|
||||
#ifdef IXGBE_PROCFS
|
||||
struct ixgbe_therm_proc_data {
|
||||
struct ixgbe_hw *hw;
|
||||
struct ixgbe_thermal_diode_data *sensor_data;
|
||||
};
|
||||
|
||||
#endif /* IXGBE_PROCFS */
|
||||
|
||||
/*
|
||||
* Only for array allocations in our adapter struct. On 82598, there will be
|
||||
* unused entries in the array, but that's not a big deal. Also, in 82599,
|
||||
* we can actually assign 64 queue vectors based on our extended-extended
|
||||
* interrupt registers. This is different than 82598, which is limited to 16.
|
||||
*/
|
||||
#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
|
||||
#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599
|
||||
|
||||
#define MIN_MSIX_Q_VECTORS 1
|
||||
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
|
||||
|
||||
/* default to trying for four seconds */
|
||||
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
|
||||
|
||||
/* board specific private data structure */
|
||||
struct ixgbe_adapter {
|
||||
#ifdef NETIF_F_HW_VLAN_TX
|
||||
#ifdef HAVE_VLAN_RX_REGISTER
|
||||
struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */
|
||||
#else
|
||||
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
|
||||
#endif
|
||||
#endif /* NETIF_F_HW_VLAN_TX */
|
||||
/* OS defined structs */
|
||||
struct net_device *netdev;
|
||||
struct pci_dev *pdev;
|
||||
|
||||
unsigned long state;
|
||||
|
||||
/* Some features need tri-state capability,
|
||||
* thus the additional *_CAPABLE flags.
|
||||
*/
|
||||
u32 flags;
|
||||
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
|
||||
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
|
||||
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
|
||||
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
|
||||
#ifndef IXGBE_NO_LLI
|
||||
#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4)
|
||||
#endif
|
||||
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 8)
|
||||
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
|
||||
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 9)
|
||||
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 10)
|
||||
#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 11)
|
||||
#else
|
||||
#define IXGBE_FLAG_DCA_ENABLED (u32)0
|
||||
#define IXGBE_FLAG_DCA_CAPABLE (u32)0
|
||||
#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0
|
||||
#endif
|
||||
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 12)
|
||||
#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 13)
|
||||
#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 14)
|
||||
#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 15)
|
||||
#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 16)
|
||||
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 18)
|
||||
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 19)
|
||||
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 20)
|
||||
#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 21)
|
||||
#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 22)
|
||||
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 23)
|
||||
#ifdef IXGBE_FCOE
|
||||
#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 24)
|
||||
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 25)
|
||||
#endif /* IXGBE_FCOE */
|
||||
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 26)
|
||||
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 27)
|
||||
#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 28)
|
||||
#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 29)
|
||||
#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 30)
|
||||
#define IXGBE_FLAG_RX_BB_CAPABLE (u32)(1 << 31)
|
||||
|
||||
u32 flags2;
|
||||
#ifndef IXGBE_NO_HW_RSC
|
||||
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
|
||||
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
|
||||
#else
|
||||
#define IXGBE_FLAG2_RSC_CAPABLE 0
|
||||
#define IXGBE_FLAG2_RSC_ENABLED 0
|
||||
#endif
|
||||
#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 2)
|
||||
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 4)
|
||||
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 5)
|
||||
#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 6)
|
||||
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 7)
|
||||
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 8)
|
||||
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 9)
|
||||
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 10)
|
||||
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 11)
|
||||
#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 12)
|
||||
|
||||
/* Tx fast path data */
|
||||
int num_tx_queues;
|
||||
u16 tx_itr_setting;
|
||||
u16 tx_work_limit;
|
||||
|
||||
/* Rx fast path data */
|
||||
int num_rx_queues;
|
||||
u16 rx_itr_setting;
|
||||
u16 rx_work_limit;
|
||||
|
||||
/* TX */
|
||||
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
|
||||
|
||||
u64 restart_queue;
|
||||
u64 lsc_int;
|
||||
u32 tx_timeout_count;
|
||||
|
||||
/* RX */
|
||||
struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
|
||||
int num_rx_pools; /* == num_rx_queues in 82598 */
|
||||
int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
|
||||
u64 hw_csum_rx_error;
|
||||
u64 hw_rx_no_dma_resources;
|
||||
u64 rsc_total_count;
|
||||
u64 rsc_total_flush;
|
||||
u64 non_eop_descs;
|
||||
#ifndef CONFIG_IXGBE_NAPI
|
||||
u64 rx_dropped_backlog; /* count drops from rx intr handler */
|
||||
#endif
|
||||
u32 alloc_rx_page_failed;
|
||||
u32 alloc_rx_buff_failed;
|
||||
|
||||
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
|
||||
|
||||
#ifdef HAVE_DCBNL_IEEE
|
||||
struct ieee_pfc *ixgbe_ieee_pfc;
|
||||
struct ieee_ets *ixgbe_ieee_ets;
|
||||
#endif
|
||||
struct ixgbe_dcb_config dcb_cfg;
|
||||
struct ixgbe_dcb_config temp_dcb_cfg;
|
||||
u8 dcb_set_bitmap;
|
||||
u8 dcbx_cap;
|
||||
#ifndef HAVE_MQPRIO
|
||||
u8 tc;
|
||||
#endif
|
||||
enum ixgbe_fc_mode last_lfc_mode;
|
||||
|
||||
int num_msix_vectors;
|
||||
int max_msix_q_vectors; /* true count of q_vectors for device */
|
||||
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
|
||||
struct msix_entry *msix_entries;
|
||||
|
||||
#ifndef HAVE_NETDEV_STATS_IN_NETDEV
|
||||
struct net_device_stats net_stats;
|
||||
#endif
|
||||
#ifndef IXGBE_NO_LRO
|
||||
struct ixgbe_lro_stats lro_stats;
|
||||
#endif
|
||||
|
||||
#ifdef ETHTOOL_TEST
|
||||
u32 test_icr;
|
||||
struct ixgbe_ring test_tx_ring;
|
||||
struct ixgbe_ring test_rx_ring;
|
||||
#endif
|
||||
|
||||
/* structs defined in ixgbe_hw.h */
|
||||
struct ixgbe_hw hw;
|
||||
u16 msg_enable;
|
||||
struct ixgbe_hw_stats stats;
|
||||
#ifndef IXGBE_NO_LLI
|
||||
u32 lli_port;
|
||||
u32 lli_size;
|
||||
u32 lli_etype;
|
||||
u32 lli_vlan_pri;
|
||||
#endif /* IXGBE_NO_LLI */
|
||||
|
||||
u32 *config_space;
|
||||
u64 tx_busy;
|
||||
unsigned int tx_ring_count;
|
||||
unsigned int rx_ring_count;
|
||||
|
||||
u32 link_speed;
|
||||
bool link_up;
|
||||
unsigned long link_check_timeout;
|
||||
|
||||
struct timer_list service_timer;
|
||||
struct work_struct service_task;
|
||||
|
||||
struct hlist_head fdir_filter_list;
|
||||
unsigned long fdir_overflow; /* number of times ATR was backed off */
|
||||
union ixgbe_atr_input fdir_mask;
|
||||
int fdir_filter_count;
|
||||
u32 fdir_pballoc;
|
||||
u32 atr_sample_rate;
|
||||
spinlock_t fdir_perfect_lock;
|
||||
|
||||
#ifdef IXGBE_FCOE
|
||||
struct ixgbe_fcoe fcoe;
|
||||
#endif /* IXGBE_FCOE */
|
||||
u32 wol;
|
||||
|
||||
u16 bd_number;
|
||||
|
||||
char eeprom_id[32];
|
||||
u16 eeprom_cap;
|
||||
bool netdev_registered;
|
||||
u32 interrupt_event;
|
||||
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
|
||||
u32 led_reg;
|
||||
#endif
|
||||
|
||||
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
|
||||
unsigned int num_vfs;
|
||||
struct vf_data_storage *vfinfo;
|
||||
int vf_rate_link_speed;
|
||||
struct vf_macvlans vf_mvs;
|
||||
struct vf_macvlans *mv_list;
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
u32 timer_event_accumulator;
|
||||
u32 vferr_refcount;
|
||||
#endif
|
||||
struct ixgbe_mac_addr *mac_table;
|
||||
#ifdef IXGBE_SYSFS
|
||||
struct kobject *info_kobj;
|
||||
struct kobject *therm_kobj[IXGBE_MAX_SENSORS];
|
||||
#else /* IXGBE_SYSFS */
|
||||
#ifdef IXGBE_PROCFS
|
||||
struct proc_dir_entry *eth_dir;
|
||||
struct proc_dir_entry *info_dir;
|
||||
struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS];
|
||||
struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
|
||||
#endif /* IXGBE_PROCFS */
|
||||
#endif /* IXGBE_SYSFS */
|
||||
};
|
||||
|
||||
struct ixgbe_fdir_filter {
|
||||
struct hlist_node fdir_node;
|
||||
union ixgbe_atr_input filter;
|
||||
u16 sw_idx;
|
||||
u16 action;
|
||||
};
|
||||
|
||||
enum ixgbe_state_t {
|
||||
__IXGBE_TESTING,
|
||||
__IXGBE_RESETTING,
|
||||
__IXGBE_DOWN,
|
||||
__IXGBE_SERVICE_SCHED,
|
||||
__IXGBE_IN_SFP_INIT,
|
||||
};
|
||||
|
||||
struct ixgbe_cb {
|
||||
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
union { /* Union defining head/tail partner */
|
||||
struct sk_buff *head;
|
||||
struct sk_buff *tail;
|
||||
};
|
||||
#endif
|
||||
dma_addr_t dma;
|
||||
#ifndef IXGBE_NO_LRO
|
||||
__be32 tsecr; /* timestamp echo response */
|
||||
u32 tsval; /* timestamp value in host order */
|
||||
u32 next_seq; /* next expected sequence number */
|
||||
u16 free; /* 65521 minus total size */
|
||||
u16 mss; /* size of data portion of packet */
|
||||
#endif /* IXGBE_NO_LRO */
|
||||
#ifdef HAVE_VLAN_RX_REGISTER
|
||||
u16 vid; /* VLAN tag */
|
||||
#endif
|
||||
u16 append_cnt; /* number of skb's appended */
|
||||
#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
|
||||
bool page_released;
|
||||
#endif
|
||||
};
|
||||
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
|
||||
|
||||
#ifdef IXGBE_SYSFS
|
||||
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
|
||||
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
|
||||
#endif /* IXGBE_SYSFS */
|
||||
#ifdef IXGBE_PROCFS
|
||||
void ixgbe_procfs_exit(struct ixgbe_adapter *adapter);
|
||||
int ixgbe_procfs_init(struct ixgbe_adapter *adapter);
|
||||
int ixgbe_procfs_topdir_init(void);
|
||||
void ixgbe_procfs_topdir_exit(void);
|
||||
#endif /* IXGBE_PROCFS */
|
||||
|
||||
extern struct dcbnl_rtnl_ops dcbnl_ops;
|
||||
extern int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max);
|
||||
|
||||
extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
|
||||
|
||||
/* needed by ixgbe_main.c */
|
||||
extern int ixgbe_validate_mac_addr(u8 *mc_addr);
|
||||
extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_assign_netdev_ops(struct net_device *netdev);
|
||||
|
||||
/* needed by ixgbe_ethtool.c */
|
||||
extern char ixgbe_driver_name[];
|
||||
extern const char ixgbe_driver_version[];
|
||||
|
||||
extern void ixgbe_up(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_down(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
|
||||
extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
|
||||
extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
|
||||
extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
|
||||
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
|
||||
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
|
||||
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
|
||||
extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
|
||||
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
|
||||
struct ixgbe_adapter *,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
|
||||
struct ixgbe_tx_buffer *);
|
||||
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
|
||||
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_set_rx_mode(struct net_device *netdev);
|
||||
extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
|
||||
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
|
||||
#ifdef IXGBE_FCOE
|
||||
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
|
||||
#endif /* IXGBE_FCOE */
|
||||
extern void ixgbe_do_reset(struct net_device *netdev);
|
||||
extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
|
||||
extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
|
||||
struct ixgbe_ring *);
|
||||
extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
|
||||
extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
|
||||
#ifdef ETHTOOL_OPS_COMPAT
|
||||
extern int ethtool_ioctl(struct ifreq *ifr);
|
||||
#endif
|
||||
|
||||
#ifdef IXGBE_FCOE
|
||||
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
|
||||
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
|
||||
struct ixgbe_tx_buffer *first,
|
||||
u8 *hdr_len);
|
||||
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
|
||||
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
|
||||
union ixgbe_adv_rx_desc *rx_desc,
|
||||
struct sk_buff *skb);
|
||||
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
|
||||
struct scatterlist *sgl, unsigned int sgc);
|
||||
#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
|
||||
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
|
||||
struct scatterlist *sgl, unsigned int sgc);
|
||||
#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
|
||||
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
|
||||
#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
|
||||
extern int ixgbe_fcoe_enable(struct net_device *netdev);
|
||||
extern int ixgbe_fcoe_disable(struct net_device *netdev);
|
||||
#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
|
||||
#ifdef CONFIG_DCB
|
||||
#ifdef HAVE_DCBNL_OPS_GETAPP
|
||||
extern u8 ixgbe_fcoe_getapp(struct net_device *netdev);
|
||||
#endif /* HAVE_DCBNL_OPS_GETAPP */
|
||||
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
|
||||
#endif /* CONFIG_DCB */
|
||||
#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
|
||||
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
|
||||
#endif
|
||||
#endif /* IXGBE_FCOE */
|
||||
|
||||
#ifdef CONFIG_DCB
|
||||
#ifdef HAVE_DCBNL_IEEE
|
||||
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
|
||||
#endif /* HAVE_DCBNL_IEEE */
|
||||
#endif /* CONFIG_DCB */
|
||||
|
||||
extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
|
||||
#ifndef ETHTOOL_GLINKSETTINGS
|
||||
extern int ixgbe_get_settings(struct net_device *netdev,
|
||||
struct ethtool_cmd *ecmd);
|
||||
#endif
|
||||
extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
|
||||
struct net_device *netdev, unsigned int vfn);
|
||||
extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
|
||||
extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
|
||||
u8 *addr, u16 queue);
|
||||
extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
|
||||
u8 *addr, u16 queue);
|
||||
extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
|
||||
#ifndef HAVE_VLAN_RX_REGISTER
|
||||
extern void ixgbe_vlan_mode(struct net_device *, u32);
|
||||
#endif
|
||||
#ifndef ixgbe_get_netdev_tc_txq
|
||||
#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc])
|
||||
#endif
|
||||
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
|
||||
#endif /* _IXGBE_H_ */
|
File diff suppressed because it is too large
@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2012 Intel Corporation.

Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_82598_H_
#define _IXGBE_82598_H_

u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
#endif /* _IXGBE_82598_H_ */
File diff suppressed because it is too large
@ -1,43 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_82599_H_
|
||||
#define _IXGBE_82599_H_
|
||||
|
||||
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed, bool *autoneg);
|
||||
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
|
||||
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
|
||||
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
|
||||
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed speed, bool autoneg,
|
||||
bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed speed, bool autoneg,
|
||||
bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
|
||||
bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
|
||||
bool autoneg, bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
|
||||
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
|
||||
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
|
||||
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
|
||||
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
|
||||
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
|
||||
#endif /* _IXGBE_82599_H_ */
|
File diff suppressed because it is too large
@ -1,153 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_API_H_
|
||||
#define _IXGBE_API_H_
|
||||
|
||||
#include "ixgbe_type.h"
|
||||
|
||||
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
|
||||
|
||||
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
|
||||
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
|
||||
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_start_hw(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
|
||||
enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
|
||||
s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
|
||||
u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
|
||||
u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
|
||||
|
||||
s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
|
||||
u16 *phy_data);
|
||||
s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
|
||||
u16 phy_data);
|
||||
|
||||
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed,
|
||||
bool *link_up);
|
||||
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed speed,
|
||||
bool autoneg,
|
||||
bool autoneg_wait_to_complete);
|
||||
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
|
||||
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
|
||||
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
|
||||
bool autoneg, bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
|
||||
bool *link_up, bool link_up_wait_to_complete);
|
||||
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
|
||||
bool *autoneg);
|
||||
s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
|
||||
|
||||
s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
|
||||
s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
|
||||
s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
|
||||
s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
|
||||
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
|
||||
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
|
||||
u32 enable_addr);
|
||||
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
|
||||
s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
|
||||
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
|
||||
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
|
||||
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
|
||||
u32 addr_count, ixgbe_mc_addr_itr func);
|
||||
s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
|
||||
u32 mc_addr_count, ixgbe_mc_addr_itr func,
|
||||
bool clear);
|
||||
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
|
||||
s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
|
||||
u32 vind, bool vlan_on);
|
||||
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
|
||||
bool vlan_on, bool *vfta_changed);
|
||||
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
|
||||
u8 ver);
|
||||
s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
|
||||
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
|
||||
s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
|
||||
u16 *firmware_version);
|
||||
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
|
||||
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
|
||||
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
|
||||
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
|
||||
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
|
||||
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
|
||||
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
|
||||
union ixgbe_atr_hash_dword input,
|
||||
union ixgbe_atr_hash_dword common,
|
||||
u8 queue);
|
||||
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
|
||||
union ixgbe_atr_input *input_mask);
|
||||
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
|
||||
union ixgbe_atr_input *input,
|
||||
u16 soft_id, u8 queue);
|
||||
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
|
||||
union ixgbe_atr_input *input,
|
||||
u16 soft_id);
|
||||
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
|
||||
union ixgbe_atr_input *input,
|
||||
union ixgbe_atr_input *mask,
|
||||
u16 soft_id,
|
||||
u8 queue);
|
||||
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
|
||||
union ixgbe_atr_input *mask);
|
||||
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
|
||||
union ixgbe_atr_hash_dword common);
|
||||
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
|
||||
u8 *data);
|
||||
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
|
||||
u8 data);
|
||||
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
|
||||
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
|
||||
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
|
||||
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
|
||||
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
|
||||
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
|
||||
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
|
||||
u16 *wwpn_prefix);
|
||||
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
|
||||
|
||||
#endif /* _IXGBE_API_H_ */
|
File diff suppressed because it is too large
@ -1,125 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_COMMON_H_
|
||||
#define _IXGBE_COMMON_H_
|
||||
|
||||
#include "ixgbe_type.h"
|
||||
|
||||
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
|
||||
u32 pba_num_size);
|
||||
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
|
||||
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
|
||||
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
|
||||
|
||||
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
|
||||
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
|
||||
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
|
||||
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 *data);
|
||||
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
|
||||
u16 words, u16 *data);
|
||||
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
|
||||
u16 *checksum_val);
|
||||
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
|
||||
|
||||
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
|
||||
u32 enable_addr);
|
||||
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
|
||||
u32 mc_addr_count,
|
||||
ixgbe_mc_addr_itr func, bool clear);
|
||||
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
|
||||
u32 addr_count, ixgbe_mc_addr_itr func);
|
||||
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
|
||||
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
|
||||
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
|
||||
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
|
||||
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
|
||||
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
|
||||
|
||||
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
|
||||
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
|
||||
|
||||
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
|
||||
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
|
||||
|
||||
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
|
||||
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
|
||||
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
|
||||
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
|
||||
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
|
||||
u32 vind, bool vlan_on);
|
||||
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
|
||||
bool vlan_on, bool *vfta_changed);
|
||||
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
|
||||
|
||||
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed,
|
||||
bool *link_up, bool link_up_wait_to_complete);
|
||||
|
||||
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
|
||||
u16 *wwpn_prefix);
|
||||
|
||||
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
|
||||
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
|
||||
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
|
||||
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
|
||||
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
|
||||
int strategy);
|
||||
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
|
||||
u8 build, u8 ver);
|
||||
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
|
||||
|
||||
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
|
||||
#define IXGBE_EMC_INTERNAL_DATA 0x00
|
||||
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
|
||||
#define IXGBE_EMC_DIODE1_DATA 0x01
|
||||
#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
|
||||
#define IXGBE_EMC_DIODE2_DATA 0x23
|
||||
#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
|
||||
#define IXGBE_EMC_DIODE3_DATA 0x2A
|
||||
#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
|
||||
|
||||
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
|
||||
#endif /* IXGBE_COMMON */
|
@ -1,153 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_DCB_H_
|
||||
#define _IXGBE_DCB_H_
|
||||
|
||||
|
||||
#include "ixgbe_type.h"
|
||||
|
||||
/* DCB defines */
|
||||
/* DCB credit calculation defines */
|
||||
#define IXGBE_DCB_CREDIT_QUANTUM 64
|
||||
#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
|
||||
#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
|
||||
#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
|
||||
|
||||
/* 513 for 32KB TSO packet */
|
||||
#define IXGBE_DCB_MIN_TSO_CREDIT \
|
||||
((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
|
||||
|
||||
/* DCB configuration defines */
|
||||
#define IXGBE_DCB_MAX_USER_PRIORITY 8
|
||||
#define IXGBE_DCB_MAX_BW_GROUP 8
|
||||
#define IXGBE_DCB_BW_PERCENT 100
|
||||
|
||||
#define IXGBE_DCB_TX_CONFIG 0
|
||||
#define IXGBE_DCB_RX_CONFIG 1
|
||||
|
||||
/* DCB capability defines */
|
||||
#define IXGBE_DCB_PG_SUPPORT 0x00000001
|
||||
#define IXGBE_DCB_PFC_SUPPORT 0x00000002
|
||||
#define IXGBE_DCB_BCN_SUPPORT 0x00000004
|
||||
#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
|
||||
#define IXGBE_DCB_GSP_SUPPORT 0x00000010
|
||||
|
||||
struct ixgbe_dcb_support {
|
||||
u32 capabilities; /* DCB capabilities */
|
||||
|
||||
/* Each bit represents a number of TCs configurable in the hw.
|
||||
* If 8 traffic classes can be configured, the value is 0x80. */
|
||||
u8 traffic_classes;
|
||||
u8 pfc_traffic_classes;
|
||||
};
|
||||
|
||||
enum ixgbe_dcb_tsa {
|
||||
ixgbe_dcb_tsa_ets = 0,
|
||||
ixgbe_dcb_tsa_group_strict_cee,
|
||||
ixgbe_dcb_tsa_strict
|
||||
};
|
||||
|
||||
/* Traffic class bandwidth allocation per direction */
|
||||
struct ixgbe_dcb_tc_path {
|
||||
u8 bwg_id; /* Bandwidth Group (BWG) ID */
|
||||
u8 bwg_percent; /* % of BWG's bandwidth */
|
||||
u8 link_percent; /* % of link bandwidth */
|
||||
u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
|
||||
u16 data_credits_refill; /* Credit refill amount in 64B granularity */
|
||||
u16 data_credits_max; /* Max credits for a configured packet buffer
|
||||
* in 64B granularity.*/
|
||||
enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
|
||||
};
|
||||
|
||||
enum ixgbe_dcb_pfc {
|
||||
ixgbe_dcb_pfc_disabled = 0,
|
||||
ixgbe_dcb_pfc_enabled,
|
||||
ixgbe_dcb_pfc_enabled_txonly,
|
||||
ixgbe_dcb_pfc_enabled_rxonly
|
||||
};
|
||||
|
||||
/* Traffic class configuration */
|
||||
struct ixgbe_dcb_tc_config {
|
||||
struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
|
||||
enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
|
||||
|
||||
u16 desc_credits_max; /* For Tx Descriptor arbitration */
|
||||
u8 tc; /* Traffic class (TC) */
|
||||
};
|
||||
|
||||
enum ixgbe_dcb_pba {
|
||||
/* PBA[0-7] each use 64KB FIFO */
|
||||
ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
|
||||
/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
|
||||
ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
|
||||
};
|
||||
|
||||
struct ixgbe_dcb_num_tcs {
|
||||
u8 pg_tcs;
|
||||
u8 pfc_tcs;
|
||||
};
|
||||
|
||||
struct ixgbe_dcb_config {
|
||||
struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
|
||||
struct ixgbe_dcb_support support;
|
||||
struct ixgbe_dcb_num_tcs num_tcs;
|
||||
u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
|
||||
bool pfc_mode_enable;
|
||||
bool round_robin_enable;
|
||||
|
||||
enum ixgbe_dcb_pba rx_pba_cfg;
|
||||
|
||||
u32 dcb_cfg_version; /* Not used...OS-specific? */
|
||||
u32 link_speed; /* For bandwidth allocation validation purpose */
|
||||
bool vt_mode;
|
||||
};
|
||||
|
||||
/* DCB driver APIs */
|
||||
|
||||
/* DCB rule checking */
|
||||
s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
|
||||
|
||||
/* DCB credits calculation */
|
||||
s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
|
||||
s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
|
||||
struct ixgbe_dcb_config *, u32, u8);
|
||||
|
||||
/* DCB PFC */
|
||||
s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
|
||||
s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
|
||||
|
||||
/* DCB stats */
|
||||
s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
|
||||
s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
|
||||
s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
|
||||
|
||||
/* DCB config arbiters */
|
||||
s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
|
||||
struct ixgbe_dcb_config *);
|
||||
s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
|
||||
struct ixgbe_dcb_config *);
|
||||
s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
|
||||
struct ixgbe_dcb_config *);
|
||||
|
||||
/* DCB unpack routines */
|
||||
void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
|
||||
void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
|
||||
void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
|
||||
void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
|
||||
void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
|
||||
void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
|
||||
|
||||
/* DCB initialization */
|
||||
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
|
||||
s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
|
||||
#endif /* _IXGBE_DCB_H_ */
|
File diff suppressed because it is too large
@ -1,76 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_FCOE_H
|
||||
#define _IXGBE_FCOE_H
|
||||
|
||||
#ifdef IXGBE_FCOE
|
||||
|
||||
#include <scsi/fc/fc_fs.h>
|
||||
#include <scsi/fc/fc_fcoe.h>
|
||||
|
||||
/* shift bits within STAT for FCSTAT */
|
||||
#define IXGBE_RXDADV_FCSTAT_SHIFT 4
|
||||
|
||||
/* ddp user buffer */
|
||||
#define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */
|
||||
#define IXGBE_FCPTR_ALIGN 16
|
||||
#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t))
|
||||
#define IXGBE_FCBUFF_4KB 0x0
|
||||
#define IXGBE_FCBUFF_8KB 0x1
|
||||
#define IXGBE_FCBUFF_16KB 0x2
|
||||
#define IXGBE_FCBUFF_64KB 0x3
|
||||
#define IXGBE_FCBUFF_MAX 65536 /* 64KB max */
|
||||
#define IXGBE_FCBUFF_MIN 4096 /* 4KB min */
|
||||
#define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */
|
||||
|
||||
/* Default traffic class to use for FCoE */
|
||||
#define IXGBE_FCOE_DEFTC 3
|
||||
|
||||
/* fcerr */
|
||||
#define IXGBE_FCERR_BADCRC 0x00100000
|
||||
#define IXGBE_FCERR_EOFSOF 0x00200000
|
||||
#define IXGBE_FCERR_NOFIRST 0x00300000
|
||||
#define IXGBE_FCERR_OOOSEQ 0x00400000
|
||||
#define IXGBE_FCERR_NODMA 0x00500000
|
||||
#define IXGBE_FCERR_PKTLOST 0x00600000
|
||||
|
||||
/* FCoE DDP for target mode */
|
||||
#define __IXGBE_FCOE_TARGET 1
|
||||
|
||||
struct ixgbe_fcoe_ddp {
|
||||
int len;
|
||||
u32 err;
|
||||
unsigned int sgc;
|
||||
struct scatterlist *sgl;
|
||||
dma_addr_t udp;
|
||||
u64 *udl;
|
||||
struct pci_pool *pool;
|
||||
};
|
||||
|
||||
struct ixgbe_fcoe {
|
||||
struct pci_pool **pool;
|
||||
atomic_t refcnt;
|
||||
spinlock_t lock;
|
||||
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
|
||||
unsigned char *extra_ddp_buffer;
|
||||
dma_addr_t extra_ddp_buffer_dma;
|
||||
u64 __percpu *pcpu_noddp;
|
||||
u64 __percpu *pcpu_noddp_ext_buff;
|
||||
unsigned long mode;
|
||||
u8 tc;
|
||||
u8 up;
|
||||
u8 up_set;
|
||||
};
|
||||
#endif /* IXGBE_FCOE */
|
||||
|
||||
#endif /* _IXGBE_FCOE_H */
|
File diff suppressed because it is too large
@ -1,90 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_MBX_H_
|
||||
#define _IXGBE_MBX_H_
|
||||
|
||||
#include "ixgbe_type.h"
|
||||
|
||||
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
|
||||
#define IXGBE_ERR_MBX -100
|
||||
|
||||
#define IXGBE_VFMAILBOX 0x002FC
|
||||
#define IXGBE_VFMBMEM 0x00200
|
||||
|
||||
/* Define mailbox register bits */
|
||||
#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
|
||||
#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
|
||||
#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
|
||||
#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
|
||||
#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
|
||||
#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
|
||||
#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
|
||||
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
|
||||
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
|
||||
|
||||
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
|
||||
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
|
||||
#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
|
||||
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
|
||||
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
|
||||
|
||||
#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
|
||||
#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
|
||||
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
|
||||
#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
|
||||
|
||||
|
||||
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
|
||||
* PF. The reverse is true if it is IXGBE_PF_*.
|
||||
* Message ACK's are the value or'd with 0xF0000000
|
||||
*/
|
||||
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
|
||||
* this are the ACK */
|
||||
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
|
||||
* this are the NACK */
|
||||
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
|
||||
* clear to send requests */
|
||||
#define IXGBE_VT_MSGINFO_SHIFT 16
|
||||
/* bits 23:16 are used for extra info for certain messages */
|
||||
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
|
||||
|
||||
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
|
||||
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
|
||||
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
|
||||
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
|
||||
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
|
||||
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
|
||||
|
||||
/* length of permanent address message returned from PF */
|
||||
#define IXGBE_VF_PERMADDR_MSG_LEN 4
|
||||
/* word in permanent address message with the current multicast type */
|
||||
#define IXGBE_VF_MC_TYPE_WORD 3
|
||||
|
||||
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
|
||||
|
||||
|
||||
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
|
||||
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
|
||||
|
||||
s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
|
||||
s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
|
||||
s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
|
||||
s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
|
||||
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
|
||||
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
|
||||
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
|
||||
void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
|
||||
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
|
||||
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
|
||||
|
||||
#endif /* _IXGBE_MBX_H_ */
|
@ -1,117 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
|
||||
/* glue for the OS independent part of ixgbe
|
||||
* includes register access macros
|
||||
*/
|
||||
|
||||
#ifndef _IXGBE_OSDEP_H_
|
||||
#define _IXGBE_OSDEP_H_
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/sched.h>
|
||||
#include "kcompat.h"
|
||||
|
||||
|
||||
#ifndef msleep
|
||||
#define msleep(x) do { if (in_interrupt()) { \
|
||||
/* Don't mdelay in interrupt context! */ \
|
||||
BUG(); \
|
||||
} else { \
|
||||
msleep(x); \
|
||||
} } while (0)
|
||||
|
||||
#endif
|
||||
|
||||
#undef ASSERT
|
||||
|
||||
#ifdef DBG
|
||||
#define hw_dbg(hw, S, A...) printk(KERN_DEBUG S, ## A)
|
||||
#else
|
||||
#define hw_dbg(hw, S, A...) do {} while (0)
|
||||
#endif
|
||||
|
||||
#define e_dev_info(format, arg...) \
|
||||
dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg)
|
||||
#define e_dev_warn(format, arg...) \
|
||||
dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg)
|
||||
#define e_dev_err(format, arg...) \
|
||||
dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg)
|
||||
#define e_dev_notice(format, arg...) \
|
||||
dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg)
|
||||
#define e_info(msglvl, format, arg...) \
|
||||
netif_info(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
#define e_err(msglvl, format, arg...) \
|
||||
netif_err(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
#define e_warn(msglvl, format, arg...) \
|
||||
netif_warn(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
#define e_crit(msglvl, format, arg...) \
|
||||
netif_crit(adapter, msglvl, adapter->netdev, format, ## arg)
|
||||
|
||||
|
||||
#ifdef DBG
|
||||
#define IXGBE_WRITE_REG(a, reg, value) do {\
|
||||
switch (reg) { \
|
||||
case IXGBE_EIMS: \
|
||||
case IXGBE_EIMC: \
|
||||
case IXGBE_EIAM: \
|
||||
case IXGBE_EIAC: \
|
||||
case IXGBE_EICR: \
|
||||
case IXGBE_EICS: \
|
||||
printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, \
|
||||
reg, (u32)(value)); \
|
||||
default: \
|
||||
break; \
|
||||
} \
|
||||
writel((value), ((a)->hw_addr + (reg))); \
|
||||
} while (0)
|
||||
#else
|
||||
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
|
||||
#endif
|
||||
|
||||
#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
|
||||
|
||||
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
|
||||
writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
|
||||
|
||||
#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
|
||||
readl((a)->hw_addr + (reg) + ((offset) << 2)))
|
||||
|
||||
#ifndef writeq
|
||||
#define writeq(val, addr) do { writel((u32) (val), addr); \
|
||||
writel((u32) (val >> 32), (addr + 4)); \
|
||||
} while (0);
|
||||
#endif
|
||||
|
||||
#define IXGBE_WRITE_REG64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
|
||||
|
||||
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
|
||||
struct ixgbe_hw;
|
||||
extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
|
||||
extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
|
||||
extern void ewarn(struct ixgbe_hw *hw, const char *str, u32 status);
|
||||
|
||||
#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word
|
||||
#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word
|
||||
#define IXGBE_EEPROM_GRANT_ATTEMPS 100
|
||||
#define IXGBE_HTONL(_i) htonl(_i)
|
||||
#define IXGBE_NTOHL(_i) ntohl(_i)
|
||||
#define IXGBE_NTOHS(_i) ntohs(_i)
|
||||
#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
|
||||
#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i)
|
||||
#define EWARN(H, W, S) ewarn(H, W, S)
|
||||
|
||||
#endif /* _IXGBE_OSDEP_H_ */
|
File diff suppressed because it is too large
@ -1,122 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#ifndef _IXGBE_PHY_H_
|
||||
#define _IXGBE_PHY_H_
|
||||
|
||||
#include "ixgbe_type.h"
|
||||
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
|
||||
|
||||
/* EEPROM byte offsets */
|
||||
#define IXGBE_SFF_IDENTIFIER 0x0
|
||||
#define IXGBE_SFF_IDENTIFIER_SFP 0x3
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
|
||||
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
|
||||
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
|
||||
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
|
||||
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
|
||||
|
||||
/* Bitmasks */
|
||||
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
|
||||
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
|
||||
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
|
||||
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
|
||||
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
|
||||
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
|
||||
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
|
||||
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
|
||||
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
|
||||
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
|
||||
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
|
||||
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
|
||||
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
|
||||
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
|
||||
|
||||
/* Flow control defines */
|
||||
#define IXGBE_TAF_SYM_PAUSE 0x400
|
||||
#define IXGBE_TAF_ASM_PAUSE 0x800
|
||||
|
||||
/* Bit-shift macros */
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
|
||||
#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
|
||||
|
||||
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
|
||||
#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
|
||||
#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
|
||||
#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
|
||||
#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
|
||||
|
||||
/* I2C SDA and SCL timing parameters for standard mode */
|
||||
#define IXGBE_I2C_T_HD_STA 4
|
||||
#define IXGBE_I2C_T_LOW 5
|
||||
#define IXGBE_I2C_T_HIGH 4
|
||||
#define IXGBE_I2C_T_SU_STA 5
|
||||
#define IXGBE_I2C_T_HD_DATA 5
|
||||
#define IXGBE_I2C_T_SU_DATA 1
|
||||
#define IXGBE_I2C_T_RISE 1
|
||||
#define IXGBE_I2C_T_FALL 1
|
||||
#define IXGBE_I2C_T_SU_STO 4
|
||||
#define IXGBE_I2C_T_BUF 5
|
||||
|
||||
#define IXGBE_TN_LASI_STATUS_REG 0x9005
|
||||
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
|
||||
|
||||
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
|
||||
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
|
||||
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
|
||||
s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
|
||||
u32 device_type, u16 *phy_data);
|
||||
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
|
||||
u32 device_type, u16 phy_data);
|
||||
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed speed,
|
||||
bool autoneg,
|
||||
bool autoneg_wait_to_complete);
|
||||
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed,
|
||||
bool *autoneg);
|
||||
|
||||
/* PHY specific */
|
||||
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed,
|
||||
bool *link_up);
|
||||
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
|
||||
u16 *firmware_version);
|
||||
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
|
||||
u16 *firmware_version);
|
||||
|
||||
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
|
||||
u16 *list_offset,
|
||||
u16 *data_offset);
|
||||
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
|
||||
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 *data);
|
||||
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
|
||||
u8 dev_addr, u8 data);
|
||||
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
|
||||
u8 *eeprom_data);
|
||||
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
|
||||
u8 eeprom_data);
|
||||
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
|
||||
#endif /* _IXGBE_PHY_H_ */
|
File diff suppressed because it is too large
@ -1,922 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*******************************************************************************
|
||||
|
||||
Intel 10 Gigabit PCI Express Linux driver
|
||||
Copyright(c) 1999 - 2012 Intel Corporation.
|
||||
|
||||
Contact Information:
|
||||
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
||||
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
|
||||
*******************************************************************************/
|
||||
|
||||
#include "ixgbe_x540.h"
|
||||
#include "ixgbe_type.h"
|
||||
#include "ixgbe_api.h"
|
||||
#include "ixgbe_common.h"
|
||||
#include "ixgbe_phy.h"
|
||||
|
||||
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
|
||||
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
|
||||
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
|
||||
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
|
||||
|
||||
/**
|
||||
* ixgbe_init_ops_X540 - Inits func ptrs and MAC type
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Initialize the function pointers and assign the MAC type for X540.
|
||||
* Does not touch the hardware.
|
||||
**/
|
||||
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
struct ixgbe_mac_info *mac = &hw->mac;
|
||||
struct ixgbe_phy_info *phy = &hw->phy;
|
||||
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
|
||||
s32 ret_val;
|
||||
|
||||
ret_val = ixgbe_init_phy_ops_generic(hw);
|
||||
ret_val = ixgbe_init_ops_generic(hw);
|
||||
|
||||
|
||||
/* EEPROM */
|
||||
eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
|
||||
eeprom->ops.read = &ixgbe_read_eerd_X540;
|
||||
eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
|
||||
eeprom->ops.write = &ixgbe_write_eewr_X540;
|
||||
eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
|
||||
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
|
||||
eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
|
||||
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
|
||||
|
||||
/* PHY */
|
||||
phy->ops.init = &ixgbe_init_phy_ops_generic;
|
||||
phy->ops.reset = NULL;
|
||||
|
||||
/* MAC */
|
||||
mac->ops.reset_hw = &ixgbe_reset_hw_X540;
|
||||
mac->ops.get_media_type = &ixgbe_get_media_type_X540;
|
||||
mac->ops.get_supported_physical_layer =
|
||||
&ixgbe_get_supported_physical_layer_X540;
|
||||
mac->ops.read_analog_reg8 = NULL;
|
||||
mac->ops.write_analog_reg8 = NULL;
|
||||
mac->ops.start_hw = &ixgbe_start_hw_X540;
|
||||
mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
|
||||
mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
|
||||
mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
|
||||
mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
|
||||
mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
|
||||
mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
|
||||
mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
|
||||
mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
|
||||
mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
|
||||
|
||||
/* RAR, Multicast, VLAN */
|
||||
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
|
||||
mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
|
||||
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
|
||||
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
|
||||
mac->rar_highwater = 1;
|
||||
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
|
||||
mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
|
||||
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
|
||||
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
|
||||
mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
|
||||
mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
|
||||
|
||||
/* Link */
|
||||
mac->ops.get_link_capabilities =
|
||||
&ixgbe_get_copper_link_capabilities_generic;
|
||||
mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
|
||||
mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
|
||||
mac->ops.check_link = &ixgbe_check_mac_link_generic;
|
||||
|
||||
mac->mcft_size = 128;
|
||||
mac->vft_size = 128;
|
||||
mac->num_rar_entries = 128;
|
||||
mac->rx_pb_size = 384;
|
||||
mac->max_tx_queues = 128;
|
||||
mac->max_rx_queues = 128;
|
||||
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
|
||||
|
||||
/*
|
||||
* FWSM register
|
||||
* ARC supported; valid only if manageability features are
|
||||
* enabled.
|
||||
*/
|
||||
mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
|
||||
IXGBE_FWSM_MODE_MASK) ? true : false;
|
||||
|
||||
//hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
|
||||
|
||||
/* LEDs */
|
||||
mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
|
||||
mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
|
||||
|
||||
/* Manageability interface */
|
||||
mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_get_link_capabilities_X540 - Determines link capabilities
|
||||
* @hw: pointer to hardware structure
|
||||
* @speed: pointer to link speed
|
||||
* @autoneg: true when autoneg or autotry is enabled
|
||||
*
|
||||
* Determines the link capabilities by reading the AUTOC register.
|
||||
**/
|
||||
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed *speed,
|
||||
bool *autoneg)
|
||||
{
|
||||
ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_get_media_type_X540 - Get media type
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Returns the media type (fiber, copper, backplane)
|
||||
**/
|
||||
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
return ixgbe_media_type_copper;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
|
||||
* @hw: pointer to hardware structure
|
||||
* @speed: new link speed
|
||||
* @autoneg: true if autonegotiation enabled
|
||||
* @autoneg_wait_to_complete: true when waiting for completion is needed
|
||||
**/
|
||||
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
|
||||
ixgbe_link_speed speed, bool autoneg,
|
||||
bool autoneg_wait_to_complete)
|
||||
{
|
||||
return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
|
||||
autoneg_wait_to_complete);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_reset_hw_X540 - Perform hardware reset
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Resets the hardware by resetting the transmit and receive units, masks
|
||||
* and clears all interrupts, and perform a reset.
|
||||
**/
|
||||
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
s32 status = 0;
|
||||
|
||||
/*
|
||||
* Userland DPDK takes ownership of the device
|
||||
* Kernel driver here is used as a simple path for ethtool only
|
||||
* Won't really reset the device anyway
|
||||
*/
|
||||
#if 0
|
||||
u32 ctrl, i;
|
||||
|
||||
/* Call adapter stop to disable tx/rx and clear interrupts */
|
||||
status = hw->mac.ops.stop_adapter(hw);
|
||||
if (status != 0)
|
||||
goto reset_hw_out;
|
||||
|
||||
/* flush pending Tx transactions */
|
||||
ixgbe_clear_tx_pending(hw);
|
||||
|
||||
mac_reset_top:
|
||||
ctrl = IXGBE_CTRL_RST;
|
||||
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
|
||||
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
|
||||
IXGBE_WRITE_FLUSH(hw);
|
||||
|
||||
/* Poll for reset bit to self-clear indicating reset is complete */
|
||||
for (i = 0; i < 10; i++) {
|
||||
udelay(1);
|
||||
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
|
||||
if (!(ctrl & IXGBE_CTRL_RST_MASK))
|
||||
break;
|
||||
}
|
||||
|
||||
if (ctrl & IXGBE_CTRL_RST_MASK) {
|
||||
status = IXGBE_ERR_RESET_FAILED;
|
||||
hw_dbg(hw, "Reset polling failed to complete.\n");
|
||||
}
|
||||
msleep(100);
|
||||
|
||||
/*
|
||||
* Double resets are required for recovery from certain error
|
||||
* conditions. Between resets, it is necessary to stall to allow time
|
||||
* for any pending HW events to complete.
|
||||
*/
|
||||
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
|
||||
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
|
||||
goto mac_reset_top;
|
||||
}
|
||||
|
||||
/* Set the Rx packet buffer size. */
|
||||
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
|
||||
|
||||
#endif
|
||||
|
||||
/* Store the permanent mac address */
|
||||
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
|
||||
|
||||
/*
|
||||
* Store MAC address from RAR0, clear receive address registers, and
|
||||
* clear the multicast table. Also reset num_rar_entries to 128,
|
||||
* since we modify this value when programming the SAN MAC address.
|
||||
*/
|
||||
hw->mac.num_rar_entries = 128;
|
||||
hw->mac.ops.init_rx_addrs(hw);
|
||||
|
||||
/* Store the permanent SAN mac address */
|
||||
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
|
||||
|
||||
/* Add the SAN MAC address to the RAR only if it's a valid address */
|
||||
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
|
||||
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
|
||||
hw->mac.san_addr, 0, IXGBE_RAH_AV);
|
||||
|
||||
/* Save the SAN MAC RAR index */
|
||||
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
|
||||
|
||||
/* Reserve the last RAR for the SAN MAC address */
|
||||
hw->mac.num_rar_entries--;
|
||||
}
|
||||
|
||||
/* Store the alternative WWNN/WWPN prefix */
|
||||
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
|
||||
&hw->mac.wwpn_prefix);
|
||||
|
||||
//reset_hw_out:
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Starts the hardware using the generic start_hw function
|
||||
* and the generation start_hw function.
|
||||
* Then performs revision-specific operations, if any.
|
||||
**/
|
||||
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
s32 ret_val = 0;
|
||||
|
||||
ret_val = ixgbe_start_hw_generic(hw);
|
||||
if (ret_val != 0)
|
||||
goto out;
|
||||
|
||||
ret_val = ixgbe_start_hw_gen2(hw);
|
||||
|
||||
out:
|
||||
return ret_val;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Determines physical layer capabilities of the current configuration.
|
||||
**/
|
||||
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
|
||||
u16 ext_ability = 0;
|
||||
|
||||
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
|
||||
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
|
||||
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
|
||||
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
|
||||
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
|
||||
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
|
||||
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
|
||||
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
|
||||
|
||||
return physical_layer;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
|
||||
* @hw: pointer to hardware structure
|
||||
*
|
||||
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
|
||||
* ixgbe_hw struct in order to set up EEPROM access.
|
||||
**/
|
||||
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
|
||||
{
|
||||
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
|
||||
u32 eec;
|
||||
u16 eeprom_size;
|
||||
|
||||
if (eeprom->type == ixgbe_eeprom_uninitialized) {
|
||||
eeprom->semaphore_delay = 10;
|
||||
eeprom->type = ixgbe_flash;
|
||||
|
||||
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
|
||||
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
|
||||
IXGBE_EEC_SIZE_SHIFT);
|
||||
eeprom->word_size = 1 << (eeprom_size +
|
||||
IXGBE_EEPROM_WORD_SIZE_SHIFT);
|
||||
|
||||
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
|
||||
eeprom->type, eeprom->word_size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_read_eerd_X540- Read EEPROM word using EERD
|
||||
* @hw: pointer to hardware structure
|
||||
* @offset: offset of word in the EEPROM to read
|
||||
* @data: word read from the EEPROM
|
||||
*
|
||||
* Reads a 16 bit word from the EEPROM using the EERD register.
|
||||
**/
|
||||
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
|
||||
{
|
||||
s32 status = 0;
|
||||
|
||||
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
|
||||
0)
|
||||
status = ixgbe_read_eerd_generic(hw, offset, data);
|
||||
else
|
||||
status = IXGBE_ERR_SWFW_SYNC;
|
||||
|
||||
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD
|
||||
* @hw: pointer to hardware structure
|
||||
* @offset: offset of word in the EEPROM to read
|
||||
* @words: number of words
|
||||
* @data: word(s) read from the EEPROM
|
||||
*
|
||||
* Reads a 16 bit word(s) from the EEPROM using the EERD register.
|
||||
**/
|
||||
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
|
||||
u16 offset, u16 words, u16 *data)
|
||||
{
|
||||
s32 status = 0;
|
||||
|
||||
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
|
||||
0)
|
||||
status = ixgbe_read_eerd_buffer_generic(hw, offset,
|
||||
words, data);
|
||||
else
|
||||
status = IXGBE_ERR_SWFW_SYNC;
|
||||
|
||||
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
|
||||
* @hw: pointer to hardware structure
|
||||
* @offset: offset of word in the EEPROM to write
|
||||
* @data: word write to the EEPROM
|
||||
*
|
||||
* Write a 16 bit word to the EEPROM using the EEWR register.
|
||||
**/
|
||||
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
|
||||
{
|
||||
s32 status = 0;
|
||||
|
||||
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
|
||||
0)
|
||||
status = ixgbe_write_eewr_generic(hw, offset, data);
|
||||
else
|
||||
status = IXGBE_ERR_SWFW_SYNC;
|
||||
|
||||
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
|
||||
* @hw: pointer to hardware structure
|
||||
* @offset: offset of word in the EEPROM to write
|
||||
* @words: number of words
|
||||
* @data: word(s) write to the EEPROM
|
||||
*
|
||||
* Write a 16 bit word(s) to the EEPROM using the EEWR register.
|
||||
**/
|
||||
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
|
||||
u16 offset, u16 words, u16 *data)
|
||||
{
|
||||
s32 status = 0;
|
||||
|
||||
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
|
||||
0)
|
||||
status = ixgbe_write_eewr_buffer_generic(hw, offset,
|
||||
words, data);
|
||||
else
|
||||
status = IXGBE_ERR_SWFW_SYNC;
|
||||
|
||||
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 *
 * This function does not use synchronization for EERD and EEWR. It can
 * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
 *
 * @hw: pointer to hardware structure
 **/
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;

	/*
	 * Do not use hw->eeprom.ops.read because we do not want to take
	 * the synchronization semaphores here. Instead use
	 * ixgbe_read_eerd_generic
	 */

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += word;
	}

	/*
	 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
	 */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
			continue;

		if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}

		/* Skip pointer section if the pointer is invalid. */
		if (pointer == 0xFFFF || pointer == 0 ||
		    pointer >= hw->eeprom.word_size)
			continue;

		if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}

		/* Skip pointer section if length is invalid. */
		if (length == 0xFFFF || length == 0 ||
		    (pointer + length) >= hw->eeprom.word_size)
			continue;

		for (j = pointer+1; j <= pointer+length; j++) {
			if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
				hw_dbg(hw, "EEPROM read failed\n");
				break;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}

/**
 * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
					u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status != 0) {
		hw_dbg(hw, "EEPROM read failed\n");
		goto out;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		/*
		 * Do not use hw->eeprom.ops.read because we do not want to take
		 * the synchronization semaphores twice here.
		 */
		ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
					&read_checksum);

		/*
		 * Verify read checksum from EEPROM is the same as
		 * calculated checksum
		 */
		if (read_checksum != checksum)
			status = IXGBE_ERR_EEPROM_CHECKSUM;

		/* If the user cares, return the calculated checksum */
		if (checksum_val)
			*checksum_val = checksum;
	} else {
		status = IXGBE_ERR_SWFW_SYNC;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
out:
	return status;
}

/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing EEPROM to shadow RAM using EEWR register, software calculates
 * checksum and updates the EEPROM and instructs the hardware to update
 * the flash.
 **/
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);

	if (status != 0)
		hw_dbg(hw, "EEPROM read failed\n");

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
		checksum = hw->eeprom.ops.calc_checksum(hw);

		/*
		 * Do not use hw->eeprom.ops.write because we do not want to
		 * take the synchronization semaphores twice here.
		 */
		status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
						  checksum);

		if (status == 0)
			status = ixgbe_update_flash_X540(hw);
		else
			status = IXGBE_ERR_SWFW_SYNC;
	}

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
 * EEPROM from shadow RAM to the flash device.
 **/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
	u32 flup;
	s32 status = IXGBE_ERR_EEPROM;

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == IXGBE_ERR_EEPROM) {
		hw_dbg(hw, "Flash update time out\n");
		goto out;
	}

	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == 0)
		hw_dbg(hw, "Flash update complete\n");
	else
		hw_dbg(hw, "Flash update time out\n");

	if (hw->revision_id == 0) {
		flup = IXGBE_READ_REG(hw, IXGBE_EEC);

		if (flup & IXGBE_EEC_SEC1VAL) {
			flup |= IXGBE_EEC_FLUP;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
		}

		status = ixgbe_poll_flash_update_done_X540(hw);
		if (status == 0)
			hw_dbg(hw, "Flash update complete\n");
		else
			hw_dbg(hw, "Flash update time out\n");
	}
out:
	return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
	u32 i;
	u32 reg;
	s32 status = IXGBE_ERR_EEPROM;

	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (reg & IXGBE_EEC_FLUDONE) {
			status = 0;
			break;
		}
		udelay(5);
	}
	return status;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 5;
	u32 hwmask = 0;
	u32 timeout = 200;
	u32 i;
	s32 ret_val = 0;

	if (swmask == IXGBE_GSSR_EEP_SM)
		hwmask = IXGBE_GSSR_FLASH_SM;

	/* SW only mask doesn't have FW bit pair */
	if (swmask == IXGBE_GSSR_SW_MNG_SM)
		fwmask = 0;

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_swfw_sync_semaphore(hw)) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
			swfw_sync |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
			ixgbe_release_swfw_sync_semaphore(hw);
			msleep(5);
			goto out;
		} else {
			/*
			 * Firmware currently using resource (fwmask), hardware
			 * currently using resource (hwmask), or other software
			 * thread currently using resource (swmask)
			 */
			ixgbe_release_swfw_sync_semaphore(hw);
			msleep(5);
		}
	}

	/* Failed to get SW only semaphore */
	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
		ret_val = IXGBE_ERR_SWFW_SYNC;
		goto out;
	}

	/* If the resource is not released by the FW/HW the SW can assume that
	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
	 * of the requested resource(s) while ignoring the corresponding FW/HW
	 * bits in the SW_FW_SYNC register.
	 */
	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
	if (swfw_sync & (fwmask | hwmask)) {
		if (ixgbe_get_swfw_sync_semaphore(hw)) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync |= swmask;
		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
		ixgbe_release_swfw_sync_semaphore(hw);
		msleep(5);
	}

out:
	return ret_val;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;

	ixgbe_get_swfw_sync_semaphore(hw);

	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
	swfw_sync &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);

	ixgbe_release_swfw_sync_semaphore(hw);
	msleep(5);
}

/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 **/
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	/* Now get the semaphore between SW/FW through the REGSMP bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
			if (!(swsm & IXGBE_SWFW_REGSMP))
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW NVM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "REGSMP Software NVM semaphore not granted.\n");
			ixgbe_release_swfw_sync_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
	}

	return status;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
	swsm &= ~IXGBE_SWSM_SMBI;
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
	swsm &= ~IXGBE_SWFW_REGSMP;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_blink_led_start_X540 - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Devices that implement the version 2 interface:
 * X540
 **/
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;
	ixgbe_link_speed speed;
	bool link_up;

	/*
	 * Link should be up in order for the blink bit in the LED control
	 * register to work. Force link and speed in the MAC if link is down.
	 * This will be reversed when we stop the blinking.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (link_up == false) {
		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	}
	/* Set the LED to LINK_UP + BLINK. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Devices that implement the version 2 interface:
 * X540
 **/
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;

	/* Restore the LED to its default value. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	ledctl_reg &= ~IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);

	/* Unforce link and speed in the MAC. */
	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
@ -1,43 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_X540_H_
#define _IXGBE_X540_H_

#include "ixgbe_type.h"

s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
			      bool autoneg, bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);

s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
				u16 *data);
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
				 u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);

s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);

s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
#endif /* _IXGBE_X540_H_ */
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,13 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>

kni_ixgbe_sources = files(
	'ixgbe_82598.c',
	'ixgbe_82599.c',
	'ixgbe_api.c',
	'ixgbe_common.c',
	'ixgbe_ethtool.c',
	'ixgbe_main.c',
	'ixgbe_phy.c',
	'ixgbe_x540.c',
	'kcompat.c')
@ -1,5 +0,0 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>

subdir('igb')
subdir('ixgbe')
@ -55,8 +55,6 @@ struct kni_dev {

	/* kni device */
	struct net_device *net_dev;
	struct net_device *lad_dev;
	struct pci_dev *pci_dev;

	/* queue for packets to be sent out */
	void *tx_q;
@ -100,11 +98,5 @@ void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
void kni_net_config_lo_mode(char *lo_str);
void kni_net_poll_resp(struct kni_dev *kni);
void kni_set_ethtool_ops(struct net_device *netdev);

int ixgbe_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
void ixgbe_kni_remove(struct pci_dev *pdev);
int igb_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
void igb_kni_remove(struct pci_dev *pdev);

#endif
@ -1,229 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(c) 2010-2014 Intel Corporation.
 */

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "kni_dev.h"

static int
kni_check_if_running(struct net_device *dev)
{
	struct kni_dev *priv = netdev_priv(dev);

	if (priv->lad_dev)
		return 0;
	else
		return -EOPNOTSUPP;
}

static void
kni_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_drvinfo(priv->lad_dev, info);
}

/* ETHTOOL_GLINKSETTINGS replaces ETHTOOL_GSET */
#ifndef ETHTOOL_GLINKSETTINGS
static int
kni_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_settings(priv->lad_dev, ecmd);
}
#endif

/* ETHTOOL_SLINKSETTINGS replaces ETHTOOL_SSET */
#ifndef ETHTOOL_SLINKSETTINGS
static int
kni_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->set_settings(priv->lad_dev, ecmd);
}
#endif

static void
kni_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_wol(priv->lad_dev, wol);
}

static int
kni_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->set_wol(priv->lad_dev, wol);
}

static int
kni_nway_reset(struct net_device *dev)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->nway_reset(priv->lad_dev);
}

static int
kni_get_eeprom_len(struct net_device *dev)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_eeprom_len(priv->lad_dev);
}

static int
kni_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_eeprom(priv->lad_dev, eeprom,
						      bytes);
}

static int
kni_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
	       u8 *bytes)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->set_eeprom(priv->lad_dev, eeprom,
						      bytes);
}

static void
kni_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_ringparam(priv->lad_dev, ring);
}

static int
kni_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->set_ringparam(priv->lad_dev, ring);
}

static void
kni_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_pauseparam(priv->lad_dev, pause);
}

static int
kni_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->set_pauseparam(priv->lad_dev,
							  pause);
}

static u32
kni_get_msglevel(struct net_device *dev)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_msglevel(priv->lad_dev);
}

static void
kni_set_msglevel(struct net_device *dev, u32 data)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->set_msglevel(priv->lad_dev, data);
}

static int
kni_get_regs_len(struct net_device *dev)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_regs_len(priv->lad_dev);
}

static void
kni_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_regs(priv->lad_dev, regs, p);
}

static void
kni_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_strings(priv->lad_dev, stringset,
						data);
}

static int
kni_get_sset_count(struct net_device *dev, int sset)
{
	struct kni_dev *priv = netdev_priv(dev);

	return priv->lad_dev->ethtool_ops->get_sset_count(priv->lad_dev, sset);
}

static void
kni_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct kni_dev *priv = netdev_priv(dev);

	priv->lad_dev->ethtool_ops->get_ethtool_stats(priv->lad_dev, stats,
						      data);
}

struct ethtool_ops kni_ethtool_ops = {
	.begin = kni_check_if_running,
	.get_drvinfo = kni_get_drvinfo,
#ifndef ETHTOOL_GLINKSETTINGS
	.get_settings = kni_get_settings,
#endif
#ifndef ETHTOOL_SLINKSETTINGS
	.set_settings = kni_set_settings,
#endif
	.get_regs_len = kni_get_regs_len,
	.get_regs = kni_get_regs,
	.get_wol = kni_get_wol,
	.set_wol = kni_set_wol,
	.nway_reset = kni_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = kni_get_eeprom_len,
	.get_eeprom = kni_get_eeprom,
	.set_eeprom = kni_set_eeprom,
	.get_ringparam = kni_get_ringparam,
	.set_ringparam = kni_set_ringparam,
	.get_pauseparam = kni_get_pauseparam,
	.set_pauseparam = kni_set_pauseparam,
	.get_msglevel = kni_get_msglevel,
	.set_msglevel = kni_set_msglevel,
	.get_strings = kni_get_strings,
	.get_sset_count = kni_get_sset_count,
	.get_ethtool_stats = kni_get_ethtool_stats,
};

void
kni_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &kni_ethtool_ops;
}
@ -29,9 +29,6 @@ MODULE_DESCRIPTION("Kernel Module for managing kni devices");

#define KNI_MAX_DEVICES 32

extern const struct pci_device_id ixgbe_pci_tbl[];
extern const struct pci_device_id igb_pci_tbl[];

/* loopback mode */
static char *lo_mode;

@ -182,15 +179,6 @@ kni_dev_remove(struct kni_dev *dev)
	if (!dev)
		return -ENODEV;

#ifdef RTE_KNI_KMOD_ETHTOOL
	if (dev->pci_dev) {
		if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
			ixgbe_kni_remove(dev->pci_dev);
		else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
			igb_kni_remove(dev->pci_dev);
	}
#endif

	if (dev->net_dev) {
		unregister_netdev(dev->net_dev);
		free_netdev(dev->net_dev);
@ -306,11 +294,6 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
	struct rte_kni_device_info dev_info;
	struct net_device *net_dev = NULL;
	struct kni_dev *kni, *dev, *n;
#ifdef RTE_KNI_KMOD_ETHTOOL
	struct pci_dev *found_pci = NULL;
	struct net_device *lad_dev = NULL;
	struct pci_dev *pci = NULL;
#endif

	pr_info("Creating kni...\n");
	/* Check the buffer size, to avoid warning */
@ -400,62 +383,15 @@ kni_ioctl_create(struct net *net, uint32_t ioctl_num,
		dev_info.function,
		dev_info.vendor_id,
		dev_info.device_id);
#ifdef RTE_KNI_KMOD_ETHTOOL
	pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);

	/* Support Ethtool */
	while (pci) {
		pr_debug("pci_bus: %02x:%02x:%02x\n",
			pci->bus->number,
			PCI_SLOT(pci->devfn),
			PCI_FUNC(pci->devfn));

		if ((pci->bus->number == dev_info.bus) &&
		    (PCI_SLOT(pci->devfn) == dev_info.devid) &&
		    (PCI_FUNC(pci->devfn) == dev_info.function)) {
			found_pci = pci;

			if (pci_match_id(ixgbe_pci_tbl, found_pci))
				ret = ixgbe_kni_probe(found_pci, &lad_dev);
			else if (pci_match_id(igb_pci_tbl, found_pci))
				ret = igb_kni_probe(found_pci, &lad_dev);
			else
				ret = -1;

			pr_debug("PCI found: pci=0x%p, lad_dev=0x%p\n",
				pci, lad_dev);
			if (ret == 0) {
				kni->lad_dev = lad_dev;
				kni_set_ethtool_ops(kni->net_dev);
			} else {
				pr_err("Device not supported by ethtool");
				kni->lad_dev = NULL;
			}

			kni->pci_dev = found_pci;
			kni->device_id = dev_info.device_id;
			break;
		}
		pci = pci_get_device(dev_info.vendor_id,
				dev_info.device_id, pci);
	}
	if (pci)
		pci_dev_put(pci);
#endif

	if (kni->lad_dev)
		ether_addr_copy(net_dev->dev_addr, kni->lad_dev->dev_addr);
	else {
		/* if user has provided a valid mac address */
		if (is_valid_ether_addr(dev_info.mac_addr))
			memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
		else
			/*
			 * Generate random mac address. eth_random_addr() is the
			 * newer version of generating mac address in kernel.
			 */
			random_ether_addr(net_dev->dev_addr);
	}
	/* if user has provided a valid mac address */
	if (is_valid_ether_addr(dev_info.mac_addr))
		memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
	else
		/*
		 * Generate random mac address. eth_random_addr() is the
		 * newer version of generating mac address in kernel.
		 */
		random_ether_addr(net_dev->dev_addr);

	if (dev_info.mtu)
		net_dev->mtu = dev_info.mtu;
@ -1,20 +1,17 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>

subdir('ethtool')

kni_mkfile = custom_target('rte_kni_makefile',
	output: 'Makefile',
	command: ['touch', '@OUTPUT@'])

kni_sources = files(
	'kni_ethtool.c',
	'kni_misc.c',
	'kni_net.c',
	'Kbuild')

custom_target('rte_kni',
	input: kni_sources + kni_igb_sources + kni_ixgbe_sources,
	input: kni_sources,
	output: 'rte_kni.ko',
	command: ['make', '-j4', '-C', kernel_dir,
		'M=' + meson.current_build_dir(),
@ -23,9 +20,7 @@ custom_target('rte_kni',
		' -I' + meson.source_root() + '/lib/librte_eal/common/include' +
		' -I' + meson.source_root() + '/lib/librte_eal/linux/eal/include' +
		' -I' + meson.build_root() +
		' -I' + meson.current_source_dir() +
		' -I' + meson.current_source_dir() + '/ethtool/ixgbe' +
		' -I' + meson.current_source_dir() + '/ethtool/igb',
		' -I' + meson.current_source_dir(),
		'modules'],
	depends: kni_mkfile,
	console: true,
@ -242,11 +242,6 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	kni->ops.port_id = UINT16_MAX;

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
@ -63,8 +63,8 @@ struct rte_kni_conf {
	uint32_t core_id; /* Core ID to bind kernel thread on */
	uint16_t group_id; /* Group ID */
	unsigned mbuf_size; /* mbuf size */
	struct rte_pci_addr addr;
	struct rte_pci_id id;
	struct rte_pci_addr addr; /* deprecated */
	struct rte_pci_id id; /* deprecated */

	__extension__
	uint8_t force_bind : 1; /* Flag to bind kernel thread */
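For readers following the rte_kni_conf hunk above, a minimal sketch (not part of the commit) of how an application might fill the config once the PCI addr/id fields are deprecated. The interface name and mbuf size below are arbitrary example values, and only the public KNI API calls shown are assumed.

/* Minimal sketch, assuming the public rte_kni API: the deprecated
 * conf.addr and conf.id members are simply left zeroed. */
#include <stdio.h>
#include <string.h>
#include <rte_mempool.h>
#include <rte_kni.h>

static struct rte_kni *
create_kni_iface(struct rte_mempool *pktmbuf_pool, uint16_t port_id)
{
	struct rte_kni_conf conf;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u",
		 (unsigned int)port_id);	/* example name */
	conf.group_id = port_id;
	conf.mbuf_size = 2048;			/* example mbuf data size */

	/* No PCI address/ID is supplied; NULL ops means no callbacks. */
	return rte_kni_alloc(pktmbuf_pool, &conf, NULL);
}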