Update the driver to Intel version 2.1.6

- Add new hardware support for the 82599
	- Big change to the interrupt architecture: it now
	  uses a queue containing an RX/TX ring pair as the
	  recipient of each interrupt, which reduces overall
	  system interrupt/MSI-X usage.
	- Improved RX mbuf handling: the old get_buf routine
	  is no longer synchronized with rxeof, which
	  eliminates packet discards due to mbuf allocation
	  failures.
	- Much simplified and improved AIM code: it now runs
	  in the queue interrupt context and accounts for
	  traffic on both the RX and TX sides (a sketch of
	  the idea follows this list).
	- A variety of small tweaks, such as ring size, that
	  have been observed to improve performance.
	- Thanks to those who provided feedback or suggested
	  changes; I hope I've caught all of them.
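To make the queue-pair interrupt and AIM changes concrete, here is a rough C sketch of a per-queue MSI-X handler in this style. It is illustrative only, not the committed ixgbe.c code (that file's diff is suppressed below for size): ixgbe_aim_from() is a hypothetical stand-in for the inline moderation math, the ixgbe_txeof()/ixgbe_rxeof() calls are simplified, and the EIMS re-enable glosses over the 82599's extended mask registers. The fields it touches (irqs, msix, eims, eitr_setting, and the per-ring bytes/packets counters) are the ones introduced in the ixgbe.h diff below.

/* Illustrative sketch only -- not the committed driver code. */
static void
ixgbe_msix_que(void *arg)
{
	struct ix_queue	*que = arg;
	struct adapter	*adapter = que->adapter;
	struct tx_ring	*txr = que->txr;
	struct rx_ring	*rxr = que->rxr;
	u32		newitr;

	++que->irqs;

	/* One vector services both rings of its RX/TX pair */
	ixgbe_txeof(txr);		/* reclaim completed TX descriptors */
	ixgbe_rxeof(que, adapter->rx_process_limit);	/* harvest RX */

	/*
	 * AIM: size the next interrupt interval from the traffic seen
	 * on BOTH rings since the last interrupt; small packets favor
	 * low latency, bulk transfers favor fewer interrupts.
	 * (ixgbe_aim_from() is a hypothetical helper for the math.)
	 */
	newitr = ixgbe_aim_from(txr->bytes, txr->packets,
	    rxr->bytes, rxr->packets);
	if (newitr != que->eitr_setting) {
		que->eitr_setting = newitr;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix),
		    newitr);
	}

	/* Restart the counters for the next interval */
	txr->bytes = txr->packets = 0;
	rxr->bytes = rxr->packets = 0;

	/* Re-enable just this queue's interrupt via its EIMS bit */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, que->eims);
}

The point of the pairing is that a single wakeup amortizes both the TX reclaim and the RX harvest, and the moderation decision sees the whole queue's load rather than only one direction of it.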
jfv 2010-03-27 00:21:40 +00:00
parent fad010c732
commit 8918ac92eb
12 changed files with 1157 additions and 1093 deletions


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -176,7 +176,7 @@
#define MSIX_82599_BAR 4
#define IXGBE_TSO_SIZE 65535
#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
#define IXGBE_RX_HDR 256
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define CSUM_OFFLOAD 7 /* Bits in csum flags */
@ -231,6 +231,7 @@ struct ixgbe_tx_buf {
struct ixgbe_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t map;
};
@ -248,20 +249,34 @@ struct ixgbe_dma_alloc {
};
/*
* The transmit ring, one per tx queue
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
u32 msix;
bool watchdog_check;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
volatile u32 tx_hwb;
struct ixgbe_dma_alloc txdma;
struct task tx_task;
struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
struct ixgbe_tx_buf *tx_buffers;
@ -272,17 +287,14 @@ struct tx_ring {
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
/* Interrupt resources */
void *tag;
struct resource *res;
#ifdef IXGBE_FDIR
u16 atr_sample;
u16 atr_count;
#endif
u32 bytes; /* used for AIM */
u32 packets;
/* Soft Stats */
u32 no_tx_desc_avail;
u32 no_tx_desc_late;
u64 tx_irq;
u64 no_desc_avail;
u64 total_packets;
};
@ -294,35 +306,29 @@ struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
u32 msix;
u32 payload;
struct task rx_task;
struct taskqueue *tq;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool hw_rsc;
unsigned int last_refreshed;
unsigned int next_to_check;
bool discard;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
struct ixgbe_rx_buf *rx_buffers;
bus_dma_tag_t rxtag;
bus_dmamap_t spare_map;
char mtx_name[16];
u32 bytes; /* Used for AIM calc */
u32 eitr_setting;
/* Interrupt resources */
void *tag;
struct resource *res;
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
u64 rx_discarded;
u64 rsc_num;
#ifdef IXGBE_FDIR
u64 flm;
@ -331,94 +337,94 @@ struct rx_ring {
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
struct device *dev;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSIX
*/
void *tag;
struct resource *res;
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct mtx core_mtx;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
u16 num_queues;
u16 num_vlans;
u16 num_queues;
/* Info about the board itself */
u32 optics;
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 linkvec;
u32 optics;
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 linkvec;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
u32 rx_mbuf_sz;
/* Support for pluggable optics */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber tasklet */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber */
#ifdef IXGBE_FDIR
int fdir_reinit;
struct task fdir_task;
#endif
struct taskqueue *tq;
/*
** Queues:
** This is the irq holder, it has
** an RX/TX pair of rings associated
** with it.
*/
struct ix_queue *queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
struct tx_ring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u64 rx_mask;
u32 rx_process_limit;
#ifdef IXGBE_IEEE1588
/* IEEE 1588 precision time support */
struct cyclecounter cycles;
struct nettimer clock;
struct nettime_compare compare;
struct hwtstamp_ctrl hwtstamp;
#endif
struct rx_ring *rx_rings;
int num_rx_desc;
u64 que_mask;
u32 rx_process_limit;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long link_irq;
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
struct ixgbe_hw_stats stats;
};
/* Precision Time Sync (IEEE 1588) defines */
@ -452,8 +458,8 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_tw_tyco:
case ixgbe_phy_tw_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
return TRUE;
default:
return FALSE;


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -59,6 +59,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
@ -164,6 +165,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* MAC */
mac->ops.start_hw = &ixgbe_start_hw_82598;
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
@ -273,7 +275,8 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
* Then set pcie completion timeout
* Disables relaxed ordering, then sets the PCIe completion timeout
*
**/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
@ -287,17 +290,17 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -439,15 +442,23 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
DEBUGFUNC("ixgbe_fc_enable_82598");
/*
* On 82598 backplane having FC on causes resets while doing
* KX, so turn off here.
* On 82598, having Rx FC on causes resets while doing 1G,
* so if it's on, turn it off once we know link_speed. For
* more details, see the 82598 Specification Update.
*/
hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
if (link_up &&
link_speed == IXGBE_LINK_SPEED_1GB_FULL &&
hw->mac.ops.get_media_type(hw) == ixgbe_media_type_backplane) {
hw->fc.disable_fc_autoneg = TRUE;
hw->fc.requested_mode = ixgbe_fc_none;
if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
hw->fc.requested_mode = ixgbe_fc_tx_pause;
break;
case ixgbe_fc_rx_pause:
hw->fc.requested_mode = ixgbe_fc_none;
break;
default:
/* no change */
break;
}
}
/* Negotiate the fc mode to use */
@ -842,12 +853,9 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
status = ixgbe_disable_pcie_master(hw);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
ixgbe_disable_pcie_master(hw);
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@ -868,6 +876,19 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
DEBUGOUT("Reset polling failed to complete.\n");
}
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete. We use 1usec since that is
* what is needed for ixgbe_disable_pcie_master(). The second reset
* then clears out any effects of those events.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
usec_delay(1);
goto mac_reset_top;
}
msec_delay(50);
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@ -1299,3 +1320,32 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
/**
* ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
* @hw: pointer to hardware structure
*
**/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
u32 regval;
u32 i;
DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
/* Enable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -64,6 +64,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
@ -267,6 +268,8 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_82599");
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not
@ -878,7 +881,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 ctrl, ctrl_ext;
u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@ -913,12 +916,9 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
status = ixgbe_disable_pcie_master(hw);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
ixgbe_disable_pcie_master(hw);
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@ -938,10 +938,19 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete. We use 1usec since that is
* what is needed for ixgbe_disable_pcie_master(). The second reset
* then clears out any effects of those events.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
usec_delay(1);
goto mac_reset_top;
}
msec_delay(50);
@ -981,8 +990,6 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
@ -1207,6 +1214,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
/* Initialize the drop queue to Rx queue 127 */
fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@ -1886,23 +1896,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
* @input_masks: masks for the input bitstream
* @soft_id: software index for the filters
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u16 soft_id,
u8 queue)
struct ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
u32 src_ipv4, dst_ipv4;
u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
u8 fdirm = 0;
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
@ -1959,7 +1972,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@ -1968,7 +1980,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
if (src_ipv4 == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
if (dst_ipv4 == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
case IXGBE_ATR_L4TYPE_TCP:
if (src_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
input_masks->src_port_mask);
if (dst_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
(0xffff << 16)));
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
(input_masks->dst_port_mask << 16)));
break;
case IXGBE_ATR_L4TYPE_UDP:
if (src_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
input_masks->src_port_mask);
if (dst_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
(0xffff << 16)));
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
(input_masks->dst_port_mask << 16)));
break;
default:
/* this already would have failed above */
break;
}
/* Program the last mask register, FDIRM */
if (input_masks->vlan_id_mask || !vlan_id)
/* Mask both VLAN and VLANP - bits 0 and 1 */
fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
if (input_masks->data_mask || !flex_bytes)
/* Flex bytes need masking, so mask the whole thing - bit 4 */
fdirm |= IXGBE_FDIRM_FLEX;
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
@ -2063,7 +2146,7 @@ s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -2192,10 +2275,14 @@ u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
goto out;
switch (hw->phy.type) {
case ixgbe_phy_tw_tyco:
case ixgbe_phy_tw_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
case ixgbe_phy_sfp_ftl_active:
case ixgbe_phy_sfp_active_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
@ -2328,3 +2415,30 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_out:
return status;
}
/**
* ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
* @hw: pointer to hardware structure
*
**/
void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
{
u32 regval;
u32 i;
DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599");
/* Enable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
}
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -111,6 +111,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
default:
@ -167,6 +168,20 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_enable_relaxed_ordering - Enable relaxed ordering
* @hw: pointer to hardware structure
*
* Enables Tx relaxed ordering, which is disabled by default
* in ixgbe_start_hw().
**/
void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
{
if (hw->mac.ops.enable_relaxed_ordering)
hw->mac.ops.enable_relaxed_ordering(hw);
}
/**
* ixgbe_clear_hw_cntrs - Clear hardware counters
* @hw: pointer to hardware structure


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -43,6 +43,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
s32 ixgbe_start_hw(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
@ -122,6 +123,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
u8 queue);
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *masks,
u16 soft_id,
u8 queue);
u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -474,8 +474,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS)
DEBUGOUT("PCI-E Master disable polling has failed.\n");
ixgbe_disable_pcie_master(hw);
return IXGBE_SUCCESS;
}
@ -2198,10 +2197,14 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
u32 i;
u32 reg_val;
u32 number_of_queues;
s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_disable_pcie_master");
/* Just jump out if bus mastering is already disabled */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
@ -2217,13 +2220,42 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
status = IXGBE_SUCCESS;
break;
}
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
usec_delay(100);
}
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
/*
* The GIO Master Disable bit didn't clear. There are multiple reasons
* for this listed in the datasheet 5.2.5.3.2 Master Disable, and they
* all require a double reset to recover from. Before proceeding, we
* first wait a little more to try to ensure that, at a minimum, the
* PCIe block has no transactions pending.
*/
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
break;
usec_delay(100);
}
if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
/*
* Two consecutive resets are required via CTRL.RST per datasheet
* 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
* of this need. The first reset prevents new master requests from
* being issued by our device. We then must wait 1usec for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
out:
return status;
}
@ -2695,6 +2727,10 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
u32 first_empty_slot = 0;
s32 regindex;
/* short cut the special case */
if (vlan == 0)
return 0;
/*
* Search for the vlan id in the VLVF entries. Save off the first empty
* slot found along the way
@ -2717,7 +2753,7 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
regindex = first_empty_slot;
else {
DEBUGOUT("No space in VLVF.\n");
regindex = -1;
regindex = IXGBE_ERR_NO_SPACE;
}
}
@ -2738,8 +2774,11 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
{
s32 regindex;
u32 bitindex;
u32 vfta;
u32 bits;
u32 vt;
u32 targetbit;
bool vfta_changed = FALSE;
DEBUGFUNC("ixgbe_set_vfta_generic");
@ -2749,6 +2788,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/*
* this is a 2 part operation - first the VFTA, then the
* VLVF and VLVFB if VT Mode is set
* We don't write the VFTA until we know the VLVF part succeeded.
*/
/* Part 1
@ -2759,13 +2799,20 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*/
regindex = (vlan >> 5) & 0x7F;
bitindex = vlan & 0x1F;
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
bits |= (1 << bitindex);
else
bits &= ~(1 << bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
targetbit = (1 << bitindex);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on) {
if (!(vfta & targetbit)) {
vfta |= targetbit;
vfta_changed = TRUE;
}
} else {
if ((vfta & targetbit)) {
vfta &= ~targetbit;
vfta_changed = TRUE;
}
}
/* Part 2
* If VT Mode is set
@ -2777,61 +2824,84 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*/
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
if (vlan == 0) {
regindex = 0;
} else {
regindex = ixgbe_find_vlvf_slot(hw, vlan);
if (regindex < 0)
goto out;
}
s32 vlvf_index;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
return vlvf_index;
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB(regindex*2),
IXGBE_VLVFB(vlvf_index*2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
bits |= (1 << vind);
IXGBE_VLVFB((vlvf_index*2)+1));
bits |= (1 << (vind-32));
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB((regindex*2)+1),
IXGBE_VLVFB((vlvf_index*2)+1),
bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB(regindex*2),
IXGBE_VLVFB(vlvf_index*2),
bits);
bits |= IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
IXGBE_VLVFB((vlvf_index*2)+1));
} else {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
bits &= ~(1 << vind);
IXGBE_VLVFB((vlvf_index*2)+1));
bits &= ~(1 << (vind-32));
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB((regindex*2)+1),
IXGBE_VLVFB((vlvf_index*2)+1),
bits);
bits |= IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
}
}
if (bits)
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
/*
* If there are still bits set in the VLVFB registers
* for the VLAN ID indicated we need to see if the
* caller is requesting that we clear the VFTA entry bit.
* If the caller has requested that we clear the VFTA
* entry bit but there are still pools/VFs using this VLAN
* ID entry then ignore the request. We're not worried
* about the case where we're turning the VFTA VLAN ID
* entry bit on, only when requested to turn it off as
* there may be multiple pools and/or VFs using the
* VLAN ID entry. In that case we cannot clear the
* VFTA bit until all pools/VFs using that VLAN ID have also
* been cleared. This will be indicated by "bits" being
* zero.
*/
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
if (!vlan_on) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
vfta_changed = FALSE;
}
}
else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
out:
if (vfta_changed)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
return IXGBE_SUCCESS;
}
@ -2869,14 +2939,23 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
bool *link_up, bool link_up_wait_to_complete)
{
u32 links_reg;
u32 links_reg, links_orig;
u32 i;
DEBUGFUNC("ixgbe_check_mac_link_generic");
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (links_orig != links_reg) {
DEBUGOUT2("LINKS changed from %08X to %08X\n",
links_orig, links_reg);
}
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if (links_reg & IXGBE_LINKS_UP) {


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -77,7 +77,8 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
phy->ops.set_low_power_state = &ixgbe_tn_set_low_power_state;
return IXGBE_SUCCESS;
}
@ -241,13 +242,19 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
IXGBE_MDIO_PHY_XS_DEV_TYPE,
IXGBE_MDIO_PHY_XS_RESET);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 500; i++) {
msec_delay(1);
/*
* Poll for reset bit to self-clear indicating reset is complete.
* Some PHYs could take up to 3 seconds to complete and need about
* 1.7 usec delay after the reset is complete.
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET))
if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
usec_delay(2);
break;
}
}
if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
@ -922,6 +929,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
u16 enforce_sfp = 0;
DEBUGFUNC("ixgbe_identify_sfp_module_generic");
@ -968,6 +976,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 4 SFP_DA_CORE1 - 82599-specific
* 5 SFP_SR/LR_CORE0 - 82599-specific
* 6 SFP_SR/LR_CORE1 - 82599-specific
* 7 SFP_act_lmt_DA_CORE0 - 82599-specific
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@ -979,29 +989,40 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
&cable_spec);
if (cable_spec &
IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core1;
} else
hw->phy.sfp_type =
ixgbe_sfp_type_unknown;
} else if (comp_codes_10g &
(IXGBE_SFF_10GBASESR_CAPABLE |
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
else
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
}
if (hw->phy.sfp_type != stored_sfp_type)
@ -1036,10 +1057,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_tyco;
hw->phy.type =
ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
hw->phy.type = ixgbe_phy_sfp_ftl;
if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type = ixgbe_phy_sfp_ftl_active;
else
hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@ -1049,15 +1074,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_unknown;
hw->phy.type =
ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
/* All passive DA cables are supported */
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = IXGBE_SUCCESS;
goto out;
}
@ -1108,6 +1138,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
@ -1121,6 +1152,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
return IXGBE_ERR_SFP_NOT_SUPPORTED;
/* Limiting active cables must be initialized as SR modules */
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
@ -1137,7 +1174,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
if (sfp_id == hw->phy.sfp_type) {
if (sfp_id == sfp_type) {
(*list_offset)++;
hw->eeprom.ops.read(hw, *list_offset, data_offset);
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
@ -1722,3 +1759,56 @@ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
/* Put the i2c bus back to default state */
ixgbe_i2c_stop(hw);
}
/**
* ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
**/
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u16 phy_data = 0;
DEBUGFUNC("ixgbe_tn_check_overtemp");
if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
goto out;
/* Check that the LASI temp alarm status was triggered */
hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
goto out;
status = IXGBE_ERR_OVERTEMP;
out:
return status;
}
/**
* ixgbe_tn_set_low_power_state - Sets the Teranetics PHY into low power state
* @hw: pointer to hardware structure
*
* Sets the phy into low power mode when LASI temp alarm status is triggered
**/
s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u16 phy_data = 0;
DEBUGFUNC("ixgbe_set_tn_low_power_state");
/* Set the phy into low power mode */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
phy_data |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE;
hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, phy_data);
return status;
}


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,9 +47,12 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
@ -84,6 +87,9 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@ -119,6 +125,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -57,9 +57,11 @@
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
/* General Registers */
#define IXGBE_CTRL 0x00000
@ -89,7 +91,7 @@
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
#define IXGBE_VPDDIAG0 0x10204
#define IXGBE_VPDDIAG1 0x10208
@ -198,6 +200,7 @@
#define IXGBE_RFCTL 0x05008
#define IXGBE_DRECCCTL 0x02F08
#define IXGBE_DRECCCTL_DISABLE 0
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
@ -334,7 +337,7 @@
/* Wake Up Control */
#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
/* Wake Up Filter Control */
#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@ -736,6 +739,12 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@ -889,6 +898,8 @@
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@ -1020,7 +1031,9 @@
#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
@ -1369,10 +1382,12 @@
* EAPOL 802.1x (0x888e): Filter 0
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
* FIP (0x8914): Filter 4
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
#define IXGBE_ETQF_FILTER_FIP 4
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@ -1476,6 +1491,7 @@
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@ -1655,6 +1671,8 @@
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
/* PCI Bus Info */
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
@ -1787,6 +1805,7 @@
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 packet buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
@ -2000,10 +2019,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
#define IXGBE_FDIRM_L3P 0x00000008
#define IXGBE_FDIRM_L4P 0x00000010
#define IXGBE_FDIRM_FLEX 0x00000020
#define IXGBE_FDIRM_DIPv6 0x00000040
#define IXGBE_FDIRM_L4P 0x00000008
#define IXGBE_FDIRM_FLEX 0x00000010
#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@ -2218,6 +2236,8 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@ -2258,6 +2278,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
struct ixgbe_atr_input_masks {
u32 src_ip_mask;
u32 dst_ip_mask;
u16 src_port_mask;
u16 dst_port_mask;
u16 vlan_id_mask;
u16 data_mask;
};
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@ -2281,10 +2310,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
ixgbe_phy_tw_tyco,
ixgbe_phy_tw_unknown,
ixgbe_phy_sfp_passive_tyco,
ixgbe_phy_sfp_passive_unknown,
ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
@ -2312,6 +2343,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
ixgbe_sfp_type_da_act_lmt_core0 = 7,
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@ -2354,25 +2387,25 @@ enum ixgbe_bus_type {
/* PCI bus speeds */
enum ixgbe_bus_speed {
ixgbe_bus_speed_unknown = 0,
ixgbe_bus_speed_33,
ixgbe_bus_speed_66,
ixgbe_bus_speed_100,
ixgbe_bus_speed_120,
ixgbe_bus_speed_133,
ixgbe_bus_speed_2500,
ixgbe_bus_speed_5000,
ixgbe_bus_speed_33 = 33,
ixgbe_bus_speed_66 = 66,
ixgbe_bus_speed_100 = 100,
ixgbe_bus_speed_120 = 120,
ixgbe_bus_speed_133 = 133,
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_reserved
};
/* PCI bus widths */
enum ixgbe_bus_width {
ixgbe_bus_width_unknown = 0,
ixgbe_bus_width_pcie_x1,
ixgbe_bus_width_pcie_x2,
ixgbe_bus_width_pcie_x1 = 1,
ixgbe_bus_width_pcie_x2 = 2,
ixgbe_bus_width_pcie_x4 = 4,
ixgbe_bus_width_pcie_x8 = 8,
ixgbe_bus_width_32,
ixgbe_bus_width_64,
ixgbe_bus_width_32 = 32,
ixgbe_bus_width_64 = 64,
ixgbe_bus_width_reserved
};
@ -2503,6 +2536,7 @@ struct ixgbe_mac_operations {
s32 (*reset_hw)(struct ixgbe_hw *);
s32 (*start_hw)(struct ixgbe_hw *);
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
@ -2570,6 +2604,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
s32 (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_low_power_state)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@ -2580,6 +2616,7 @@ struct ixgbe_eeprom_info {
u16 address_bits;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
@ -2603,6 +2640,7 @@ struct ixgbe_mac_info {
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
};
struct ixgbe_phy_info {
@ -2668,6 +2706,8 @@ struct ixgbe_hw {
#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF


@ -6,7 +6,7 @@ SRCS += ixgbe.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c
SRCS += ixgbe_82599.c ixgbe_82598.c
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP -DIXGBE_FDIR
clean:
rm -f device_if.h bus_if.h pci_if.h setdef* *_StripErr