Update to the Intel ixgbe driver:

	- Split the driver into independent PF and VF loadables. This is
	  in preparation for SR-IOV support, which will follow shortly.
	  It also lets us keep separate revision control over the two
	  parts, making for easier sustaining.
	- Move the TX/RX code into a shared, separate file (ix_txrx.c);
	  in the old code base the ixv code would miss fixes that went
	  into ixgbe, and this model eliminates that problem.
	- The driver loadables now match the device names, something that
	  has been requested for some time.
	- Rather than modules/ixgbe there are now modules/ix and modules/ixv.
	- It is also possible to build a static kernel with only one
	  driver or the other for streamlined installs, or with both.
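	  For instance, a kernel configuration can now include either or
	  both of the device lines added in the config hunks below:

		device	ix	# Intel PRO/10GbE PCIE PF Ethernet
		device	ixv	# Intel PRO/10GbE PCIE VF Ethernet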

Enjoy!

Submitted by: jfv and erj
Jack F Vogel 2015-03-17 18:32:28 +00:00
parent d470ab05ff
commit 758cc3dcd5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=280182
35 changed files with 8395 additions and 8134 deletions


@ -216,7 +216,8 @@ device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel PRO/1000 Gigabit Ethernet Family
device igb # Intel PRO/1000 PCIE Server Gigabit Family
device ixgbe # Intel PRO/10GbE PCIE Ethernet Family
device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet


@ -2100,7 +2100,8 @@ device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel Pro/1000 Gigabit Ethernet
device igb # Intel Pro/1000 PCIE Gigabit Ethernet
device ixgb # Intel Pro/10Gbe PCI-X Ethernet
device ixgbe # Intel Pro/10Gbe PCIE Ethernet
device ix # Intel Pro/10Gbe PCIE Ethernet
device ixv # Intel Pro/10Gbe PCIE Ethernet VF
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device mxge # Myricom Myri-10G 10GbE NIC
device nxge # Neterion Xframe 10GbE Server/Storage Adapter


@ -1769,31 +1769,31 @@ iwn6050.fw optional iwn6050fw | iwnfw \
dev/ixgb/if_ixgb.c optional ixgb
dev/ixgb/ixgb_ee.c optional ixgb
dev/ixgb/ixgb_hw.c optional ixgb
dev/ixgbe/ixgbe.c optional ixgbe inet \
dev/ixgbe/if_ix.c optional ix inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/ixv.c optional ixgbe inet \
dev/ixgbe/if_ixv.c optional ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe -DSMP"
dev/ixgbe/ixgbe_phy.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_phy.c optional ixgbe inet \
dev/ixgbe/ixgbe_api.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_api.c optional ixgbe inet \
dev/ixgbe/ixgbe_common.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_common.c optional ixgbe inet \
dev/ixgbe/ixgbe_mbx.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_mbx.c optional ixgbe inet \
dev/ixgbe/ixgbe_vf.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_vf.c optional ixgbe inet \
dev/ixgbe/ixgbe_82598.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82598.c optional ixgbe inet \
dev/ixgbe/ixgbe_82599.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_82599.c optional ixgbe inet \
dev/ixgbe/ixgbe_x540.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x540.c optional ixgbe inet \
dev/ixgbe/ixgbe_dcb.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb.c optional ixgbe inet \
dev/ixgbe/ixgbe_dcb_82598.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82598.c optional ixgbe inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82599.c optional ixgbe inet \
dev/ixgbe/ixgbe_dcb_82599.c optional ix ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/jme/if_jme.c optional jme pci
dev/joy/joy.c optional joy


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large

sys/dev/ixgbe/if_ixv.c (new file, 2107 lines)

File diff suppressed because it is too large

sys/dev/ixgbe/ix_txrx.c (new file, 2259 lines)

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -92,6 +92,7 @@
#include <machine/smp.h>
#include "ixgbe_api.h"
#include "ixgbe_vf.h"
/* Tunables */
@ -197,6 +198,10 @@
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define IXGBE_QUEUE_MIN_FREE 32
#define IXGBE_MAX_TX_BUSY 10
#define IXGBE_QUEUE_HUNG 0x80000000
#define IXV_EITR_DEFAULT 128
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
@ -205,6 +210,15 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/* Backward compatibility items for very old versions */
#ifndef pci_find_cap
#define pci_find_cap pci_find_extcap
#endif
#ifndef DEVMETHOD_END
#define DEVMETHOD_END { NULL, NULL }
#endif
/*
* Interrupt Moderation parameters
*/
@ -213,7 +227,6 @@
#define IXGBE_BULK_LATENCY 1200
#define IXGBE_LINK_ITR 2000
/*
*****************************************************************************
* vendor_info_array
@ -268,8 +281,10 @@ struct ix_queue {
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
u32 me;
struct resource *res;
void *tag;
int busy;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
@ -284,7 +299,8 @@ struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
int watchdog_time;
u32 tail;
int busy;
union ixgbe_adv_tx_desc *tx_base;
struct ixgbe_tx_buf *tx_buffers;
struct ixgbe_dma_alloc txdma;
@ -293,11 +309,6 @@ struct tx_ring {
u16 next_to_clean;
u16 process_limit;
u16 num_desc;
enum {
IXGBE_QUEUE_IDLE,
IXGBE_QUEUE_WORKING,
IXGBE_QUEUE_HUNG,
} queue_status;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
@ -312,6 +323,7 @@ struct tx_ring {
u32 bytes; /* used for AIM */
u32 packets;
/* Soft Stats */
unsigned long tx_bytes;
unsigned long tso_tx;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
@ -327,6 +339,7 @@ struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
u32 tail;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
@ -406,7 +419,7 @@ struct adapter {
u16 num_segs;
u32 link_speed;
bool link_up;
u32 linkvec;
u32 vector;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
@ -442,7 +455,7 @@ struct adapter {
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
u64 que_mask;
u64 active_queues;
u32 num_rx_desc;
/* Multicast array memory */
@ -455,9 +468,24 @@ struct adapter {
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long watchdog_events;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
unsigned long vector_irq;
union {
struct ixgbe_hw_stats pf;
struct ixgbevf_hw_stats vf;
} stats;
#if __FreeBSD_version >= 1100036
/* counter(9) stats */
u64 ipackets;
u64 ierrors;
u64 opackets;
u64 oerrors;
u64 ibytes;
u64 obytes;
u64 imcasts;
u64 omcasts;
u64 iqdrops;
u64 noproto;
#endif
};
@ -488,6 +516,45 @@ struct adapter {
#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
#endif
/* Stats macros */
#if __FreeBSD_version >= 1100036
#define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count)
#define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count)
#define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count)
#define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count)
#define IXGBE_SET_COLLISIONS(sc, count)
#define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count)
#define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count)
#define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count)
#define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count)
#define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count)
#else
#define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count)
#define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count)
#define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count)
#define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count)
#define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count)
#define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count)
#define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count)
#define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count)
#define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count)
#define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count)
#endif
/* Sysctl help messages; displayed with sysctl -d */
#define IXGBE_SYSCTL_DESC_ADV_SPEED \
"\nControl advertised link speed using these flags:\n" \
"\t0x1 - advertise 100M\n" \
"\t0x2 - advertise 1G\n" \
"\t0x4 - advertise 10G"
#define IXGBE_SYSCTL_DESC_SET_FC \
"\nSet flow control mode using these values:\n" \
"\t0 - off\n" \
"\t1 - rx pause\n" \
"\t2 - tx pause\n" \
"\t3 - tx and rx pause"
static inline bool
ixgbe_is_sfp(struct ixgbe_hw *hw)
{
@ -498,6 +565,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_qsfp_passive_unknown:
case ixgbe_phy_qsfp_active_unknown:
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
return TRUE;
default:
return FALSE;
@ -530,4 +601,44 @@ ixgbe_rx_unrefreshed(struct rx_ring *rxr)
rxr->next_to_refresh - 1);
}
/*
** This checks for a zero MAC addr, which is likely unless the
** Admin on the Host has assigned one.
*/
static inline bool
ixv_check_ether_addr(u8 *addr)
{
bool status = TRUE;
if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 &&
addr[3] == 0 && addr[4]== 0 && addr[5] == 0))
status = FALSE;
return (status);
}
/* Shared Prototypes */
#ifdef IXGBE_LEGACY_TX
void ixgbe_start(struct ifnet *);
void ixgbe_start_locked(struct tx_ring *, struct ifnet *);
#else /* ! IXGBE_LEGACY_TX */
int ixgbe_mq_start(struct ifnet *, struct mbuf *);
int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixgbe_qflush(struct ifnet *);
void ixgbe_deferred_mq_start(void *, int);
#endif /* IXGBE_LEGACY_TX */
int ixgbe_allocate_queues(struct adapter *);
int ixgbe_allocate_transmit_buffers(struct tx_ring *);
int ixgbe_setup_transmit_structures(struct adapter *);
void ixgbe_free_transmit_structures(struct adapter *);
int ixgbe_allocate_receive_buffers(struct rx_ring *);
int ixgbe_setup_receive_structures(struct adapter *);
void ixgbe_free_receive_structures(struct adapter *);
void ixgbe_txeof(struct tx_ring *);
bool ixgbe_rxeof(struct ix_queue *);
int ixgbe_dma_malloc(struct adapter *,
bus_size_t, struct ixgbe_dma_alloc *, int);
void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
#endif /* _IXGBE_H_ */
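A note on the ixv_check_ether_addr() helper above: it exists because a VF
typically comes up with an all-zero MAC address unless the host administrator
has assigned one. Below is a minimal sketch, assuming the VF attach path uses
the helper to fall back to a random, locally administered address; the function
name is hypothetical and the real logic is in the suppressed if_ixv.c diff.

	static void
	ixv_fixup_mac_addr(struct adapter *adapter)	/* illustrative only */
	{
		struct ixgbe_hw *hw = &adapter->hw;

		/* No address assigned by the PF: generate a random, locally
		 * administered unicast MAC before ether_ifattach(). */
		if (!ixv_check_ether_addr(hw->mac.addr)) {
			arc4rand(hw->mac.addr, ETHER_ADDR_LEN, 0);
			hw->mac.addr[0] &= ~0x01;	/* clear multicast bit */
			hw->mac.addr[0] |= 0x02;	/* set locally administered bit */
		}
	}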


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -38,6 +38,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#define IXGBE_82598_MAX_TX_QUEUES 32
#define IXGBE_82598_MAX_RX_QUEUES 64
#define IXGBE_82598_RAR_ENTRIES 16
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
@ -121,47 +128,48 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
ret_val = ixgbe_init_ops_generic(hw);
/* PHY */
phy->ops.init = &ixgbe_init_phy_ops_82598;
phy->ops.init = ixgbe_init_phy_ops_82598;
/* MAC */
mac->ops.start_hw = &ixgbe_start_hw_82598;
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
mac->ops.start_hw = ixgbe_start_hw_82598;
mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598;
mac->ops.reset_hw = ixgbe_reset_hw_82598;
mac->ops.get_media_type = ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_82598;
mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
ixgbe_get_supported_physical_layer_82598;
mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598;
mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598;
mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598;
mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = &ixgbe_set_vfta_82598;
mac->ops.set_vmdq = ixgbe_set_vmdq_82598;
mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598;
mac->ops.set_vfta = ixgbe_set_vfta_82598;
mac->ops.set_vlvf = NULL;
mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
mac->ops.clear_vfta = ixgbe_clear_vfta_82598;
/* Flow Control */
mac->ops.fc_enable = &ixgbe_fc_enable_82598;
mac->ops.fc_enable = ixgbe_fc_enable_82598;
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 16;
mac->rx_pb_size = 512;
mac->max_tx_queues = 32;
mac->max_rx_queues = 64;
mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/* SFP+ Module */
phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598;
phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598;
/* Link */
mac->ops.check_link = &ixgbe_check_mac_link_82598;
mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
mac->ops.check_link = ixgbe_check_mac_link_82598;
mac->ops.setup_link = ixgbe_setup_mac_link_82598;
mac->ops.flap_tx_laser = NULL;
mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598;
mac->ops.setup_rxpba = ixgbe_set_rxpba_82598;
/* Manageability interface */
mac->ops.set_fw_drv_ver = NULL;
@ -194,20 +202,20 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Overwrite the link function pointers if copper PHY */
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
mac->ops.setup_link = ixgbe_setup_copper_link_82598;
mac->ops.get_link_capabilities =
&ixgbe_get_copper_link_capabilities_generic;
ixgbe_get_copper_link_capabilities_generic;
}
switch (hw->phy.type) {
case ixgbe_phy_tn:
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = ixgbe_setup_phy_link_tnx;
phy->ops.check_link = ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
&ixgbe_get_phy_firmware_version_tnx;
ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
phy->ops.reset = ixgbe_reset_phy_nl;
/* Call SFP+ identify routine to get the SFP+ module type */
ret_val = phy->ops.identify_sfp(hw);
@ -1409,6 +1417,20 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
return;
}
/**
* ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit
* @hw: pointer to hardware structure
* @regval: register value to write to RXCTRL
*
* Enables the Rx DMA unit
**/
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval)
{
DEBUGFUNC("ixgbe_enable_rx_dma_82598");
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
return IXGBE_SUCCESS;
}
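For reference (not part of this hunk): the mac->ops.enable_rx_dma entry
installed above is reached through the generic dispatch layer. A sketch of
the corresponding ixgbe_api.c wrapper, following the ixgbe_call_func()
pattern visible later in this diff:

	s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
	{
		/* Dispatch to the per-MAC implementation, e.g.
		 * ixgbe_enable_rx_dma_82598() on 82598 parts. */
		return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
		    (hw, regval), IXGBE_NOT_IMPLEMENTED);
	}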


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -49,4 +49,5 @@ u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval);
#endif /* _IXGBE_82598_H_ */

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -41,9 +41,8 @@ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
@ -61,4 +60,6 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
#endif /* _IXGBE_82599_H_ */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -78,13 +78,23 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_82599EB:
status = ixgbe_init_ops_82599(hw);
break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
status = ixgbe_init_ops_vf(hw);
break;
case ixgbe_mac_X540:
status = ixgbe_init_ops_X540(hw);
break;
case ixgbe_mac_X550:
status = ixgbe_init_ops_X550(hw);
break;
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM(hw);
break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
break;
@ -138,6 +148,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599_QSFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_BYPASS:
@ -153,9 +164,35 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X540_vf;
break;
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X540_BYPASS:
hw->mac.type = ixgbe_mac_X540;
break;
case IXGBE_DEV_ID_X550T:
hw->mac.type = ixgbe_mac_X550;
break;
case IXGBE_DEV_ID_X550EM_X_KX4:
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
hw->mac.type = ixgbe_mac_X550EM_x;
break;
case IXGBE_DEV_ID_X550EM_A_KR:
hw->mac.type = ixgbe_mac_X550EM_a;
break;
case IXGBE_DEV_ID_X550_VF:
case IXGBE_DEV_ID_X550_VF_HV:
hw->mac.type = ixgbe_mac_X550_vf;
break;
case IXGBE_DEV_ID_X550EM_X_VF:
case IXGBE_DEV_ID_X550EM_X_VF_HV:
hw->mac.type = ixgbe_mac_X550EM_x_vf;
break;
case IXGBE_DEV_ID_X550EM_A_VF:
case IXGBE_DEV_ID_X550EM_A_VF_HV:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
@ -511,6 +548,20 @@ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_setup_internal_phy - Configure integrated PHY
* @hw: pointer to hardware structure
*
* Reconfigure the integrated PHY in order to enable talk to the external PHY.
* Returns success if not implemented, since nothing needs to be done in this
* case.
*/
s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw),
IXGBE_SUCCESS);
}
/**
* ixgbe_check_phy_link - Determine link and speed status
* @hw: pointer to hardware structure
@ -540,6 +591,17 @@ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_phy_power - Control the phy power state
* @hw: pointer to hardware structure
* @on: TRUE for on, FALSE for off
*/
s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on)
{
return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_check_link - Get link and speed status
* @hw: pointer to hardware structure
@ -608,6 +670,22 @@ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_setup_mac_link - Set link speed
* @hw: pointer to hardware structure
* @speed: new link speed
*
* Configures link settings. Restarts the link.
* Performs autonegotiation if needed.
**/
s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed,
autoneg_wait_to_complete),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_get_link_capabilities - Returns link capabilities
* @hw: pointer to hardware structure
@ -1001,6 +1079,18 @@ s32 ixgbe_fc_enable(struct ixgbe_hw *hw)
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_setup_fc - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
* @hw: pointer to hardware structure
@ -1018,6 +1108,177 @@ s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
/**
* ixgbe_dmac_config - Configure DMA Coalescing registers.
* @hw: pointer to hardware structure
*
* Configure DMA coalescing. If enabling dmac, dmac is activated.
* When disabling dmac, the dmac enable bit is cleared.
**/
s32 ixgbe_dmac_config(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_dmac_update_tcs - Configure DMA Coalescing registers.
* @hw: pointer to hardware structure
*
* Disables dmac, updates per TC settings, and then enable dmac.
**/
s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_dmac_config_tcs - Configure DMA Coalescing registers.
* @hw: pointer to hardware structure
*
* Configure DMA coalescing threshold per TC and set high priority bit for
* FCOE TC. The dmac enable bit must be cleared before configuring.
**/
s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_setup_eee - Enable/disable EEE support
* @hw: pointer to the HW structure
* @enable_eee: boolean flag to enable EEE
*
* Enable/disable EEE based on the enable_eee flag.
* Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
* are modified.
*
**/
s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee)
{
return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_set_source_address_pruning - Enable/Disable source address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
* @pool: Rx pool - Rx pool to toggle source address pruning
**/
void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
unsigned int pool)
{
if (hw->mac.ops.set_source_address_pruning)
hw->mac.ops.set_source_address_pruning(hw, enable, pool);
}
/**
* ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing
* @hw: pointer to hardware structure
* @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
*
**/
void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
if (hw->mac.ops.set_ethertype_anti_spoofing)
hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf);
}
/**
* ixgbe_read_iosf_sb_reg - Read 32 bit PHY register
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @device_type: type of device you want to communicate with
* @phy_data: Pointer to read data from PHY register
*
* Reads a value from a specified PHY register
**/
s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *phy_data)
{
return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr,
device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: type of device you want to communicate with
* @phy_data: Data to write to the PHY register
*
* Writes a value to specified PHY register
**/
s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 phy_data)
{
return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr,
device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_disable_mdd - Disable malicious driver detection
* @hw: pointer to hardware structure
*
**/
void ixgbe_disable_mdd(struct ixgbe_hw *hw)
{
if (hw->mac.ops.disable_mdd)
hw->mac.ops.disable_mdd(hw);
}
/**
* ixgbe_enable_mdd - Enable malicious driver detection
* @hw: pointer to hardware structure
*
**/
void ixgbe_enable_mdd(struct ixgbe_hw *hw)
{
if (hw->mac.ops.enable_mdd)
hw->mac.ops.enable_mdd(hw);
}
/**
* ixgbe_mdd_event - Handle malicious driver detection event
* @hw: pointer to hardware structure
* @vf_bitmap: vf bitmap of malicious vfs
*
**/
void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap)
{
if (hw->mac.ops.mdd_event)
hw->mac.ops.mdd_event(hw, vf_bitmap);
}
/**
* ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver
* detection event
* @hw: pointer to hardware structure
* @vf: vf index
*
**/
void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf)
{
if (hw->mac.ops.restore_mdd_vf)
hw->mac.ops.restore_mdd_vf(hw, vf);
}
/**
* ixgbe_enter_lplu - Transition to low power states
* @hw: pointer to hardware structure
*
* Configures Low Power Link Up on transition to low power states
* (from D0 to non-D0).
**/
s32 ixgbe_enter_lplu(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw),
IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_read_analog_reg8 - Reads 8 bit analog register
@ -1064,6 +1325,7 @@ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
* ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
* @hw: pointer to hardware structure
* @byte_offset: byte offset to read
* @dev_addr: I2C bus address to read from
* @data: value read
*
* Performs byte read operation to SFP module's EEPROM over I2C interface.
@ -1075,10 +1337,26 @@ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
dev_addr, data), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_read_i2c_combined - Perform I2C read combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to read from
* @reg: I2C device register to read from
* @val: pointer to location to receive read value
*
* Returns an error code on error.
*/
s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val)
{
return ixgbe_call_func(hw, hw->phy.ops.read_i2c_combined, (hw, addr,
reg, val), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_write_i2c_byte - Writes 8 bit word over I2C
* @hw: pointer to hardware structure
* @byte_offset: byte offset to write
* @dev_addr: I2C bus address to write to
* @data: value to write
*
* Performs byte write operation to SFP module's EEPROM over I2C interface
@ -1091,6 +1369,21 @@ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
dev_addr, data), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_write_i2c_combined - Perform I2C write combined operation
* @hw: pointer to the hardware structure
* @addr: I2C bus address to write to
* @reg: I2C device register to write to
* @val: value to write
*
* Returns an error code on error.
*/
s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val)
{
return ixgbe_call_func(hw, hw->phy.ops.write_i2c_combined, (hw, addr,
reg, val), IXGBE_NOT_IMPLEMENTED);
}
/**
* ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
@ -1179,7 +1472,7 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
{
return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
(hw, mask), IXGBE_NOT_IMPLEMENTED);
@ -1193,9 +1486,34 @@ s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through SW_FW_SYNC register for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask)
{
if (hw->mac.ops.release_swfw_sync)
hw->mac.ops.release_swfw_sync(hw, mask);
}
void ixgbe_disable_rx(struct ixgbe_hw *hw)
{
if (hw->mac.ops.disable_rx)
hw->mac.ops.disable_rx(hw);
}
void ixgbe_enable_rx(struct ixgbe_hw *hw)
{
if (hw->mac.ops.enable_rx)
hw->mac.ops.enable_rx(hw);
}
/**
* ixgbe_set_rate_select_speed - Set module link speed
* @hw: pointer to hardware structure
* @speed: link speed to set
*
* Set module link speed via the rate select.
*/
void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed)
{
if (hw->mac.ops.set_rate_select_speed)
hw->mac.ops.set_rate_select_speed(hw, speed);
}


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -44,6 +44,8 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
@ -69,17 +71,21 @@ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 phy_data);
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_set_phy_power(struct ixgbe_hw *, bool on);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@ -123,6 +129,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@ -139,16 +146,17 @@ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
bool cloud_mode);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
union ixgbe_atr_input *input_mask, bool cloud_mode);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
u16 soft_id, u8 queue, bool cloud_mode);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
@ -156,7 +164,8 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
union ixgbe_atr_input *mask,
u16 soft_id,
u8 queue);
u8 queue,
bool cloud_mode);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
@ -164,16 +173,38 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_read_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
s32 ixgbe_write_i2c_combined(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val);
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
s32 ixgbe_dmac_config(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw);
s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee);
void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable,
unsigned int vf);
void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable,
int vf);
s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *phy_data);
s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 phy_data);
void ixgbe_disable_mdd(struct ixgbe_hw *hw);
void ixgbe_enable_mdd(struct ixgbe_hw *hw);
void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap);
void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf);
s32 ixgbe_enter_lplu(struct ixgbe_hw *hw);
void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed);
void ixgbe_disable_rx(struct ixgbe_hw *hw);
void ixgbe_enable_rx(struct ixgbe_hw *hw);
#endif /* _IXGBE_API_H_ */

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -41,6 +41,7 @@
IXGBE_WRITE_REG(hw, reg, (u32) value); \
IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
} while (0)
#define IXGBE_REMOVED(a) (0)
#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
struct ixgbe_pba {
u16 word[2];
@ -89,7 +90,7 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
@ -113,12 +114,16 @@ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
@ -155,10 +160,20 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length);
u32 length, u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed);
#endif /* IXGBE_COMMON */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -394,6 +394,9 @@ s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
break;
@ -422,6 +425,9 @@ s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
break;
@ -461,6 +467,9 @@ s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
tsa, map);
@ -500,6 +509,9 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwgid, tsa);
@ -541,6 +553,9 @@ s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwgid, tsa,
@ -576,6 +591,9 @@ s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
@ -602,6 +620,9 @@ s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
break;
@ -647,6 +668,9 @@ s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ixgbe_dcb_config_82599(hw, dcb_config);
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
@ -679,6 +703,9 @@ s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
@ -702,6 +729,9 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -35,7 +35,6 @@
#ifndef _IXGBE_DCB_H_
#define _IXGBE_DCB_H_
#include "ixgbe_type.h"
/* DCB defines */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -347,6 +347,8 @@ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *tsa)
{
UNREFERENCED_1PARAMETER(link_speed);
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
tsa);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -299,7 +299,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
*/
reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
if (hw->mac.type == ixgbe_mac_X540)
if (hw->mac.type >= ixgbe_mac_X540)
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
if (pfc_en)
@ -329,7 +329,14 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
/*
* In order to prevent Tx hangs when the internal Tx
* switch is enabled we must set the high water mark
* to the Rx packet buffer size - 24KB. This allows
* the Tx switch to function even under heavy Rx
* workloads.
*/
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
@ -573,6 +580,7 @@ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
u8 *map)
{
UNREFERENCED_1PARAMETER(link_speed);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
map);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -77,10 +77,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
DEBUGFUNC("ixgbe_write_mbx");
if (size > mbx->size)
if (size > mbx->size) {
ret_val = IXGBE_ERR_MBX;
else if (mbx->ops.write)
ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
"Invalid mailbox message size %d", size);
} else if (mbx->ops.write)
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
return ret_val;
@ -170,6 +171,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
usec_delay(mbx->usec_delay);
}
if (countdown == 0)
ERROR_REPORT2(IXGBE_ERROR_POLLING,
"Polling for VF%d mailbox message timedout", mbx_id);
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@ -198,6 +203,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
usec_delay(mbx->usec_delay);
}
if (countdown == 0)
ERROR_REPORT2(IXGBE_ERROR_POLLING,
"Polling for VF%d mailbox ack timedout", mbx_id);
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
@ -596,6 +605,9 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
@ -633,6 +645,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = IXGBE_SUCCESS;
else
ERROR_REPORT2(IXGBE_ERROR_POLLING,
"Failed to obtain mailbox lock for VF%d", vf_number);
return ret_val;
}
@ -727,6 +743,9 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a &&
hw->mac.type != ixgbe_mac_X540)
return;


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -54,6 +54,15 @@
#define IXGBE_SFF_SFF_8472_COMP 0x5E
#define IXGBE_SFF_SFF_8472_OSCB 0x6E
#define IXGBE_SFF_SFF_8472_ESCB 0x76
#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
#define IXGBE_SFF_QSFP_CONNECTOR 0x82
#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@ -67,6 +76,11 @@
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@ -74,6 +88,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
#define IXGBE_CS4227_SPARE24_LSB 0x12B0 /* Reg to program EDC */
#define IXGBE_CS4227_EDC_MODE_CX1 0x0002
#define IXGBE_CS4227_EDC_MODE_SR 0x0004
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
@ -101,16 +120,15 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
#ifndef IXGBE_SFP_DETECT_RETRIES
#define IXGBE_SFP_DETECT_RETRIES 10
#endif /* IXGBE_SFP_DETECT_RETRIES */
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
#define IXGBE_SFF_SFF_8472_REV_9_3 0x01
#define IXGBE_SFF_SFF_8472_REV_9_5 0x02
#define IXGBE_SFF_SFF_8472_REV_10_2 0x03
#define IXGBE_SFF_SFF_8472_REV_10_4 0x04
#define IXGBE_SFF_SFF_8472_REV_11_0 0x05
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@ -133,6 +151,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@ -145,8 +164,11 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -89,6 +89,49 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
/* ixgbe_virt_clr_reg - Set register to default (power on) state.
* @hw: pointer to hardware structure
*/
static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
{
int i;
u32 vfsrrctl;
u32 vfdca_rxctrl;
u32 vfdca_txctrl;
/* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
/* DCA_RXCTRL default value */
vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
IXGBE_DCA_RXCTRL_DATA_WRO_EN |
IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
/* DCA_TXCTRL default value */
vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
IXGBE_DCA_TXCTRL_DESC_WRO_EN |
IXGBE_DCA_TXCTRL_DATA_RRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
for (i = 0; i < 7; i++) {
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
}
IXGBE_WRITE_FLUSH(hw);
}
/**
* ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@ -134,7 +177,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
DEBUGFUNC("ixgbevf_reset_hw_vf");
@ -145,8 +188,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl);
IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
msec_delay(50);
@ -160,6 +202,9 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
if (!timeout)
return IXGBE_ERR_RESET_FAILED;
/* Reset VF registers to initial values */
ixgbe_virt_clr_reg(hw);
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
@ -224,6 +269,8 @@ s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
/* Clear packet split and pool config */
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
/* flush all queues disables */
IXGBE_WRITE_FLUSH(hw);
@ -512,6 +559,21 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
if (mac->type == ixgbe_mac_82599_vf) {
int i;
for (i = 0; i < 5; i++) {
usec_delay(100);
links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
if (!(links_reg & IXGBE_LINKS_UP))
goto out;
}
}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -84,6 +84,9 @@
#define IXGBE_VFGOTC_LSB 0x02020
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
#define IXGBE_VFMRQC 0x3000
#define IXGBE_VFRSSRK(x) (0x3100 + ((x) * 4))
#define IXGBE_VFRETA(x) (0x3200 + ((x) * 4))
struct ixgbevf_hw_stats {


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -38,6 +38,13 @@
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#define IXGBE_X540_MAX_TX_QUEUES 128
#define IXGBE_X540_MAX_RX_QUEUES 128
#define IXGBE_X540_RAR_ENTRIES 128
#define IXGBE_X540_MC_TBL_SIZE 128
#define IXGBE_X540_VFT_TBL_SIZE 128
#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
@ -63,65 +70,67 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
/* EEPROM */
eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
eeprom->ops.read = &ixgbe_read_eerd_X540;
eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
eeprom->ops.write = &ixgbe_write_eewr_X540;
eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
eeprom->ops.init_params = ixgbe_init_eeprom_params_X540;
eeprom->ops.read = ixgbe_read_eerd_X540;
eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540;
eeprom->ops.write = ixgbe_write_eewr_X540;
eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540;
eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540;
/* PHY */
phy->ops.init = &ixgbe_init_phy_ops_generic;
phy->ops.init = ixgbe_init_phy_ops_generic;
phy->ops.reset = NULL;
if (!ixgbe_mng_present(hw))
phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
/* MAC */
mac->ops.reset_hw = &ixgbe_reset_hw_X540;
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
mac->ops.get_media_type = &ixgbe_get_media_type_X540;
mac->ops.reset_hw = ixgbe_reset_hw_X540;
mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2;
mac->ops.get_media_type = ixgbe_get_media_type_X540;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_X540;
ixgbe_get_supported_physical_layer_X540;
mac->ops.read_analog_reg8 = NULL;
mac->ops.write_analog_reg8 = NULL;
mac->ops.start_hw = &ixgbe_start_hw_X540;
mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
mac->ops.start_hw = ixgbe_start_hw_X540;
mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic;
mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic;
mac->ops.get_device_caps = ixgbe_get_device_caps_generic;
mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic;
mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->ops.set_vmdq = ixgbe_set_vmdq_generic;
mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
mac->ops.set_vfta = ixgbe_set_vfta_generic;
mac->ops.set_vlvf = ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic;
mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing;
mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing;
/* Link */
mac->ops.get_link_capabilities =
&ixgbe_get_copper_link_capabilities_generic;
mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
mac->ops.check_link = &ixgbe_check_mac_link_generic;
ixgbe_get_copper_link_capabilities_generic;
mac->ops.setup_link = ixgbe_setup_mac_link_X540;
mac->ops.setup_rxpba = ixgbe_set_rxpba_generic;
mac->ops.check_link = ixgbe_check_mac_link_generic;
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 128;
mac->rx_pb_size = 384;
mac->max_tx_queues = 128;
mac->max_rx_queues = 128;
mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/*
@ -139,9 +148,9 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
/* Manageability interface */
mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic;
mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic;
return ret_val;
}
@ -469,18 +478,20 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
* be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
*
* @hw: pointer to hardware structure
*
* Returns a negative error code on error, or the 16-bit checksum
**/
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
u16 i, j;
u16 checksum = 0;
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
/* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores here. Instead use
* ixgbe_read_eerd_generic
*/
@ -488,25 +499,25 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) {
for (i = 0; i <= checksum_last_word; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
checksum += word;
if (i != IXGBE_EEPROM_CHECKSUM)
checksum += word;
}
/*
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) {
if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
DEBUGOUT("EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
/* Skip pointer section if the pointer is invalid. */
@ -514,10 +525,9 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
IXGBE_SUCCESS) {
if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
DEBUGOUT("EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
/* Skip pointer section if length is invalid. */
@ -525,11 +535,10 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
(pointer + length) >= hw->eeprom.word_size)
continue;
for (j = pointer+1; j <= pointer+length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word) !=
IXGBE_SUCCESS) {
for (j = pointer + 1; j <= pointer + length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word)) {
DEBUGOUT("EEPROM read failed\n");
break;
return IXGBE_ERR_EEPROM;
}
checksum += word;
}
@ -537,7 +546,7 @@ u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
return checksum;
return (s32)checksum;
}
/**
@ -557,48 +566,49 @@ s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
/*
* Read the first word from the EEPROM. If this times out or fails, do
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != IXGBE_SUCCESS) {
if (status) {
DEBUGOUT("EEPROM read failed\n");
return status;
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
goto out;
checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
if (status)
goto out;
/* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum) {
ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
"Invalid EEPROM checksum");
status = IXGBE_ERR_EEPROM_CHECKSUM;
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
/*
* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum) {
status = IXGBE_ERR_EEPROM_CHECKSUM;
ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
"Invalid EEPROM checksum");
}
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
status = IXGBE_ERR_SWFW_SYNC;
}
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@ -617,34 +627,37 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
/*
* Read the first word from the EEPROM. If this times out or fails, do
/* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != IXGBE_SUCCESS)
if (status) {
DEBUGOUT("EEPROM read failed\n");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
if (status == IXGBE_SUCCESS)
status = ixgbe_update_flash_X540(hw);
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
} else {
status = IXGBE_ERR_SWFW_SYNC;
return status;
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
return IXGBE_ERR_SWFW_SYNC;
status = hw->eeprom.ops.calc_checksum(hw);
if (status < 0)
goto out;
checksum = (u16)(status & 0xffff);
/* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
if (status)
goto out;
status = ixgbe_update_flash_X540(hw);
out:
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@ -658,7 +671,7 @@ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
u32 flup;
s32 status = IXGBE_ERR_EEPROM;
s32 status;
DEBUGFUNC("ixgbe_update_flash_X540");
@ -716,7 +729,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
break;
}
usec_delay(5);
msec_delay(5);
}
if (i == IXGBE_FLUDONE_ATTEMPTS)
@ -734,58 +747,55 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 5;
u32 hwmask = 0;
u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
u32 fwmask = swmask << 5;
u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
u32 timeout = 200;
u32 hwmask = 0;
u32 swfw_sync;
u32 i;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
if (swmask == IXGBE_GSSR_EEP_SM)
hwmask = IXGBE_GSSR_FLASH_SM;
if (swmask & IXGBE_GSSR_EEP_SM)
hwmask |= IXGBE_GSSR_FLASH_SM;
/* SW only mask doesn't have FW bit pair */
if (swmask == IXGBE_GSSR_SW_MNG_SM)
fwmask = 0;
if (mask & IXGBE_GSSR_SW_MNG_SM)
swmask |= IXGBE_GSSR_SW_MNG_SM;
swmask |= swi2c_mask;
fwmask |= swi2c_mask << 2;
for (i = 0; i < timeout; i++) {
/*
* SW NVM semaphore bit is used for access to all
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw)) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
goto out;
} else {
/*
* Firmware currently using resource (fwmask), hardware
* currently using resource (hwmask), or other software
* thread currently using resource (swmask)
*/
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
return IXGBE_SUCCESS;
}
/* Firmware currently using resource (fwmask), hardware
* currently using resource (hwmask), or other software
* thread currently using resource (swmask)
*/
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
}
/* Failed to get SW only semaphore */
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
ret_val = IXGBE_ERR_SWFW_SYNC;
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"Failed to get SW only semaphore");
goto out;
return IXGBE_ERR_SWFW_SYNC;
}
/* If the resource is not released by the FW/HW the SW can assume that
@ -793,32 +803,34 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
if (ixgbe_get_swfw_sync_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw)) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
return IXGBE_SUCCESS;
}
/* If the resource is not released by other SW the SW can assume that
* the other SW malfunctions. In that case the SW should clear all SW
* flags that it does not own and then repeat the whole process once
* again.
*/
else if (swfw_sync & swmask) {
ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM |
IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM |
IXGBE_GSSR_MAC_CSR_SM);
ret_val = IXGBE_ERR_SWFW_SYNC;
}
if (swfw_sync & swmask) {
u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
out:
return ret_val;
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
return IXGBE_ERR_SWFW_SYNC;
}
ixgbe_release_swfw_sync_semaphore(hw);
return IXGBE_ERR_SWFW_SYNC;
}
/**
@ -829,13 +841,15 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
u32 swfw_sync;
u32 swmask = mask;
DEBUGFUNC("ixgbe_release_swfw_sync_X540");
if (mask & IXGBE_GSSR_I2C_MASK)
swmask |= mask & IXGBE_GSSR_I2C_MASK;
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
@ -843,10 +857,11 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
}
/**
* ixgbe_get_nvm_semaphore - Get hardware semaphore
* ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
* @hw: pointer to hardware structure
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
@ -904,7 +919,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
* ixgbe_release_nvm_semaphore - Release hardware semaphore
* ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
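The X540 changes above switch the SWFW functions to a u32 mask and to early-return error handling around the shared EEPROM semaphore. A minimal sketch of the resulting acquire/read/release pattern (the wrapper function and the 0x10 offset are hypothetical; the individual calls all appear in this diff):

/* Hypothetical sketch, not part of this commit: hold the EEPROM
 * software/firmware semaphore around a raw EERD read, mirroring the
 * pattern used by the checksum code above.  Offset 0x10 is an
 * arbitrary example value.
 */
static s32
ixgbe_sketch_locked_eeprom_read(struct ixgbe_hw *hw, u16 *word)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;
	status = ixgbe_read_eerd_generic(hw, 0x10, word);
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}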

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2013, Intel Corporation
Copyright (c) 2001-2014, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -55,11 +55,11 @@ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
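With the prototypes above, ixgbe_calc_eeprom_checksum_X540() now returns s32 so that a negative value can signal an EEPROM read failure while the low 16 bits still carry the checksum. A minimal sketch of that calling convention, mirroring the validate/update paths earlier in this diff (the wrapper function itself is hypothetical):

/* Hypothetical sketch, not part of this commit: consume the s32
 * checksum convention; negative values are error codes, otherwise
 * the checksum is carried in the low 16 bits.
 */
static s32
ixgbe_sketch_read_checksum(struct ixgbe_hw *hw, u16 *checksum)
{
	s32 status;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		return status;		/* propagate EEPROM read failure */
	*checksum = (u16)(status & 0xffff);
	return IXGBE_SUCCESS;
}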

File diff suppressed because it is too large

View File

@ -1,438 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXV_H_
#define _IXV_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/clock.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <machine/smp.h>
#include "ixgbe_api.h"
#include "ixgbe_vf.h"
/* Tunables */
/*
* TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of transmit descriptors allocated by the driver. Increasing this
* value allows the driver to queue more transmits. Each descriptor is 16
* bytes. Performance tests have shown the 2K value to be optimal for top
* performance.
*/
#define DEFAULT_TXD 1024
#define PERFORM_TXD 2048
#define MAX_TXD 4096
#define MIN_TXD 64
/*
* RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
* number of receive descriptors allocated for each RX queue. Increasing this
* value allows the driver to buffer more incoming packets. Each descriptor
* is 16 bytes. A receive buffer is also allocated for each descriptor.
*
* Note: with 8 rings and a dual port card, it is possible to bump up
* against the system mbuf pool limit, you can tune nmbclusters
* to adjust for this.
*/
#define DEFAULT_RXD 1024
#define PERFORM_RXD 2048
#define MAX_RXD 4096
#define MIN_RXD 64
/* Alignment for rings */
#define DBA_ALIGN 128
/*
* This parameter controls the maximum no of times the driver will loop in
* the isr. Minimum Value = 1
*/
#define MAX_LOOP 10
/*
* This is the max watchdog interval, i.e. the time that can
* pass between any two TX clean operations; such cleaning only
* happens while the TX hardware is functioning.
*/
#define IXV_WATCHDOG (10 * hz)
/*
* These parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8)
#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32)
#define IXV_MAX_FRAME_SIZE 0x3F00
/* Flow control constants */
#define IXV_FC_PAUSE 0xFFFF
#define IXV_FC_HI 0x20000
#define IXV_FC_LO 0x10000
/* Defines for printing debug information */
#define DEBUG_INIT 0
#define DEBUG_IOCTL 0
#define DEBUG_HW 0
#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n")
#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B)
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define IXV_EITR_DEFAULT 128
#define IXV_SCATTER 32
#define IXV_RX_HDR 128
#define MSIX_BAR 3
#define IXV_TSO_SIZE 65535
#define IXV_BR_SIZE 4096
#define IXV_LINK_ITR 2000
#define TX_BUFFER_SIZE ((u32) 1514)
#define VFTA_SIZE 128
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/*
*****************************************************************************
* vendor_info_array
*
* This array contains the list of Subvendor/Subdevice IDs on which the driver
* should load.
*
*****************************************************************************
*/
typedef struct _ixv_vendor_info_t {
unsigned int vendor_id;
unsigned int device_id;
unsigned int subvendor_id;
unsigned int subdevice_id;
unsigned int index;
} ixv_vendor_info_t;
struct ixv_tx_buf {
u32 eop_index;
struct mbuf *m_head;
bus_dmamap_t map;
};
struct ixv_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t hmap;
bus_dmamap_t pmap;
};
/*
* Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
*/
struct ixv_dma_alloc {
bus_addr_t dma_paddr;
caddr_t dma_vaddr;
bus_dma_tag_t dma_tag;
bus_dmamap_t dma_map;
bus_dma_segment_t dma_seg;
bus_size_t dma_size;
int dma_nseg;
};
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
u32 eitr; /* cached reg */
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
bool watchdog_check;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
struct ixv_dma_alloc txdma;
u32 next_avail_desc;
u32 next_to_clean;
struct ixv_tx_buf *tx_buffers;
volatile u16 tx_avail;
u32 txd_cmd;
bus_dma_tag_t txtag;
char mtx_name[16];
struct buf_ring *br;
/* Soft Stats */
u32 bytes;
u32 packets;
u64 no_desc_avail;
u64 total_packets;
};
/*
* The Receive ring, one per rx queue
*/
struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
union ixgbe_adv_rx_desc *rx_base;
struct ixv_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool discard;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
struct ixv_rx_buf *rx_buffers;
bus_dma_tag_t htag;
bus_dma_tag_t ptag;
u32 bytes; /* Used for AIM calc */
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
u64 rx_discarded;
};
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSIX
*/
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
u16 num_queues;
/* Info about the board itself */
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 mbxvec;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
/* Support for pluggable optics */
struct task mbx_task; /* Mailbox tasklet */
struct taskqueue *tq;
/*
** Queues:
** This is the irq holder, it has
** an RX/TX pair of rings associated
** with it.
*/
struct ix_queue *queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u64 que_mask;
u32 rx_process_limit;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long mbx_irq;
struct ixgbevf_hw_stats stats;
};
#define IXV_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
/* Workaround to make 8.0 buildable */
#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
if (ALTQ_IS_ENABLED(&ifp->if_snd))
return (1);
#endif
return (!buf_ring_empty(br));
}
#endif
/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
ixv_rx_unrefreshed(struct rx_ring *rxr)
{
struct adapter *adapter = rxr->adapter;
if (rxr->next_to_check > rxr->next_to_refresh)
return (rxr->next_to_check - rxr->next_to_refresh - 1);
else
return ((adapter->num_rx_desc + rxr->next_to_check) -
rxr->next_to_refresh - 1);
}
#endif /* _IXV_H_ */

View File

@ -2,10 +2,10 @@
.PATH: ${.CURDIR}/../../dev/ixgbe
KMOD = if_ixgbe
KMOD = if_ix
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += ixgbe.c ixv.c
SRCS += if_ix.c ix_txrx.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c

sys/modules/ixv/Makefile (new file, 15 lines)
View File

@ -0,0 +1,15 @@
#$FreeBSD$
.PATH: ${.CURDIR}/../../dev/ixgbe
KMOD = if_ixv
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ixv.c ix_txrx.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
SRCS += ixgbe_82599.c ixgbe_82598.c ixgbe_x540.c
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP
.include <bsd.kmod.mk>