This delta syncs the em and igb drivers with Intel,
and adds header split and SCTP support to the igb driver.
Various small improvements and fixes.

MFC after: 2 weeks
Jack F Vogel 2009-04-10 00:05:46 +00:00
parent 14846f9b49
commit d035aa2db2
22 changed files with 1724 additions and 1044 deletions

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -43,9 +43,7 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw);
static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw);
static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw);
static void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
@ -276,6 +274,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
@ -802,17 +802,16 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
index = phy_data & GG82563_DSPD_CABLE_LENGTH;
if (index < GG82563_CABLE_LENGTH_TABLE_SIZE + 5) {
phy->min_cable_length = e1000_gg82563_cable_length_table[index];
phy->max_cable_length =
e1000_gg82563_cable_length_table[index+5];
phy->cable_length = (phy->min_cable_length +
phy->max_cable_length) / 2;
} else {
if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE + 5) {
ret_val = E1000_ERR_PHY;
goto out;
}
phy->min_cable_length = e1000_gg82563_cable_length_table[index];
phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
out:
return ret_val;
}
@ -892,7 +891,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
e1000_check_alt_mac_addr_generic(hw);
ret_val = e1000_check_alt_mac_addr_generic(hw);
out:
return ret_val;
@ -916,7 +915,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
e1000_initialize_hw_bits_80003es2lan(hw);
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
@ -1104,9 +1103,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
/* Bypass Rx and Tx FIFO's */
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
if (ret_val)
goto out;
@ -1147,22 +1146,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (!(hw->mac.ops.check_mng_mode(hw))) {
/* Enable Electrical Idle on the PHY */
data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
ret_val = hw->phy.ops.write_reg(hw,
GG82563_PHY_PWR_MGMT_CTRL,
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
data);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL,
&data);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
&data);
if (ret_val)
goto out;
data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
ret_val = hw->phy.ops.write_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL,
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
data);
if (ret_val)
goto out;
}
@ -1261,7 +1257,6 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_configure_on_link_up");
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000_get_speed_and_duplex_copper_generic(hw,
&speed,
&duplex);
@ -1393,7 +1388,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
* using the kumeran interface. The information retrieved is stored in data.
* Release the semaphore before exiting.
**/
s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data)
static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data)
{
u32 kmrnctrlsta;
s32 ret_val = E1000_SUCCESS;
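For reference, the Kumeran read described in the comment above boils down to writing the register offset plus a read-enable bit into KMRNCTRLSTA, waiting briefly, and reading the value back. A minimal sketch of that sequence follows; the register and bit names are assumed from the generic e1000 shared code, and the real 80003es2lan helper also acquires and releases the MAC-CSR semaphore around it:

static s32
kmrn_read_sketch(struct e1000_hw *hw, u32 offset, u16 *data)
{
	u32 kmrnctrlsta;

	/* Select the Kumeran register and set the read-enable bit */
	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
	               E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);

	usec_delay(2);

	/* The requested value comes back in the low 16 bits */
	kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
	*data = (u16)kmrnctrlsta;

	return E1000_SUCCESS;
}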
@ -1429,7 +1425,8 @@ s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 *data)
* at the offset using the kumeran interface. Release semaphore
* before exiting.
**/
s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, u16 data)
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data)
{
u32 kmrnctrlsta;
s32 ret_val = E1000_SUCCESS;
@ -1461,9 +1458,19 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
if (e1000_check_alt_mac_addr_generic(hw))
ret_val = e1000_read_mac_addr_generic(hw);
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -229,6 +229,8 @@ static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_generic;
/* cleanup LED */
@ -332,7 +334,7 @@ static s32 e1000_init_hw_82540(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_hw_82540");
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -261,6 +261,8 @@ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_82541;
/* cleanup LED */
@ -381,7 +383,7 @@ static s32 e1000_init_hw_82541(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_hw_82541");
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -46,6 +46,7 @@
* 82573E Gigabit Ethernet Controller (Copper)
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
* 82574L Gigabit Network Connection
*/
#include "e1000_api.h"
@ -67,11 +68,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw);
static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count);
static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
@ -330,7 +329,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
break;
case e1000_media_type_internal_serdes:
mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
mac->ops.check_for_link = e1000_check_for_serdes_link_82571;
break;
default:
ret_val = -E1000_ERR_CONFIG;
@ -347,7 +346,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
break;
}
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_82571;
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
@ -356,6 +355,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82571;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
@ -528,8 +529,14 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
if (ret_val)
goto out;
if (hw->mac.type != e1000_82573 && hw->mac.type != e1000_82574)
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
break;
default:
ret_val = e1000_acquire_nvm_generic(hw);
break;
}
if (ret_val)
e1000_put_hw_semaphore_82571(hw);
@ -876,7 +883,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@ -892,6 +901,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
msec_delay(2);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
break;
default:
break;
}
ctrl = E1000_READ_REG(hw, E1000_CTRL);
@ -917,15 +929,30 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574)
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
msec_delay(25);
break;
default:
break;
}
/* Clear any pending interrupt events. */
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
if (!(e1000_check_alt_mac_addr_generic(hw)))
e1000_set_laa_state_82571(hw, TRUE);
/* Install any alternate MAC address into RAR0 */
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
e1000_set_laa_state_82571(hw, TRUE);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
hw->mac.serdes_link_state = e1000_serdes_link_down;
out:
return ret_val;
@ -949,7 +976,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
e1000_initialize_hw_bits_82571(hw);
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
@ -985,17 +1012,21 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
/* ...for both queues. */
if (mac->type != e1000_82573 && mac->type != e1000_82574) {
reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
} else {
switch (mac->type) {
case e1000_82574:
case e1000_82573:
e1000_enable_tx_pkt_filtering_generic(hw);
reg_data = E1000_READ_REG(hw, E1000_GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
E1000_WRITE_REG(hw, E1000_GCR, reg_data);
break;
default:
reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
break;
}
/*
@ -1062,25 +1093,70 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
}
/* Device Control */
if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL);
reg &= ~(1 << 29);
E1000_WRITE_REG(hw, E1000_CTRL, reg);
break;
default:
break;
}
/* Extended Device Control */
if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~(1 << 23);
reg |= (1 << 22);
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
break;
default:
break;
}
/* PCI-Ex Control Register */
if (hw->mac.type == e1000_82574) {
if (hw->mac.type == e1000_82571) {
reg = E1000_READ_REG(hw, E1000_PBA_ECC);
reg |= E1000_PBA_ECC_CORR_EN;
E1000_WRITE_REG(hw, E1000_PBA_ECC, reg);
}
/*
* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
if ((hw->mac.type == e1000_82571) ||
(hw->mac.type == e1000_82572)) {
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
}
/* PCI-Ex Control Registers */
switch (hw->mac.type) {
case e1000_82574:
reg = E1000_READ_REG(hw, E1000_GCR);
reg |= (1 << 22);
E1000_WRITE_REG(hw, E1000_GCR, reg);
/*
* Workaround for hardware errata documented in the errata
* docs.  Fixes an issue where some error-prone or unreliable PCIe
* completions are occurring, particularly with ASPM enabled.
* Without fix, issue can cause tx timeouts.
*/
reg = E1000_READ_REG(hw, E1000_GCR2);
reg |= 1;
E1000_WRITE_REG(hw, E1000_GCR2, reg);
break;
default:
break;
}
return;
@ -1102,31 +1178,38 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_clear_vfta_82571");
if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
if (hw->mng_cookie.vlan_id != 0) {
/*
* The VFTA is a 4096b bit-field, each identifying
* a single VLAN ID. The following operations
* determine which 32b entry (i.e. offset) into the
* array we want to set the VLAN ID (i.e. bit) of
* the manageability unit.
*/
*The VFTA is a 4096b bit-field, each identifying
*a single VLAN ID. The following operations
*determine which 32b entry (i.e. offset) into the
*array we want to set the VLAN ID (i.e. bit) of
*the manageability unit.
*/
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/*
* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
E1000_WRITE_FLUSH(hw);
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/*
*If the offset we want to clear is the same offset of
*the manageability VLAN ID, then clear all bits except
*that of the manageability unit
*/
vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset,
vfta_value);
E1000_WRITE_FLUSH(hw);
}
break;
default:
break;
}
}
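As a concrete example of the offset/bit arithmetic described in the comment above, take a manageability VLAN ID of 100. The shift and mask values (5, 0x7F and 0x1F) are assumptions based on the usual shared-code definitions, which are not shown in this hunk:

	u32 vlan_id = 100;	/* example manageability VLAN ID */
	u32 vfta_offset, vfta_bit_in_reg;

	/* 100 >> 5 = 3: the VLAN lives in the fourth 32-bit VFTA entry */
	vfta_offset = (vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
	              E1000_VFTA_ENTRY_MASK;

	/* 100 & 0x1F = 4: within that entry it occupies bit 4 */
	vfta_bit_in_reg = 1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);

The loop above then clears every VFTA entry except entry 3, which keeps only bit 4 set.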
@ -1176,31 +1259,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
return E1000_SUCCESS;
}
/**
* e1000_update_mc_addr_list_82571 - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
DEBUGFUNC("e1000_update_mc_addr_list_82571");
if (e1000_get_laa_state_82571(hw))
rar_count--;
e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
rar_used_count, rar_count);
}
/**
* e1000_setup_link_82571 - Setup flow control and link settings
@ -1221,10 +1279,15 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
* the default flow control setting, so we explicitly
* set it to full.
*/
if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
if (hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
break;
default:
break;
}
return e1000_setup_link_generic(hw);
}
@ -1305,6 +1368,133 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
return e1000_setup_fiber_serdes_link_generic(hw);
}
/**
* e1000_check_for_serdes_link_82571 - Check for link (Serdes)
* @hw: pointer to the HW structure
*
* Checks for link up on the hardware. If link is not up and we have
* a signal, then we need to force link up.
**/
s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 rxcw;
u32 ctrl;
u32 status;
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_check_for_serdes_link_82571");
ctrl = E1000_READ_REG(hw, E1000_CTRL);
status = E1000_READ_REG(hw, E1000_STATUS);
rxcw = E1000_READ_REG(hw, E1000_RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
if (!(status & E1000_STATUS_LU)) {
/*
* We have lost link, retry autoneg before
* reporting link failure
*/
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
DEBUGOUT("AN_UP -> AN_PROG\n");
}
break;
case e1000_serdes_link_forced_up:
/*
* If we are receiving /C/ ordered sets, re-enable
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
*/
if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
E1000_WRITE_REG(hw, E1000_CTRL,
(ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
DEBUGOUT("FORCED_UP -> AN_PROG\n");
}
break;
case e1000_serdes_link_autoneg_progress:
/*
* If the LU bit is set in the STATUS register,
* autoneg has completed successfully. If not,
* try forcing the link because the far end may be
* available but not capable of autonegotiation.
*/
if (status & E1000_STATUS_LU) {
mac->serdes_link_state =
e1000_serdes_link_autoneg_complete;
DEBUGOUT("AN_PROG -> AN_UP\n");
} else {
/*
* Disable autoneg, force link up and
* full duplex, and change state to forced
*/
E1000_WRITE_REG(hw, E1000_TXCW,
(mac->txcw & ~E1000_TXCW_ANE));
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
/* Configure Flow Control after link up. */
ret_val =
e1000_config_fc_after_link_up_generic(hw);
if (ret_val) {
DEBUGOUT("Error config flow control\n");
break;
}
mac->serdes_link_state =
e1000_serdes_link_forced_up;
DEBUGOUT("AN_PROG -> FORCED_UP\n");
}
mac->serdes_has_link = TRUE;
break;
case e1000_serdes_link_down:
default:
/* The link was down but the receiver has now gained
* valid sync, so let's see if we can bring the link
* up. */
E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
E1000_WRITE_REG(hw, E1000_CTRL,
(ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
DEBUGOUT("DOWN -> AN_PROG\n");
break;
}
} else {
if (!(rxcw & E1000_RXCW_SYNCH)) {
mac->serdes_has_link = FALSE;
mac->serdes_link_state = e1000_serdes_link_down;
DEBUGOUT("ANYSTATE -> DOWN\n");
} else {
/*
* We have sync, and can tolerate one
* invalid (IV) codeword before declaring
* link down, so reread to look again
*/
usec_delay(10);
rxcw = E1000_READ_REG(hw, E1000_RXCW);
if (rxcw & E1000_RXCW_IV) {
mac->serdes_link_state = e1000_serdes_link_down;
mac->serdes_has_link = FALSE;
DEBUGOUT("ANYSTATE -> DOWN\n");
}
}
}
return ret_val;
}
/**
* e1000_valid_led_default_82571 - Verify a valid default LED config
* @hw: pointer to the HW structure
@ -1325,11 +1515,19 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
goto out;
}
if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
else if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT;
switch (hw->mac.type) {
case e1000_82574:
case e1000_82573:
if (*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
break;
default:
if (*data == ID_LED_RESERVED_0000 ||
*data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT;
break;
}
out:
return ret_val;
}
@ -1435,6 +1633,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
return ret_val;
}
/**
* e1000_read_mac_addr_82571 - Read device MAC address
* @hw: pointer to the HW structure
@ -1444,9 +1643,19 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_read_mac_addr_82571");
if (e1000_check_alt_mac_addr_generic(hw))
ret_val = e1000_read_mac_addr_generic(hw);
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -36,6 +36,7 @@
* 82575EB Gigabit Network Connection
* 82575EB Gigabit Backplane Connection
* 82575GB Gigabit Network Connection
* 82575GB Gigabit Network Connection
* 82576 Gigabit Network Connection
*/
@ -75,11 +76,6 @@ static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count);
static void e1000_update_mc_addr_list_82575(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count);
void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw);
/**
@ -281,13 +277,15 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_82575;
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
@ -854,11 +852,18 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
/* SGMII link check is done through the PCS register. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
(e1000_sgmii_active_82575(hw)))
(e1000_sgmii_active_82575(hw))) {
ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
&duplex);
else
/*
* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
} else {
ret_val = e1000_check_for_copper_link_generic(hw);
}
return ret_val;
}
@ -920,101 +925,6 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
return E1000_SUCCESS;
}
/**
* e1000_init_rx_addrs_82575 - Initialize receive address's
* @hw: pointer to the HW structure
* @rar_count: receive address registers
*
* Setups the receive address registers by setting the base receive address
* register to the devices MAC address and clearing all the other receive
* address registers to 0.
**/
static void e1000_init_rx_addrs_82575(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
u8 addr[6] = {0,0,0,0,0,0};
/*
* This function is essentially the same as that of
* e1000_init_rx_addrs_generic. However it also takes care
* of the special case where the register offset of the
* second set of RARs begins elsewhere. This is implicitly taken care by
* function e1000_rar_set_generic.
*/
DEBUGFUNC("e1000_init_rx_addrs_82575");
/* Setup the receive address */
DEBUGOUT("Programming MAC Address into RAR[0]\n");
hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
/* Zero out the other (rar_entry_count - 1) receive addresses */
DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
for (i = 1; i < rar_count; i++) {
hw->mac.ops.rar_set(hw, addr, i);
}
}
/**
* e1000_update_mc_addr_list_82575 - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
static void e1000_update_mc_addr_list_82575(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
u32 hash_value;
u32 i;
u8 addr[6] = {0,0,0,0,0,0};
/*
* This function is essentially the same as that of
* e1000_update_mc_addr_list_generic. However it also takes care
* of the special case where the register offset of the
* second set of RARs begins elsewhere. This is implicitly taken care by
* function e1000_rar_set_generic.
*/
DEBUGFUNC("e1000_update_mc_addr_list_82575");
/*
* Load the first set of multicast addresses into the exact
* filters (RAR). If there are not enough to fill the RAR
* array, clear the filters.
*/
for (i = rar_used_count; i < rar_count; i++) {
if (mc_addr_count) {
e1000_rar_set_generic(hw, mc_addr_list, i);
mc_addr_count--;
mc_addr_list += ETH_ADDR_LEN;
} else {
e1000_rar_set_generic(hw, addr, i);
}
}
/* Clear the old settings from the MTA */
DEBUGOUT("Clearing MTA\n");
for (i = 0; i < hw->mac.mta_reg_count; i++) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
E1000_WRITE_FLUSH(hw);
}
/* Load any remaining multicast addresses into the hash table. */
for (; mc_addr_count > 0; mc_addr_count--) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
hw->mac.ops.mta_set(hw, hash_value);
mc_addr_list += ETH_ADDR_LEN;
}
}
/**
* e1000_shutdown_fiber_serdes_link_82575 - Remove link during power down
* @hw: pointer to the HW structure
@ -1059,6 +969,253 @@ void e1000_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw)
return;
}
/**
* e1000_vmdq_loopback_enable_pf- Enables VM to VM queue loopback replication
* @hw: pointer to the HW structure
**/
void e1000_vmdq_loopback_enable_pf(struct e1000_hw *hw)
{
u32 reg;
reg = E1000_READ_REG(hw, E1000_DTXSWC);
reg |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
}
/**
* e1000_vmdq_loopback_disable_pf - Disable VM to VM queue loopbk replication
* @hw: pointer to the HW structure
**/
void e1000_vmdq_loopback_disable_pf(struct e1000_hw *hw)
{
u32 reg;
reg = E1000_READ_REG(hw, E1000_DTXSWC);
reg &= ~(E1000_DTXSWC_VMDQ_LOOPBACK_EN);
E1000_WRITE_REG(hw, E1000_DTXSWC, reg);
}
/**
* e1000_vmdq_replication_enable_pf - Enable replication of brdcst & multicst
* @hw: pointer to the HW structure
*
* Enables replication of broadcast and multicast packets from the network
* to VM's which have their respective broadcast and multicast accept
* bits set in the VM Offload Register. This gives the PF driver per
* VM granularity control over which VM's get replicated broadcast traffic.
**/
void e1000_vmdq_replication_enable_pf(struct e1000_hw *hw, u32 enables)
{
u32 reg;
u32 i;
for (i = 0; i < MAX_NUM_VFS; i++) {
if (enables & (1 << i)) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
reg |= (E1000_VMOLR_AUPE |
E1000_VMOLR_BAM |
E1000_VMOLR_MPME);
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
}
reg = E1000_READ_REG(hw, E1000_VT_CTL);
reg |= E1000_VT_CTL_VM_REPL_EN;
E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
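A hypothetical caller in the PF driver passes a bitmap with one bit per VM pool, matching the loop above:

	/* Enable broadcast/multicast replication for VM pools 0 and 2 only;
	 * bit n of the mask selects VMOLR(n). */
	e1000_vmdq_replication_enable_pf(hw, (1 << 0) | (1 << 2));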
/**
* e1000_vmdq_replication_disable_pf - Disable replication of brdcst & multicst
* @hw: pointer to the HW structure
*
* Disables replication of broadcast and multicast packets to the VM's.
**/
void e1000_vmdq_replication_disable_pf(struct e1000_hw *hw)
{
u32 reg;
reg = E1000_READ_REG(hw, E1000_VT_CTL);
reg &= ~(E1000_VT_CTL_VM_REPL_EN);
E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
/**
* e1000_vmdq_enable_replication_mode_pf - Enables replication mode in the device
* @hw: pointer to the HW structure
**/
void e1000_vmdq_enable_replication_mode_pf(struct e1000_hw *hw)
{
u32 reg;
reg = E1000_READ_REG(hw, E1000_VT_CTL);
reg |= E1000_VT_CTL_VM_REPL_EN;
E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
/**
* e1000_vmdq_broadcast_replication_enable_pf - Enable replication of brdcst
* @hw: pointer to the HW structure
* @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Enables replication of broadcast packets from the network
* to VM's which have their respective broadcast accept
* bits set in the VM Offload Register. This gives the PF driver per
* VM granularity control over which VM's get replicated broadcast traffic.
**/
void e1000_vmdq_broadcast_replication_enable_pf(struct e1000_hw *hw,
u32 enables)
{
u32 reg;
u32 i;
for (i = 0; i < MAX_NUM_VFS; i++) {
if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
reg |= E1000_VMOLR_BAM;
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
}
}
/**
* e1000_vmdq_broadcast_replication_disable_pf - Disable replication
* of broadcast packets
* @hw: pointer to the HW structure
* @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Disables replication of broadcast packets for specific pools.
* If bam/mpe is disabled on all pools then replication mode is
* turned off.
**/
void e1000_vmdq_broadcast_replication_disable_pf(struct e1000_hw *hw,
u32 disables)
{
u32 reg;
u32 i;
u32 oneenabled = 0;
for (i = 0; i < MAX_NUM_VFS; i++) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
reg &= ~(E1000_VMOLR_BAM);
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
E1000_VMOLR_BAM |
E1000_VMOLR_MPME)))
oneenabled = 1;
}
if (!oneenabled) {
reg = E1000_READ_REG(hw, E1000_VT_CTL);
reg &= ~(E1000_VT_CTL_VM_REPL_EN);
E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
}
/**
* e1000_vmdq_multicast_promiscuous_enable_pf - Enable promiscuous reception
* @hw: pointer to the HW structure
* @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Enables promiscuous reception of multicast packets from the network
* to VM's which have their respective multicast promiscuous mode enable
* bits set in the VM Offload Register. This gives the PF driver per
* VM granularity control over which VM's get all multicast traffic.
**/
void e1000_vmdq_multicast_promiscuous_enable_pf(struct e1000_hw *hw,
u32 enables)
{
u32 reg;
u32 i;
for (i = 0; i < MAX_NUM_VFS; i++) {
if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
reg |= E1000_VMOLR_MPME;
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
}
}
/**
* e1000_vmdq_multicast_promiscuous_disable_pf - Disable promiscuous
* reception of multicast packets
* @hw: pointer to the HW structure
* @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Disables promiscuous reception of multicast packets for specific pools.
* If bam/mpe is disabled on all pools then replication mode is
* turned off.
**/
void e1000_vmdq_multicast_promiscuous_disable_pf(struct e1000_hw *hw,
u32 disables)
{
u32 reg;
u32 i;
u32 oneenabled = 0;
for (i = 0; i < MAX_NUM_VFS; i++) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
reg &= ~(E1000_VMOLR_MPME);
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
if (!oneenabled && (reg & (E1000_VMOLR_AUPE |
E1000_VMOLR_BAM |
E1000_VMOLR_MPME)))
oneenabled = 1;
}
if (!oneenabled) {
reg = E1000_READ_REG(hw, E1000_VT_CTL);
reg &= ~(E1000_VT_CTL_VM_REPL_EN);
E1000_WRITE_REG(hw, E1000_VT_CTL, reg);
}
}
/**
* e1000_vmdq_aupe_enable_pf - Enable acceptance of untagged packets
* @hw: pointer to the HW structure
* @enables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Enables acceptance of packets from the network which do not have
* a VLAN tag but match the exact MAC filter of a given VM.
**/
void e1000_vmdq_aupe_enable_pf(struct e1000_hw *hw, u32 enables)
{
u32 reg;
u32 i;
for (i = 0; i < MAX_NUM_VFS; i++) {
if ((enables == ALL_QUEUES) || (enables & (1 << i))) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
reg |= E1000_VMOLR_AUPE;
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
}
}
/**
* e1000_vmdq_aupe_disable_pf - Disable acceptance of untagged packets
* @hw: pointer to the HW structure
* @disables: PoolSet Bit - if set to ALL_QUEUES, apply to all pools.
*
* Disables acceptance of packets from the network which do not have
* a VLAN tag but match the exact MAC filter of a given VM.
**/
void e1000_vmdq_aupe_disable_pf(struct e1000_hw *hw, u32 disables)
{
u32 reg;
u32 i;
for (i = 0; i < MAX_NUM_VFS; i++) {
if ((disables == ALL_QUEUES) || (disables & (1 << i))) {
reg = E1000_READ_REG(hw, E1000_VMOLR(i));
reg &= ~E1000_VMOLR_AUPE;
E1000_WRITE_REG(hw, E1000_VMOLR(i), reg);
}
}
}
/**
* e1000_reset_hw_82575 - Reset hardware
* @hw: pointer to the HW structure
@ -1113,7 +1270,8 @@ static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
icr = E1000_READ_REG(hw, E1000_ICR);
e1000_check_alt_mac_addr_generic(hw);
/* Install any alternate MAC address into RAR0 */
ret_val = e1000_check_alt_mac_addr_generic(hw);
return ret_val;
}
@ -1133,7 +1291,7 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
DEBUGFUNC("e1000_init_hw_82575");
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
ret_val = mac->ops.id_led_init(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
@ -1144,7 +1302,8 @@ static s32 e1000_init_hw_82575(struct e1000_hw *hw)
mac->ops.clear_vfta(hw);
/* Setup the receive address */
e1000_init_rx_addrs_82575(hw, rar_count);
e1000_init_rx_addrs_generic(hw, rar_count);
/* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
@ -1502,9 +1661,19 @@ static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_read_mac_addr_82575");
if (e1000_check_alt_mac_addr_generic(hw))
ret_val = e1000_read_mac_addr_generic(hw);
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -128,6 +128,7 @@ struct e1000_adv_context_desc {
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000
#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000
#define E1000_SRRCTL_DROP_EN 0x80000000
#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F
#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00
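These SRRCTL fields are what the header-split support mentioned in the commit message programs for each receive queue. A rough sketch of the idea follows; the E1000_SRRCTL(q) register macro, the E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS encoding, and the 1 KB / 64 B field granularities are assumptions about the full shared-code headers rather than values shown in this excerpt:

static void
srrctl_hdr_split_sketch(struct e1000_hw *hw, int q, u32 rx_buf_len,
    u32 hdr_buf_len)
{
	u32 srrctl;

	/* Packet buffer size in 1 KB units, header buffer size in 64 B units */
	srrctl  = (rx_buf_len / 1024) & E1000_SRRCTL_BSIZEPKT_MASK;
	srrctl |= ((hdr_buf_len / 64) << 8) & E1000_SRRCTL_BSIZEHDR_MASK;

	/* Select a header-split descriptor format (assumed name) */
	srrctl &= ~E1000_SRRCTL_DESCTYPE_MASK;
	srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;

	E1000_WRITE_REG(hw, E1000_SRRCTL(q), srrctl);
}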
@ -137,6 +138,7 @@ struct e1000_adv_context_desc {
#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
#define E1000_MRQC_ENABLE_VMDQ 0x00000003
#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005
#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
@ -313,6 +315,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
/* IPSec Encrypt Enable for ESP */
#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000
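The new L4T_SCTP encoding is how the SCTP support mentioned in the commit message is requested: the driver posts an advanced TX context descriptor describing the header layout and sets the L4 type to SCTP so the hardware inserts the CRC32c checksum. A minimal sketch, assuming the e1000_adv_tx_context_desc field names plus the DTYP/DEXT/IPV4/MACLEN definitions from the full header (only the TUCMD values above appear in this excerpt) and using htole32() for whatever byte-order helper the driver employs:

static void
igb_sctp_ctx_sketch(struct e1000_adv_tx_context_desc *TXD,
    u32 ehdrlen, u32 ip_hlen)
{
	/* MACLEN = Ethernet header length, low bits = IP header length */
	u32 vlan_macip_lens = (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT) | ip_hlen;
	u32 type_tucmd = E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT |
	    E1000_ADVTXD_TUCMD_IPV4 | E1000_ADVTXD_TUCMD_L4T_SCTP;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd);
	TXD->seqnum_seed = 0;
	TXD->mss_l4len_idx = 0;
}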
@ -381,6 +384,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
#define E1000_DTXSWC_LLE_SHIFT 16
#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
@ -393,82 +398,44 @@ struct e1000_adv_tx_context_desc {
#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */
#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */
#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */
#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */
#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */
#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */
#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */
#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */
#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */
#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
#define E1000_VLVF_ARRAY_SIZE 32
#define E1000_VLVF_VLANID_MASK 0x00000FFF
#define E1000_VLVF_POOLSEL_SHIFT 12
#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT)
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is TRUE if it is E1000_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
#define E1000_VT_MSGTYPE_ACK 0xF0000000 /* Messages below or'd with
* this are the ACK */
#define E1000_VT_MSGTYPE_NACK 0xFF000000 /* Messages below or'd with
* this are the NACK */
#define E1000_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
#define E1000_VF_MSGTYPE_REQ_MAC 1 /* VF needs to know its MAC */
#define E1000_VF_MSGTYPE_VFLR 2 /* VF notifies VFLR to PF */
#define E1000_VF_SET_MULTICAST 3 /* VF requests PF to set MC addr */
#define E1000_VF_SET_VLAN 4 /* VF requests PF to set VLAN */
/* Add 100h to all PF msgs, leaves room for up to 255 discrete message types
* from VF to PF - way more than we'll ever need */
#define E1000_PF_MSGTYPE_RESET (1 + 0x100) /* PF notifies global reset
* imminent to VF */
#define E1000_PF_MSGTYPE_LSC (2 + 0x100) /* PF notifies VF of LSC... VF
* will see extra msg info for
* status */
#define E1000_PF_MSG_LSCDOWN (1 << E1000_VT_MSGINFO_SHIFT)
#define E1000_PF_MSG_LSCUP (2 << E1000_VT_MSGINFO_SHIFT)
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
#define ALL_QUEUES 0xFFFF
s32 e1000_send_mail_to_pf_vf(struct e1000_hw *hw, u32 *msg,
s16 size);
s32 e1000_receive_mail_from_pf_vf(struct e1000_hw *hw,
u32 *msg, s16 size);
s32 e1000_send_mail_to_vf(struct e1000_hw *hw, u32 *msg,
u32 vf_number, s16 size);
s32 e1000_receive_mail_from_vf(struct e1000_hw *hw, u32 *msg,
u32 vf_number, s16 size);
void e1000_vmdq_loopback_enable_vf(struct e1000_hw *hw);
void e1000_vmdq_loopback_disable_vf(struct e1000_hw *hw);
void e1000_vmdq_replication_enable_vf(struct e1000_hw *hw, u32 enables);
void e1000_vmdq_replication_disable_vf(struct e1000_hw *hw);
void e1000_vmdq_enable_replication_mode_vf(struct e1000_hw *hw);
void e1000_vmdq_broadcast_replication_enable_vf(struct e1000_hw *hw,
void e1000_vmdq_loopback_enable_pf(struct e1000_hw *hw);
void e1000_vmdq_loopback_disable_pf(struct e1000_hw *hw);
void e1000_vmdq_replication_enable_pf(struct e1000_hw *hw, u32 enables);
void e1000_vmdq_replication_disable_pf(struct e1000_hw *hw);
void e1000_vmdq_enable_replication_mode_pf(struct e1000_hw *hw);
void e1000_vmdq_broadcast_replication_enable_pf(struct e1000_hw *hw,
u32 enables);
void e1000_vmdq_multicast_replication_enable_vf(struct e1000_hw *hw,
void e1000_vmdq_multicast_promiscuous_enable_pf(struct e1000_hw *hw,
u32 enables);
void e1000_vmdq_broadcast_replication_disable_vf(struct e1000_hw *hw,
void e1000_vmdq_broadcast_replication_disable_pf(struct e1000_hw *hw,
u32 disables);
void e1000_vmdq_multicast_replication_disable_vf(struct e1000_hw *hw,
void e1000_vmdq_multicast_promiscuous_disable_pf(struct e1000_hw *hw,
u32 disables);
bool e1000_check_for_pf_ack_vf(struct e1000_hw *hw);
bool e1000_check_for_pf_mail_vf(struct e1000_hw *hw, u32*);
#endif
void e1000_vmdq_aupe_enable_pf(struct e1000_hw *hw, u32 enables);
void e1000_vmdq_aupe_disable_pf(struct e1000_hw *hw, u32 disables);
#endif /* _E1000_82575_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -112,6 +112,7 @@ s32 e1000_init_phy_params(struct e1000_hw *hw)
return ret_val;
}
/**
* e1000_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@ -250,12 +251,14 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
case E1000_DEV_ID_82575GB_QUAD_COPPER_PM:
mac->type = e1000_82575;
break;
case E1000_DEV_ID_82576:
case E1000_DEV_ID_82576_FIBER:
case E1000_DEV_ID_82576_SERDES:
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_NS:
mac->type = e1000_82576;
break;
default:
@ -370,7 +373,6 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
ret_val = e1000_init_phy_params(hw);
if (ret_val)
goto out;
}
out:
@ -426,26 +428,16 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* Updates the Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this. Currently no func pointer
* exists and all implementations are handled in the generic version of this
* function.
**/
void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, u32 rar_used_count,
u32 rar_count)
u32 mc_addr_count)
{
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw,
mc_addr_list,
mc_addr_count,
rar_used_count,
rar_count);
hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
mc_addr_count);
}
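With this API change the caller no longer does any RAR bookkeeping; it simply hands over the packed multicast list. A hypothetical driver-side call after rebuilding its list might look like:

	/* mta holds mcnt packed six-byte multicast addresses built by the
	 * driver (names hypothetical); RAR handling now lives in the
	 * shared code. */
	e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);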
/**
@ -616,6 +608,21 @@ s32 e1000_blink_led(struct e1000_hw *hw)
return E1000_SUCCESS;
}
/**
* e1000_id_led_init - store LED configurations in SW
* @hw: pointer to the HW structure
*
* Initializes the LED config in SW. This is a function pointer entry point
* called by drivers.
**/
s32 e1000_id_led_init(struct e1000_hw *hw)
{
if (hw->mac.ops.id_led_init)
return hw->mac.ops.id_led_init(hw);
return E1000_SUCCESS;
}
/**
* e1000_led_on - Turn on SW controllable LED
* @hw: pointer to the HW structure

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -70,14 +70,14 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
void e1000_update_mc_addr_list(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count);
u8 *mc_addr_list, u32 mc_addr_count);
s32 e1000_setup_led(struct e1000_hw *hw);
s32 e1000_cleanup_led(struct e1000_hw *hw);
s32 e1000_check_reset_block(struct e1000_hw *hw);
s32 e1000_blink_led(struct e1000_hw *hw);
s32 e1000_led_on(struct e1000_hw *hw);
s32 e1000_led_off(struct e1000_hw *hw);
s32 e1000_id_led_init(struct e1000_hw *hw);
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
s32 e1000_get_cable_length(struct e1000_hw *hw);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -144,6 +144,7 @@
#define E1000_CTRL_EXT_PFRSTD 0x00004000
#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000
@ -162,8 +163,6 @@
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
/* IAME enable bit (27) was removed in >= 82575 */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */
#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers
* after IMS clear */
#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error
* detection enabled */
#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity
@ -402,6 +401,7 @@
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */
@ -692,14 +692,19 @@
/* PBA constants */
#define E1000_PBA_6K 0x0006 /* 6KB */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_10K 0x000A /* 10KB */
#define E1000_PBA_12K 0x000C /* 12KB */
#define E1000_PBA_14K 0x000E /* 14KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
#define E1000_PBA_18K 0x0012
#define E1000_PBA_20K 0x0014
#define E1000_PBA_22K 0x0016
#define E1000_PBA_24K 0x0018
#define E1000_PBA_26K 0x001A
#define E1000_PBA_30K 0x001E
#define E1000_PBA_32K 0x0020
#define E1000_PBA_34K 0x0022
#define E1000_PBA_35K 0x0023
#define E1000_PBA_38K 0x0026
#define E1000_PBA_40K 0x0028
#define E1000_PBA_48K 0x0030 /* 48KB */
@ -761,6 +766,13 @@
#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
/* PBA ECC Register */
#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
#define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */
/* Extended Interrupt Cause Read */
#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */
#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */
@ -906,6 +918,8 @@
#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */
#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */
#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
@ -936,6 +950,10 @@
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
#define E1000_RAL_MAC_ADDR_LEN 4
#define E1000_RAH_MAC_ADDR_LEN 2
#define E1000_RAH_POOL_MASK 0x03FC0000
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
#define E1000_SUCCESS 0
@ -951,6 +969,7 @@
#define E1000_BLK_PHY_RESET 12
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50
@ -1145,6 +1164,7 @@
#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SECVAL_SHIFT 22
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */
#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -94,6 +94,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82573E_IAMT 0x108C
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
@ -123,10 +124,11 @@ struct e1000_hw;
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
#define E1000_DEV_ID_82576_VF 0x10CA
#define E1000_DEV_ID_82576_NS 0x150A
#define E1000_DEV_ID_82575EB_COPPER 0x10A7
#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
#define E1000_DEV_ID_82575GB_QUAD_COPPER_PM 0x10E2
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@ -136,6 +138,9 @@ struct e1000_hw;
#define E1000_FUNC_0 0
#define E1000_FUNC_1 1
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_undefined = 0,
e1000_82542,
@ -160,7 +165,6 @@ enum e1000_mac_type {
e1000_ich10lan,
e1000_82575,
e1000_82576,
e1000_vfadapt,
e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
};
@ -279,6 +283,13 @@ enum e1000_smart_speed {
e1000_smart_speed_off
};
enum e1000_serdes_link_state {
e1000_serdes_link_down = 0,
e1000_serdes_link_autoneg_progress,
e1000_serdes_link_autoneg_complete,
e1000_serdes_link_forced_up
};
/* Receive Descriptor */
struct e1000_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
@ -496,37 +507,6 @@ struct e1000_hw_stats {
u64 doosync;
};
struct e1000_vf_stats {
u64 base_gprc;
u64 base_gptc;
u64 base_gorc;
u64 base_gotc;
u64 base_mprc;
u64 base_gotlbc;
u64 base_gptlbc;
u64 base_gorlbc;
u64 base_gprlbc;
u32 last_gprc;
u32 last_gptc;
u32 last_gorc;
u32 last_gotc;
u32 last_mprc;
u32 last_gotlbc;
u32 last_gptlbc;
u32 last_gorlbc;
u32 last_gprlbc;
u64 gprc;
u64 gptc;
u64 gorc;
u64 gotc;
u64 mprc;
u64 gotlbc;
u64 gptlbc;
u64 gorlbc;
u64 gprlbc;
};
struct e1000_phy_stats {
u32 idle_errors;
@ -581,6 +561,7 @@ struct e1000_host_mng_command_info {
struct e1000_mac_operations {
/* Function pointers for the MAC. */
s32 (*init_params)(struct e1000_hw *);
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *hw);
@ -592,7 +573,7 @@ struct e1000_mac_operations {
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
void (*shutdown_serdes)(struct e1000_hw *);
@ -666,6 +647,10 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
#define MAX_MTA_REG 128 /* this must be the maximum size of the MTA register
* table in all supported adapters
*/
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
@ -678,6 +663,7 @@ struct e1000_mac_info {
bool get_link_status;
bool in_ifs_mode;
bool report_tx_early;
enum e1000_serdes_link_state serdes_link_state;
bool serdes_has_link;
bool tx_pkt_filtering;
};
@ -785,12 +771,15 @@ struct e1000_dev_spec_ich8lan {
struct e1000_dev_spec_82575 {
bool sgmii_active;
bool global_device_reset;
};
struct e1000_dev_spec_vf {
u32 vf_number;
u32 v2p_mailbox;
};
struct e1000_hw {
void *back;

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -197,7 +197,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->ops.read_reg = e1000_read_phy_reg_bm;
ret_val = e1000_determine_phy_address(hw);
if (ret_val) {
DEBUGOUT("Cannot determine PHY address. Erroring out\n");
DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
goto out;
}
}
@ -319,6 +319,9 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
#if defined(NAHUM4) && !defined(NO_PCH_A_SUPPORT)
u16 pci_cfg;
#endif
DEBUGFUNC("e1000_init_mac_params_ich8lan");
@ -360,18 +363,30 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_generic;
/* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
/* turn on/off LED */
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
/* clear hardware counters */
mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
/* LED operations */
switch (mac->type) {
case e1000_ich8lan:
case e1000_ich9lan:
case e1000_ich10lan:
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
mac->ops.blink_led = e1000_blink_led_generic;
/* setup LED */
mac->ops.setup_led = e1000_setup_led_generic;
/* cleanup LED */
mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
/* turn on/off LED */
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
break;
default:
break;
}
/* Enable PCS Lock-loss workaround for ICH8 */
if (mac->type == e1000_ich8lan)
e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
@ -993,48 +1008,65 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
* @bank: pointer to the variable that returns the active bank
*
* Reads signature byte from the NVM using the flash access registers.
* Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
**/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
s32 ret_val = E1000_SUCCESS;
u32 eecd;
struct e1000_nvm_info *nvm = &hw->nvm;
/* flash bank size is in words */
u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
u8 bank_high_byte = 0;
u8 sig_byte = 0;
s32 ret_val = E1000_SUCCESS;
if (hw->mac.type != e1000_ich10lan) {
if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_SEC1VAL)
*bank = 1;
else
*bank = 0;
} else {
/*
* Make sure the signature for bank 0 is valid,
* if not check for bank1
*/
e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte);
if ((bank_high_byte & 0xC0) == 0x80) {
*bank = 0;
} else {
/*
* find if segment 1 is valid by verifying
* bit 15:14 = 10b in word 0x13
*/
e1000_read_flash_byte_ich8lan(hw,
act_offset + bank1_offset,
&bank_high_byte);
/* bank1 has a valid signature equivalent to SEC1V */
if ((bank_high_byte & 0xC0) == 0x80) {
switch (hw->mac.type) {
case e1000_ich8lan:
case e1000_ich9lan:
eecd = E1000_READ_REG(hw, E1000_EECD);
if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
E1000_EECD_SEC1VAL_VALID_MASK) {
if (eecd & E1000_EECD_SEC1VAL)
*bank = 1;
} else {
DEBUGOUT("ERROR: EEPROM not present\n");
ret_val = -E1000_ERR_NVM;
}
}
}
else
*bank = 0;
goto out;
}
DEBUGOUT("Unable to determine valid NVM bank via EEC - "
"reading flash signature\n");
/* fall-thru */
default:
/* set bank to 0 in case flash read fails */
*bank = 0;
/* Check bank 0 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
&sig_byte);
if (ret_val)
goto out;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 0;
goto out;
}
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
bank1_offset,
&sig_byte);
if (ret_val)
goto out;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
E1000_ICH_NVM_SIG_VALUE) {
*bank = 1;
goto out;
}
DEBUGOUT("ERROR: No valid NVM bank present\n");
ret_val = -E1000_ERR_NVM;
break;
}
out:
return ret_val;
}
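The new bank-detect path above boils down to a single mask test on the high byte of NVM word 0x13. A standalone worked example (illustrative only; the word value is invented) showing why masking with 0xC0 and comparing against 0x80 is the same as checking bits 15:14 = 10b:

/*
 * The signature lives in bits 15:14 of NVM word 0x13; the driver reads
 * the high byte of that word (byte offset 0x13 * 2 + 1) and masks it.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint16_t sig_word = 0x8abc;     /* invented contents of word 0x13 */
        uint8_t sig_byte = (uint8_t)(sig_word >> 8);    /* high byte, as read from flash */

        /* 0xC0/0x80 play the role of E1000_ICH_NVM_VALID_SIG_MASK/_SIG_VALUE */
        assert((sig_byte & 0xC0) == 0x80);      /* bits 15:14 == 10b -> bank is valid */
        return (0);
}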
@ -1072,7 +1104,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val != E1000_SUCCESS)
goto out;
goto release;
act_offset = (bank) ? nvm->flash_bank_size : 0;
act_offset += offset;
@ -1091,9 +1123,13 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
}
release:
nvm->ops.release(hw);
out:
if (ret_val)
DEBUGOUT1("NVM read error: %d\n", ret_val);
return ret_val;
}
@ -1426,17 +1462,27 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
* is going to be written
*/
ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
if (ret_val != E1000_SUCCESS)
if (ret_val != E1000_SUCCESS) {
nvm->ops.release(hw);
goto out;
}
if (bank == 0) {
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
e1000_erase_flash_bank_ich8lan(hw, 1);
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
e1000_erase_flash_bank_ich8lan(hw, 0);
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
}
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
@ -1448,9 +1494,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
e1000_read_flash_word_ich8lan(hw,
i + old_bank_offset,
&data);
ret_val = e1000_read_flash_word_ich8lan(hw, i +
old_bank_offset,
&data);
if (ret_val)
break;
}
/*
@ -1500,7 +1548,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
* and we need to change bit 14 to 0b
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
e1000_read_flash_word_ich8lan(hw, act_offset, &data);
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
@ -1539,6 +1591,9 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
msec_delay(10);
out:
if (ret_val)
DEBUGOUT1("NVM update error: %d\n", ret_val);
return ret_val;
}
@ -1997,11 +2052,10 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
e1000_initialize_hw_bits_ich8lan(hw);
/* Initialize identification LED */
ret_val = e1000_id_led_init_generic(hw);
if (ret_val) {
DEBUGOUT("Error initializing identification LED\n");
ret_val = mac->ops.id_led_init(hw);
if (ret_val)
/* This is not fatal and we should not stop init due to this */
}
DEBUGOUT("Error initializing identification LED\n");
/* Setup the receive address. */
e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
@ -2140,7 +2194,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
hw->fc.current_mode = hw->fc.requested_mode;
DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
hw->fc.current_mode);
hw->fc.current_mode);
/* Continue to configure the copper link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
@ -2195,17 +2249,18 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto out;
if (hw->phy.type == e1000_phy_igp_3) {
switch (hw->phy.type) {
case e1000_phy_igp_3:
ret_val = e1000_copper_link_setup_igp(hw);
if (ret_val)
goto out;
} else if (hw->phy.type == e1000_phy_bm) {
break;
case e1000_phy_bm:
ret_val = e1000_copper_link_setup_m88(hw);
if (ret_val)
goto out;
}
if (hw->phy.type == e1000_phy_ife) {
break;
case e1000_phy_ife:
ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
&reg_data);
if (ret_val)
@ -2229,6 +2284,9 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
reg_data);
if (ret_val)
goto out;
break;
default:
break;
}
ret_val = e1000_setup_copper_link_generic(hw);
@ -2476,18 +2534,21 @@ void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
* 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
* to a lower speed.
*
* Should only be called for ICH9 and ICH10 devices.
* Should only be called for applicable parts.
**/
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
{
u32 phy_ctrl;
if ((hw->mac.type == e1000_ich10lan) ||
(hw->mac.type == e1000_ich9lan)) {
switch (hw->mac.type) {
case e1000_ich9lan:
case e1000_ich10lan:
phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_GBE_DISABLE;
E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
default:
break;
}
return;


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -41,9 +41,10 @@
#define ICH_FLASH_FADDR 0x0008
#define ICH_FLASH_FDATA0 0x0010
#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
/* Requires up to 10 seconds when MNG might be accessing part. */
#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
@ -70,12 +71,14 @@
#define E1000_ICH_MNG_IAMT_MODE 0x2
#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_OFF2 << 8) | \
(ID_LED_DEF1_ON2 << 4) | \
(ID_LED_OFF1_OFF2 << 8) | \
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
#define E1000_ICH_NVM_SIG_WORD 0x13
#define E1000_ICH_NVM_SIG_MASK 0xC000
#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
#define E1000_ICH_NVM_SIG_VALUE 0x80
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
@ -99,6 +102,25 @@
#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
#define IGP3_PM_CTRL_FORCE_PWR_DOWN 0x0020
/* PHY Wakeup Registers and defines */
#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
/*
* Additional interrupts need to be handled for ICH family:
* DSW = The FW changed the status of the DISSW bit in FWSM
@ -128,5 +150,8 @@ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw);
#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
#endif
#endif


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -35,6 +35,7 @@
#include "e1000_api.h"
static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
/**
* e1000_init_mac_ops_generic - Initialize MAC function pointers
@ -126,7 +127,7 @@ bool e1000_null_mng_mode(struct e1000_hw *hw)
* e1000_null_update_mc - No-op function, return void
* @hw: pointer to the HW structure
**/
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a, u32 b, u32 c)
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a)
{
DEBUGFUNC("e1000_null_update_mc");
return;
@ -261,18 +262,17 @@ s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
* Determines the LAN function id by reading memory-mapped registers
* and swaps the port value if requested.
**/
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
{
struct e1000_bus_info *bus = &hw->bus;
u32 reg;
/*
* The status register reports the correct function number
* for the device regardless of function swap state.
*/
reg = E1000_READ_REG(hw, E1000_STATUS);
bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
/* check for a port swap */
reg = E1000_READ_REG(hw, E1000_FACTPS);
if (reg & E1000_FACTPS_LFS)
bus->func ^= 0x1;
}
/**
@ -358,6 +358,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
u8 mac_addr[ETH_ADDR_LEN] = {0};
DEBUGFUNC("e1000_init_rx_addrs_generic");
@ -368,12 +369,8 @@ void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
for (i = 1; i < rar_count; i++) {
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
E1000_WRITE_FLUSH(hw);
}
for (i = 1; i < rar_count; i++)
hw->mac.ops.rar_set(hw, mac_addr, i);
}
/**
@ -382,10 +379,11 @@ void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
*
* Checks the nvm for an alternate MAC address. An alternate MAC address
* can be setup by pre-boot software and must be treated like a permanent
* address and must override the actual permanent MAC address. If an
* alternate MAC address is found it is saved in the hw struct and
* programmed into RAR0 and the function returns success, otherwise the
* function returns an error.
* address and must override the actual permanent MAC address. If an
* alternate MAC address is found it is programmed into RAR0, replacing
* the permanent address that was installed into RAR0 by the Si on reset.
* This function will return SUCCESS unless it encounters an error while
* reading the EEPROM.
**/
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
@ -404,13 +402,12 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
}
if (nvm_alt_mac_addr_offset == 0xFFFF) {
ret_val = -(E1000_NOT_IMPLEMENTED);
/* There is no Alternate MAC Address */
goto out;
}
if (hw->bus.func == E1000_FUNC_1)
nvm_alt_mac_addr_offset += ETH_ADDR_LEN/sizeof(u16);
nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
for (i = 0; i < ETH_ADDR_LEN; i += 2) {
offset = nvm_alt_mac_addr_offset + (i >> 1);
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@ -425,14 +422,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
/* if multicast bit is set, the alternate address will not be used */
if (alt_mac_addr[0] & 0x01) {
ret_val = -(E1000_NOT_IMPLEMENTED);
DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
goto out;
}
for (i = 0; i < ETH_ADDR_LEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0);
/*
* We have a valid alternate MAC address, and we want to treat it the
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
out:
return ret_val;
@ -467,8 +466,15 @@ void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
/*
* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
E1000_WRITE_FLUSH(hw);
}
/**
@ -512,55 +518,36 @@ void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @rar_used_count: the first RAR register free to program
* @rar_count: total number of supported Receive Address Registers
*
* Updates the Receive Address Registers and Multicast Table Array.
* Updates entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value;
u32 i;
u32 hash_value, hash_bit, hash_reg;
int i;
DEBUGFUNC("e1000_update_mc_addr_list_generic");
/*
* Load the first set of multicast addresses into the exact
* filters (RAR). If there are not enough to fill the RAR
* array, clear the filters.
*/
for (i = rar_used_count; i < rar_count; i++) {
if (mc_addr_count) {
hw->mac.ops.rar_set(hw, mc_addr_list, i);
mc_addr_count--;
mc_addr_list += ETH_ADDR_LEN;
} else {
E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
E1000_WRITE_FLUSH(hw);
}
}
/* clear mta_shadow */
memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
/* Clear the old settings from the MTA */
DEBUGOUT("Clearing MTA\n");
for (i = 0; i < hw->mac.mta_reg_count; i++) {
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
E1000_WRITE_FLUSH(hw);
}
/* Load any remaining multicast addresses into the hash table. */
for (; mc_addr_count > 0; mc_addr_count--) {
/* update mta_shadow from mc_addr_list */
for (i = 0; (u32) i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
hw->mac.ops.mta_set(hw, hash_value);
mc_addr_list += ETH_ADDR_LEN;
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
mc_addr_list += (ETH_ADDR_LEN);
}
/* replace the entire MTA table */
for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
E1000_WRITE_FLUSH(hw);
}
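A quick standalone illustration of the mta_shadow indexing used above (assumes the 128-entry, 32-bit-wide MTA implied by MAX_MTA_REG; the hash value is invented):

/*
 * Worked example (illustrative only): how a 12-bit multicast hash maps
 * into the shadow MTA -- upper bits select the register, low 5 bits
 * select the bit within it.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint32_t mta_reg_count = 128;   /* MAX_MTA_REG */
        uint32_t hash_value = 0x563;    /* invented 12-bit hash */
        uint32_t hash_reg, hash_bit;

        hash_reg = (hash_value >> 5) & (mta_reg_count - 1);
        hash_bit = hash_value & 0x1F;

        assert(hash_reg == 43 && hash_bit == 3);        /* 0x563 -> MTA[43], bit 3 */
        return (0);
}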
/**
@ -1022,7 +1009,7 @@ s32 e1000_setup_link_generic(struct e1000_hw *hw)
hw->fc.current_mode = hw->fc.requested_mode;
DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
hw->fc.current_mode);
hw->fc.current_mode);
/* Call the necessary media_type subroutine to configure the link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -44,7 +44,7 @@ void e1000_null_mac_generic(struct e1000_hw *hw);
s32 e1000_null_ops_generic(struct e1000_hw *hw);
s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
bool e1000_null_mng_mode(struct e1000_hw *hw);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a, u32 b, u32 c);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
void e1000_null_mta_set(struct e1000_hw *hw, u32 a);
void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
@ -63,7 +63,6 @@ s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
void e1000_set_lan_id_single_port(struct e1000_hw *hw);
void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
@ -73,8 +72,7 @@ s32 e1000_id_led_init_generic(struct e1000_hw *hw);
s32 e1000_led_on_generic(struct e1000_hw *hw);
s32 e1000_led_off_generic(struct e1000_hw *hw);
void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count);
u8 *mc_addr_list, u32 mc_addr_count);
s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -34,6 +34,8 @@
#include "e1000_api.h"
static void e1000_reload_nvm_generic(struct e1000_hw *hw);
/**
* e1000_init_nvm_ops_generic - Initialize NVM function pointers
* @hw: pointer to the HW structure
@ -815,31 +817,23 @@ s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
**/
s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 offset, nvm_data, i;
u32 rar_high;
u32 rar_low;
u16 i;
DEBUGFUNC("e1000_read_mac_addr");
rar_high = E1000_READ_REG(hw, E1000_RAH(0));
rar_low = E1000_READ_REG(hw, E1000_RAL(0));
for (i = 0; i < ETH_ADDR_LEN; i += 2) {
offset = i >> 1;
ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
if (ret_val) {
DEBUGOUT("NVM Read Error\n");
goto out;
}
hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
}
for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
/* Flip last bit of mac address if we're on second port */
if (hw->bus.func == E1000_FUNC_1)
hw->mac.perm_addr[5] ^= 1;
for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ADDR_LEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
out:
return ret_val;
return E1000_SUCCESS;
}
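For context on the rewritten read above: at reset the hardware loads RAR0 from the NVM, packing the address little-endian with the first four octets in RAL(0) and the remaining two in the low half of RAH(0). A standalone sketch with invented register values (assumes the RAL/RAH length macros expand to 4 and 2):

/*
 * Worked example (illustrative only): unpacking a MAC address from
 * RAL(0)/RAH(0) the way the rewritten function does.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
        uint32_t rar_low  = 0x34211B00; /* invented E1000_RAL(0) */
        uint32_t rar_high = 0x80007856; /* invented E1000_RAH(0), AV bit set */
        uint8_t addr[6];
        int i;

        for (i = 0; i < 4; i++)         /* low dword: first four octets */
                addr[i] = (uint8_t)(rar_low >> (i * 8));
        for (i = 0; i < 2; i++)         /* high dword: last two octets */
                addr[i + 4] = (uint8_t)(rar_high >> (i * 8));

        /* yields 00:1b:21:34:56:78 */
        assert(addr[0] == 0x00 && addr[1] == 0x1B && addr[5] == 0x78);
        return (0);
}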
/**
@ -916,7 +910,7 @@ s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
* Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
* extended control register.
**/
void e1000_reload_nvm_generic(struct e1000_hw *hw)
static void e1000_reload_nvm_generic(struct e1000_hw *hw)
{
u32 ctrl_ext;


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -61,7 +61,6 @@ s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
void e1000_stop_nvm(struct e1000_hw *hw);
void e1000_release_nvm_generic(struct e1000_hw *hw);
void e1000_reload_nvm_generic(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -237,6 +237,12 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
/* Workaround for Si errata */
if ((hw->phy.type == e1000_phy_lsi) && (hw->revision_id <= 2 ))
msec_delay(10);
#endif /* HANKSVILLE_HW && !NO_PCH_A_SUPPORT */
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@ -292,6 +298,12 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
E1000_WRITE_REG(hw, E1000_MDIC, mdic);
#if defined(HANKSVILLE_HW) && !defined(NO_PCH_A_SUPPORT)
/* Workaround for Si errata */
if ((hw->phy.type == e1000_phy_lsi) && (hw->revision_id <= 2))
msec_delay(10);
#endif /* HANKSVILLE_HW && !NO_PCH_A_SUPPORT */
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
@ -563,8 +575,8 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
goto out;
/* For newer PHYs this bit is downshift enable */
if (phy->type == e1000_phy_m88)
/* For BM PHY this bit is downshift enable */
if (phy->type != e1000_phy_bm)
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
/*
@ -1670,16 +1682,16 @@ s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
M88E1000_PSSR_CABLE_LENGTH_SHIFT;
if (index < M88E1000_CABLE_LENGTH_TABLE_SIZE + 1) {
phy->min_cable_length = e1000_m88_cable_length_table[index];
phy->max_cable_length = e1000_m88_cable_length_table[index+1];
phy->cable_length = (phy->min_cable_length +
phy->max_cable_length) / 2;
} else {
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE + 1) {
ret_val = E1000_ERR_PHY;
goto out;
}
phy->min_cable_length = e1000_m88_cable_length_table[index];
phy->max_cable_length = e1000_m88_cable_length_table[index+1];
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
out:
return ret_val;
}
@ -2140,6 +2152,8 @@ s32 e1000_determine_phy_address(struct e1000_hw *hw)
u32 i;
enum e1000_phy_type phy_type = e1000_phy_unknown;
hw->phy.id = phy_type;
for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
hw->phy.addr = phy_addr;
i = 0;
@ -2427,11 +2441,11 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
{
s32 ret_val;
u16 reg = ((u16)offset);
u16 reg = BM_PHY_REG_NUM(offset);
u16 phy_reg = 0;
u8 phy_acquired = 1;
DEBUGFUNC("e1000_read_phy_wakeup_reg_bm");
DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
@ -2484,15 +2498,15 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the page 800 value using opcode 0x12 */
ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
data);
data);
} else {
/* Read the page 800 value using opcode 0x12 */
/* Write the page 800 value using opcode 0x12 */
ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
*data);
*data);
}
if (ret_val) {
DEBUGOUT("Could not read data value from page 800\n");
DEBUGOUT("Could not access data value from page 800\n");
goto out;
}


@ -1,6 +1,6 @@
/*****************************************************************************
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -99,14 +99,29 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
#define IGP_PAGE_SHIFT 5
#define PHY_REG_MASK 0x1F
/* BM/HV Specific Registers */
#define BM_PORT_CTRL_PAGE 769
#define BM_PCIE_PAGE 770
#define BM_WUC_PAGE 800
#define BM_WUC_ADDRESS_OPCODE 0x11
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE 769
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
#define BM_WUC_ENABLE_BIT (1 << 2)
#define BM_WUC_HOST_WU_BIT (1 << 4)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
(((reg) & MAX_PHY_REG_ADDRESS) |\
(((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
(((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
(((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
~MAX_PHY_REG_ADDRESS)))
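A standalone sanity check of the paged-register encoding above (illustrative only; PHY_PAGE_SHIFT = 5 and MAX_PHY_REG_ADDRESS = 0x1F are assumptions taken from the surrounding e1000 PHY definitions):

/* Worked example: BM_PHY_REG packs page and register number into one
 * offset; BM_PHY_REG_PAGE/BM_PHY_REG_NUM recover them. */
#include <assert.h>
#include <stdint.h>

#define MAX_PHY_REG_ADDRESS     0x1F    /* assumed */
#define PHY_PAGE_SHIFT          5       /* assumed */
#define PHY_UPPER_SHIFT         21

#define BM_PHY_REG(page, reg) \
        (((reg) & MAX_PHY_REG_ADDRESS) | \
         (((page) & 0xFFFF) << PHY_PAGE_SHIFT) | \
         (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
        ((uint16_t)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
        ((uint16_t)(((offset) & MAX_PHY_REG_ADDRESS) | \
         (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) & ~MAX_PHY_REG_ADDRESS)))

int
main(void)
{
        /* BM_RAR_L(0): wakeup page 800, register 16 */
        uint32_t offset = BM_PHY_REG(800, 16);

        assert(offset == 0x6410);               /* (800 << 5) | 16 */
        assert(BM_PHY_REG_PAGE(offset) == 800); /* page recovered */
        assert(BM_PHY_REG_NUM(offset) == 16);   /* register recovered */
        return (0);
}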
/* BM PHY Copper Specific Control 1 */
#define BM_CS_CTRL1 16
#define BM_CS_CTRL1_ENERGY_DETECT 0x0300 /* Enable Energy Detect */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -62,6 +62,7 @@
#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */
#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */
@ -269,15 +270,6 @@
#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
#define E1000_VFGPRC 0x00F10
#define E1000_VFGORC 0x00F18
#define E1000_VFMPRC 0x00F3C
#define E1000_VFGPTC 0x00F14
#define E1000_VFGOTC 0x00F34
#define E1000_VFGOTLBC 0x00F50
#define E1000_VFGPTLBC 0x00F44
#define E1000_VFGORLBC 0x00F48
#define E1000_VFGPRLBC 0x00F40
#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */
@ -387,6 +379,7 @@
#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */
#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */
#define E1000_GCR 0x05B00 /* PCI-Ex Control */
#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */
#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */
#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */
@ -429,7 +422,6 @@
#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
#define E1000_VLVF 0x05D00 /* VLAN Virtual Machine Filter - RW */
#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
@ -440,6 +432,8 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VFVMBMEM(_n) (0x00800 + (_n))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
/* Time Sync */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -93,7 +93,7 @@ int em_display_debug_stats = 0;
/*********************************************************************
* Driver version:
*********************************************************************/
char em_driver_version[] = "6.9.6";
char em_driver_version[] = "6.9.9";
/*********************************************************************
@ -278,10 +278,8 @@ static void em_set_multi(struct adapter *);
static void em_print_hw_stats(struct adapter *);
static void em_update_link_status(struct adapter *);
static int em_get_buf(struct adapter *, int);
static void em_register_vlan(void *, struct ifnet *, u16);
static void em_unregister_vlan(void *, struct ifnet *, u16);
static int em_xmit(struct adapter *, struct mbuf **);
static void em_smartspeed(struct adapter *);
static int em_82547_fifo_workaround(struct adapter *, int);
@ -322,17 +320,19 @@ static void em_irq_fast(void *);
#else
static int em_irq_fast(void *);
#endif
/* MSIX handlers */
static void em_msix_tx(void *);
static void em_msix_rx(void *);
static void em_msix_link(void *);
static void em_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
static void em_handle_rxtx(void *context, int pending);
static void em_handle_rx(void *context, int pending);
static void em_handle_tx(void *context, int pending);
static void em_handle_rxtx(void *context, int pending);
static void em_handle_link(void *context, int pending);
#endif /* EM_LEGACY_IRQ */
static void em_add_rx_process_limit(struct adapter *, const char *,
const char *, int *, int);
#endif /* ~EM_LEGACY_IRQ */
#ifdef DEVICE_POLLING
static poll_handler_t em_poll;
@ -514,8 +514,8 @@ em_attach(device_t dev)
** identified
*/
if ((adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_ich10lan) ||
(adapter->hw.mac.type == e1000_ich9lan)) {
(adapter->hw.mac.type == e1000_ich9lan) ||
(adapter->hw.mac.type == e1000_ich10lan)) {
int rid = EM_BAR_TYPE_FLASH;
adapter->flash = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
@ -644,6 +644,13 @@ em_attach(device_t dev)
adapter->rx_desc_base =
(struct e1000_rx_desc *)adapter->rxdma.dma_vaddr;
/*
** Start from a known state, this is
** important in reading the nvm and
** mac from that.
*/
e1000_reset_hw(&adapter->hw);
/* Make sure we have a good EEPROM before we read from it */
if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
/*
@ -659,13 +666,6 @@ em_attach(device_t dev)
}
}
/* Initialize the hardware */
if (em_hardware_init(adapter)) {
device_printf(dev, "Unable to initialize the hardware\n");
error = EIO;
goto err_hw_init;
}
/* Copy the permanent MAC address out of the EEPROM */
if (e1000_read_mac_addr(&adapter->hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
@ -680,6 +680,13 @@ em_attach(device_t dev)
goto err_hw_init;
}
/* Initialize the hardware */
if (em_hardware_init(adapter)) {
device_printf(dev, "Unable to initialize the hardware\n");
error = EIO;
goto err_hw_init;
}
/* Allocate transmit descriptors and buffers */
if (em_allocate_transmit_structures(adapter)) {
device_printf(dev, "Could not setup transmit structures\n");
@ -1463,8 +1470,6 @@ em_init_locked(struct adapter *adapter)
/* Setup VLAN support, basic and offload if available */
E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
/* New register interface replaces this but
waiting on kernel support to be added */
if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) &&
((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) {
u32 ctrl;
@ -1473,6 +1478,7 @@ em_init_locked(struct adapter *adapter)
E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
}
/* Set hardware offload abilities */
ifp->if_hwassist = 0;
if (adapter->hw.mac.type >= e1000_82543) {
@ -1622,49 +1628,35 @@ em_intr(void *arg)
return;
EM_CORE_LOCK(adapter);
for (;;) {
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
if ((reg_icr == 0xffffffff) || (reg_icr == 0)||
(adapter->hw.mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0))
goto out;
if (adapter->hw.mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
break;
else if (reg_icr == 0)
break;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
goto out;
/*
* XXX: some laptops trigger several spurious interrupts
* on em(4) when in the resume cycle. The ICR register
* reports all-ones value in this case. Processing such
* interrupts would lead to a freeze. I don't know why.
*/
if (reg_icr == 0xffffffff)
break;
EM_TX_LOCK(adapter);
em_txeof(adapter);
em_rxeof(adapter, -1);
em_txeof(adapter);
EM_TX_UNLOCK(adapter);
EM_CORE_UNLOCK(adapter);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
em_rxeof(adapter, -1);
EM_TX_LOCK(adapter);
em_txeof(adapter);
EM_TX_UNLOCK(adapter);
}
EM_CORE_LOCK(adapter);
/* Link status change */
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
callout_stop(&adapter->timer);
adapter->hw.mac.get_link_status = 1;
em_update_link_status(adapter);
/* Deal with TX cruft when link lost */
em_tx_purge(adapter);
callout_reset(&adapter->timer, hz,
em_local_timer, adapter);
}
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
callout_stop(&adapter->timer);
adapter->hw.mac.get_link_status = 1;
em_update_link_status(adapter);
/* Deal with TX cruft when link lost */
em_tx_purge(adapter);
callout_reset(&adapter->timer, hz,
em_local_timer, adapter);
}
EM_CORE_UNLOCK(adapter);
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
out:
EM_CORE_UNLOCK(adapter);
if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start(ifp);
@ -1713,33 +1705,6 @@ em_handle_rxtx(void *context, int pending)
em_enable_intr(adapter);
}
static void
em_handle_rx(void *context, int pending)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
(em_rxeof(adapter, adapter->rx_process_limit) != 0))
taskqueue_enqueue(adapter->tq, &adapter->rx_task);
}
static void
em_handle_tx(void *context, int pending)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
EM_TX_LOCK(adapter);
em_txeof(adapter);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
EM_TX_UNLOCK(adapter);
}
}
/*********************************************************************
*
* Fast Legacy/MSI Combined Interrupt Service routine
@ -1868,6 +1833,33 @@ em_msix_link(void *arg)
EM_MSIX_LINK | E1000_IMS_LSC);
return;
}
static void
em_handle_rx(void *context, int pending)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
(em_rxeof(adapter, adapter->rx_process_limit) != 0))
taskqueue_enqueue(adapter->tq, &adapter->rx_task);
}
static void
em_handle_tx(void *context, int pending)
{
struct adapter *adapter = context;
struct ifnet *ifp = adapter->ifp;
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
EM_TX_LOCK(adapter);
em_txeof(adapter);
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
em_start_locked(ifp);
EM_TX_UNLOCK(adapter);
}
}
#endif /* EM_FAST_IRQ */
/*********************************************************************
@ -2468,7 +2460,7 @@ em_set_multi(struct adapter *adapter)
struct ifnet *ifp = adapter->ifp;
struct ifmultiaddr *ifma;
u32 reg_rctl = 0;
u8 mta[512]; /* Largest MTS is 4096 bits */
u8 *mta; /* Multicast array memory */
int mcnt = 0;
IOCTL_DEBUGOUT("em_set_multi: begin");
@ -2483,6 +2475,13 @@ em_set_multi(struct adapter *adapter)
msec_delay(5);
}
/* Allocate temporary memory to setup array */
mta = malloc(sizeof(u8) *
(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (mta == NULL)
panic("em_set_multi memory failure\n");
IF_ADDR_LOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
@ -2502,8 +2501,7 @@ em_set_multi(struct adapter *adapter)
reg_rctl |= E1000_RCTL_MPE;
E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
} else
e1000_update_mc_addr_list(&adapter->hw, mta,
mcnt, 1, adapter->hw.mac.rar_entry_count);
e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
if (adapter->hw.mac.type == e1000_82542 &&
adapter->hw.revision_id == E1000_REVISION_2) {
@ -2514,6 +2512,7 @@ em_set_multi(struct adapter *adapter)
if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
e1000_pci_set_mwi(&adapter->hw);
}
free(mta, M_DEVBUF);
}
@ -2925,6 +2924,7 @@ em_allocate_msix(struct adapter *adapter)
return (0);
}
static void
em_free_pci_resources(struct adapter *adapter)
{
@ -2973,7 +2973,7 @@ em_free_pci_resources(struct adapter *adapter)
}
/*
* Setup MSI/X
* Setup MSI or MSI/X
*/
static int
em_setup_msix(struct adapter *adapter)

File diff suppressed because it is too large


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2009, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -172,7 +172,7 @@
#define IGB_DEFAULT_PBA 0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT 3
#define IGB_SMARTSPEED_MAX 15
#define IGB_MAX_INTR 10
#define IGB_MAX_LOOP 10
#define IGB_RX_PTHRESH 16
#define IGB_RX_HTHRESH 8
#define IGB_RX_WTHRESH 1
@ -184,14 +184,18 @@
#define IGB_FC_PAUSE_TIME 0x0680
#define IGB_EEPROM_APME 0x400;
#define MAX_INTS_PER_SEC 8000
#define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256)
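A quick check of the throttle arithmetic (standalone sketch; the 256 ns granularity is what the formula implies rather than something stated in this header):

/*
 * Worked example (illustrative only): with MAX_INTS_PER_SEC = 8000 and
 * a 256 ns interval unit, DEFAULT_ITR = 1e9 / (8000 * 256) = 488
 * after integer division.
 */
#include <assert.h>

int
main(void)
{
        int max_ints_per_sec = 8000;
        int default_itr = 1000000000 / (max_ints_per_sec * 256);

        assert(default_itr == 488);
        return (0);
}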
/* Code compatibility between 6 and 7 */
#ifndef ETHER_BPF_MTAP
#define ETHER_BPF_MTAP BPF_MTAP
#endif
#if __FreeBSD_version < 700000
#define CSUM_TSO 0
#define IFCAP_TSO4 0
#define FILTER_STRAY
#define FILTER_HANDLED
#endif
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
@ -230,9 +234,21 @@
#define IGB_MAX_SCATTER 64
#define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header))
#define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */
#define IGB_HDR_BUF 128
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
#define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */
/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/* Header split codes for get_buf */
#define IGB_CLEAN_HEADER 1
#define IGB_CLEAN_PAYLOAD 2
#define IGB_CLEAN_BOTH 3
/*
* Interrupt Moderation parameters
@ -305,7 +321,7 @@ struct tx_ring {
u32 next_avail_desc;
u32 next_to_clean;
volatile u16 tx_avail;
struct igb_buffer *tx_buffers;
struct igb_tx_buffer *tx_buffers;
bus_dma_tag_t txtag; /* dma tag for tx */
u32 watchdog_timer;
u64 no_desc_avail;
@ -329,7 +345,7 @@ struct rx_ring {
char mtx_name[16];
u32 last_cleaned;
u32 next_to_check;
struct igb_buffer *rx_buffers;
struct igb_rx_buffer *rx_buffers;
bus_dma_tag_t rxtag; /* dma tag for rx */
bus_dmamap_t rx_spare_map;
/*
@ -344,6 +360,7 @@ struct rx_ring {
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
};
@ -380,6 +397,7 @@ struct adapter {
struct taskqueue *tq; /* private task queue */
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
/* Management and WOL features */
int wol;
int has_manage;
@ -402,15 +420,18 @@ struct adapter {
* Receive rings
*/
struct rx_ring *rx_rings;
bool rx_hdr_split;
u16 num_rx_desc;
u16 num_rx_queues;
int rx_process_limit;
u32 rx_buffer_len;
u32 rx_mbuf_sz;
u32 rx_mask;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_alloc_failed;
unsigned long mbuf_cluster_failed;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
@ -443,12 +464,18 @@ typedef struct _igb_vendor_info_t {
} igb_vendor_info_t;
struct igb_buffer {
struct igb_tx_buffer {
int next_eop; /* Index of the desc to watch */
struct mbuf *m_head;
bus_dmamap_t map; /* bus_dma map for packet */
};
struct igb_rx_buffer {
struct mbuf *m_head;
struct mbuf *m_pack;
bus_dmamap_t map; /* bus_dma map for packet */
};
#define IGB_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
#define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)