Update the 1G drivers: shared code sync with Intel.

igb now has a queue notion in which a single interrupt
serves an RX/TX pair; this reduces the total interrupts
seen on a system. Both em and igb have a new watchdog
method. igb has fixes from Pyun Yong-Hyeon that have
improved stability, thank you :)
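
For illustration only (not part of this diff): a minimal sketch of the queue
notion described above, using hypothetical names. Each queue pairs one RX ring
and one TX ring behind a single MSI-X vector, so a single handler services both
directions of a flow.

/*
 * Illustrative sketch only; hypothetical names, not the driver's actual
 * structures.  The idea: one MSI-X vector per RX/TX pair, so a busy
 * bidirectional flow generates one interrupt stream instead of two.
 */
struct example_rx_ring;
struct example_tx_ring;

struct example_queue {
	struct example_rx_ring	*rxr;	/* RX descriptor ring */
	struct example_tx_ring	*txr;	/* TX descriptor ring */
	struct resource		*res;	/* MSI-X interrupt resource */
	void			*tag;	/* interrupt cookie from bus_setup_intr() */
};

/* Hypothetical helpers, shown only to make the handler read cleanly. */
static void example_rxeof(struct example_rx_ring *);
static void example_txeof(struct example_tx_ring *);
static void example_enable_queue_intr(struct example_queue *);

static void
example_queue_intr(void *arg)
{
	struct example_queue *que = arg;

	/* One handler drains both directions of this queue pair. */
	example_rxeof(que->rxr);		/* clean received packets */
	example_txeof(que->txr);		/* reclaim completed transmits */
	example_enable_queue_intr(que);		/* re-arm this vector */
}

/*
 * Attach-time wiring, assuming the standard newbus interrupt API:
 *
 *	bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    NULL, example_queue_intr, que, &que->tag);
 */

Compared with separate RX and TX vectors, this halves the vector count per
queue and keeps receive completion and transmit clean-up on the same CPU.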

I wish to MFC this for 7.3 ASAP; please test if able.
jfv 2010-01-26 22:32:22 +00:00
parent 3967eef496
commit 54091abe67
26 changed files with 1394 additions and 1732 deletions

View File

@ -1,6 +1,6 @@
$FreeBSD$
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -256,8 +256,6 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
/* ID LED init */
@ -1048,72 +1046,73 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
if (!phy->reset_disable) {
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
&data);
if (ret_val)
goto out;
if (phy->reset_disable)
goto skip_reset;
data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
&data);
if (ret_val)
goto out;
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
data);
if (ret_val)
goto out;
data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
/*
* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
* 2 - MDI-X mode
* 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
*/
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
if (ret_val)
goto out;
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
data);
if (ret_val)
goto out;
data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
/*
* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
* 2 - MDI-X mode
* 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
*/
ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
if (ret_val)
goto out;
switch (phy->mdix) {
case 1:
data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
break;
case 2:
data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
break;
case 0:
default:
data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
break;
}
/*
* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
* 1 - Enabled
*/
data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
if (phy->disable_polarity_correction)
data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
if (ret_val)
goto out;
/* SW Reset the PHY so all changes take effect */
ret_val = hw->phy.ops.commit(hw);
if (ret_val) {
DEBUGOUT("Error Resetting the PHY\n");
goto out;
}
data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
switch (phy->mdix) {
case 1:
data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
break;
case 2:
data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
break;
case 0:
default:
data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
break;
}
/*
* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
* 1 - Enabled
*/
data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
if (phy->disable_polarity_correction)
data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
if (ret_val)
goto out;
/* SW Reset the PHY so all changes take effect */
ret_val = hw->phy.ops.commit(hw);
if (ret_val) {
DEBUGOUT("Error Resetting the PHY\n");
goto out;
}
skip_reset:
/* Bypass Rx and Tx FIFO's */
ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -228,8 +228,6 @@ static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
/* ID LED init */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -260,8 +260,6 @@ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82541;
/* ID LED init */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -134,8 +134,6 @@ static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82542;
/* set RAR */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2008, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -63,7 +63,6 @@ static s32 e1000_led_on_82543(struct e1000_hw *hw);
static s32 e1000_led_off_82543(struct e1000_hw *hw);
static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
u32 value);
static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value);
static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
@ -246,8 +245,6 @@ static s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_82543;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_82543;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82543;
/* turn on/off LED */
@ -1480,45 +1477,6 @@ static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
}
}
/**
* e1000_mta_set_82543 - Set multicast filter table address
* @hw: pointer to the HW structure
* @hash_value: determines the MTA register and bit to set
*
* The multicast table address is a register array of 32-bit registers.
* The hash_value is used to determine what register the bit is in, the
* current value is read, the new bit is OR'd in and the new value is
* written back into the register.
**/
static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg, mta, temp;
DEBUGFUNC("e1000_mta_set_82543");
hash_reg = (hash_value >> 5);
/*
* If we are on an 82544 and we are trying to write an odd offset
* in the MTA, save off the previous entry before writing and
* restore the old value after writing.
*/
if ((hw->mac.type == e1000_82544) && (hash_reg & 1)) {
hash_reg &= (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
mta |= (1 << hash_bit);
temp = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg - 1);
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
E1000_WRITE_FLUSH(hw);
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg - 1, temp);
E1000_WRITE_FLUSH(hw);
} else {
e1000_mta_set_generic(hw, hash_value);
}
}
/**
* e1000_led_on_82543 - Turn on SW controllable LED
* @hw: pointer to the HW structure

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -336,8 +336,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_82571;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82571;
/* ID LED init */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -80,8 +80,10 @@ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
static const u16 e1000_82580_rxpbs_table[] =
@ -122,8 +124,7 @@ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
} else if ((hw->mac.type == e1000_82580) ||
(hw->mac.type == e1000_82580er)) {
} else if (hw->mac.type >= e1000_82580) {
phy->ops.reset = e1000_phy_hw_reset_generic;
phy->ops.read_reg = e1000_read_phy_reg_82580;
phy->ops.write_reg = e1000_write_phy_reg_82580;
@ -273,8 +274,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
* if using i2c make certain the MDICNFG register is cleared to prevent
* communications from being misrouted to the mdic registers
*/
if ((ctrl_ext & E1000_CTRL_I2C_ENA) &&
((hw->mac.type == e1000_82580) || (hw->mac.type == e1000_82580er)))
if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
/* Set mta register count */
@ -285,7 +285,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
if (mac->type == e1000_82576)
mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
if (mac->type == e1000_82580)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
/* Set if part includes ASF firmware */
mac->asf_firmware_present = TRUE;
@ -299,7 +299,7 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* bus type/speed/width */
mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
/* reset */
if ((mac->type == e1000_82580) || (mac->type == e1000_82580er))
if (mac->type >= e1000_82580)
mac->ops.reset_hw = e1000_reset_hw_82580;
else
mac->ops.reset_hw = e1000_reset_hw_82575;
@ -314,20 +314,22 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
: e1000_setup_serdes_link_82575;
/* physical interface shutdown */
mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
/* physical interface power up */
mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
/* check for link */
mac->ops.check_for_link = e1000_check_for_link_82575;
/* receive address register setting */
mac->ops.rar_set = e1000_rar_set_generic;
/* read mac address */
mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
/* configure collision distance */
mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* writing VFTA */
mac->ops.write_vfta = e1000_write_vfta_generic;
/* clearing VFTA */
mac->ops.clear_vfta = e1000_clear_vfta_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* ID LED init */
mac->ops.id_led_init = e1000_id_led_init_generic;
/* blink LED */
@ -887,6 +889,35 @@ static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
return ret_val;
}
/**
* e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
* @hw: pointer to the HW structure
**/
static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
DEBUGFUNC("e1000_power_up_serdes_link_82575");
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
!e1000_sgmii_active_82575(hw))
return;
/* Enable PCS to turn on link */
reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg |= E1000_PCS_CFG_PCS_EN;
E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
/* Power up the laser */
reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
reg &= ~E1000_CTRL_EXT_SDP3_DATA;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* flush the write to verify completion */
E1000_WRITE_FLUSH(hw);
msec_delay(1);
}
/**
* e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
@ -954,28 +985,14 @@ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
u16 eeprom_data = 0;
DEBUGFUNC("e1000_shutdown_serdes_link_82575");
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
!e1000_sgmii_active_82575(hw))
return;
if (hw->bus.func == E1000_FUNC_0)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
else if ((hw->mac.type == e1000_82580) ||
(hw->mac.type == e1000_82580er))
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
else if (hw->bus.func == E1000_FUNC_1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
/*
* If APM is not enabled in the EEPROM and management interface is
* not enabled, then power down.
*/
if (!(eeprom_data & E1000_NVM_APME_82575) &&
!e1000_enable_mng_pass_thru(hw)) {
if (!e1000_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
@ -1205,16 +1222,10 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
ctrl_reg |= E1000_CTRL_SLU;
if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
/* set both sw defined pins */
/* set both sw defined pins on 82575/82576*/
if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
/* Set switch control to serdes energy detect */
reg = E1000_READ_REG(hw, E1000_CONNSW);
reg |= E1000_CONNSW_ENRGSRC;
E1000_WRITE_REG(hw, E1000_CONNSW, reg);
}
reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
/* default pcs_autoneg to the same setting as mac autoneg */
@ -1268,10 +1279,7 @@ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
@ -1395,6 +1403,28 @@ static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
return ret_val;
}
/**
* e1000_config_collision_dist_82575 - Configure collision distance
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
* during link setup.
**/
static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
{
u32 tctl_ext;
DEBUGFUNC("e1000_config_collision_dist_82575");
tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT);
tctl_ext &= ~E1000_TCTL_EXT_COLD;
tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT;
E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext);
E1000_WRITE_FLUSH(hw);
}
/**
* e1000_power_down_phy_copper_82575 - Remove link during PHY power down
* @hw: pointer to the HW structure
@ -1656,7 +1686,6 @@ void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
**/
static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
u32 mdicnfg = 0;
s32 ret_val;
DEBUGFUNC("e1000_read_phy_reg_82580");
@ -1665,15 +1694,6 @@ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
if (ret_val)
goto out;
/*
* We config the phy address in MDICNFG register now. Same bits
* as before. The values in MDIC can be written but will be
* ignored. This allows us to call the old function after
* configuring the PHY address in the new register
*/
mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@ -1692,7 +1712,6 @@ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
**/
static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
{
u32 mdicnfg = 0;
s32 ret_val;
DEBUGFUNC("e1000_write_phy_reg_82580");
@ -1701,15 +1720,6 @@ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
if (ret_val)
goto out;
/*
* We config the phy address in MDICNFG register now. Same bits
* as before. The values in MDIC can be written but will be
* ignored. This allows us to call the old function after
* configuring the PHY address in the new register
*/
mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
hw->phy.ops.release(hw);
@ -1717,6 +1727,7 @@ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
out:
return ret_val;
}
/**
* e1000_reset_hw_82580 - Reset hardware
* @hw: pointer to the HW structure
@ -1822,20 +1833,3 @@ u16 e1000_rxpbs_adjust_82580(u32 data)
return ret_val;
}
/**
* e1000_erfuse_check_82580 - ER Fuse check
* @hw: pointer to the HW structure
*
* This function returns the status of the ER Fuse
**/
s32 e1000_erfuse_check_82580(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
s32 ufuse_reg;
ufuse_reg = E1000_READ_REG(hw, E1000_UFUSE);
if ((ufuse_reg & E1000_ERFUSE) == E1000_ERFUSE)
ret_val = E1000_ERFUSE_FAILURE;
return ret_val;
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -443,6 +443,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_RPLOLR_STRVLAN 0x40000000
#define E1000_RPLOLR_STRCRC 0x80000000
#define E1000_TCTL_EXT_COLD 0x000FFC00
#define E1000_TCTL_EXT_COLD_SHIFT 10
#define E1000_DTXCTL_8023LL 0x0004
#define E1000_DTXCTL_VLAN_ADDED 0x0008
#define E1000_DTXCTL_OOS_ENABLE 0x0010
@ -456,5 +459,4 @@ struct e1000_adv_tx_context_desc {
void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
u16 e1000_rxpbs_adjust_82580(u32 data);
s32 e1000_erfuse_check_82580(struct e1000_hw *);
#endif /* _E1000_82575_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -281,10 +281,6 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_82580_COPPER_DUAL:
mac->type = e1000_82580;
break;
case E1000_DEV_ID_82580_ER:
case E1000_DEV_ID_82580_ER_DUAL:
mac->type = e1000_82580er;
break;
default:
/* Should never have loaded on this device */
ret_val = -E1000_ERR_MAC_INIT;
@ -376,7 +372,6 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_82575:
case e1000_82576:
case e1000_82580:
case e1000_82580er:
e1000_init_function_pointers_82575(hw);
break;
default:
@ -759,20 +754,6 @@ s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
return E1000_SUCCESS;
}
/**
* e1000_mta_set - Sets multicast table bit
* @hw: pointer to the HW structure
* @hash_value: Multicast hash value.
*
* This sets the bit in the multicast table corresponding to the
* hash value. This is a function pointer entry point called by drivers.
**/
void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
{
if (hw->mac.ops.mta_set)
hw->mac.ops.mta_set(hw, hash_value);
}
/**
* e1000_hash_mc_addr - Determines address location in multicast table
* @hw: pointer to the HW structure
@ -1251,6 +1232,18 @@ void e1000_power_down_phy(struct e1000_hw *hw)
hw->phy.ops.power_down(hw);
}
/**
* e1000_power_up_fiber_serdes_link - Power up serdes link
* @hw: pointer to the HW structure
*
* Power on the optics and PCS.
**/
void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
{
if (hw->mac.ops.power_up_serdes)
hw->mac.ops.power_up_serdes(hw);
}
/**
* e1000_shutdown_fiber_serdes_link - Remove link during power down
* @hw: pointer to the HW structure

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,6 +47,7 @@ extern void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
extern void e1000_init_function_pointers_vf(struct e1000_hw *hw);
extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
s32 e1000_set_mac_type(struct e1000_hw *hw);
@ -67,7 +68,6 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
s32 e1000_disable_pcie_master(struct e1000_hw *hw);
void e1000_config_collision_dist(struct e1000_hw *hw);
void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
void e1000_update_mc_addr_list(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -1004,7 +1004,6 @@
#define E1000_ERR_SWFW_SYNC 13
#define E1000_NOT_IMPLEMENTED 14
#define E1000_ERR_MBX 15
#define E1000_ERFUSE_FAILURE 16
/* Loop limit on how long we wait for auto-negotiation to complete */
#define FIBER_LINK_UP_LIMIT 50

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -142,8 +142,6 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
#define E1000_DEV_ID_82580_ER 0x151D
#define E1000_DEV_ID_82580_ER_DUAL 0x151E
#define E1000_REVISION_0 0
#define E1000_REVISION_1 1
#define E1000_REVISION_2 2
@ -187,7 +185,6 @@ enum e1000_mac_type {
e1000_82575,
e1000_82576,
e1000_82580,
e1000_82580er,
e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */
};
@ -603,11 +600,11 @@ struct e1000_mac_operations {
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
void (*shutdown_serdes)(struct e1000_hw *);
void (*power_up_serdes)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*mta_set)(struct e1000_hw *, u32);
void (*config_collision_dist)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8*, u32);
s32 (*read_mac_addr)(struct e1000_hw *);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -125,6 +125,7 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
@ -199,7 +200,21 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->id = e1000_phy_unknown;
e1000_get_phy_id(hw);
ret_val = e1000_get_phy_id(hw);
if (ret_val)
goto out;
if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
/*
* In case the PHY needs to be in mdio slow mode (eg. 82577),
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
if (ret_val)
goto out;
ret_val = e1000_get_phy_id(hw);
if (ret_val)
goto out;
}
phy->type = e1000_get_phy_type_from_id(phy->id);
switch (phy->type) {
@ -221,6 +236,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
break;
}
out:
return ret_val;
}
@ -442,8 +458,6 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
/* multicast address update */
mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
/* setting MTA */
mac->ops.mta_set = e1000_mta_set_generic;
/* clear hardware counters */
mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
@ -464,6 +478,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_ich8lan;
mac->ops.led_off = e1000_led_off_ich8lan;
break;
#if defined(NAHUM4) || defined(NAHUM5)
case e1000_pchlan:
/* save PCH revision_id */
e1000_read_pci_cfg(hw, 0x2, &pci_cfg);
@ -478,6 +493,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.led_on = e1000_led_on_pchlan;
mac->ops.led_off = e1000_led_off_pchlan;
break;
#endif /* defined(NAHUM4) || defined(NAHUM5) */
default:
break;
}
@ -596,9 +612,11 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
case e1000_ich10lan:
hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
break;
#if defined(NAHUM4) || defined(NAHUM5)
case e1000_pchlan:
hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
break;
#endif /* defined(NAHUM4) || defined(NAHUM5) */
default:
break;
}
@ -767,9 +785,13 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
s32 ret_val;
s32 ret_val = E1000_SUCCESS;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
!(hw->mac.type == e1000_pchlan))
return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
@ -781,95 +803,92 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
* Therefore, after each PHY reset, we will load the
* configuration data out of the NVM manually.
*/
if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) ||
(hw->mac.type == e1000_pchlan)) {
/* Check if SW needs to configure the PHY */
if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
(hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
(hw->mac.type == e1000_pchlan))
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
else
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
(hw->device_id == E1000_DEV_ID_ICH8_IGP_M) ||
(hw->mac.type == e1000_pchlan))
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
else
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
data = E1000_READ_REG(hw, E1000_FEXTNVM);
if (!(data & sw_cfg_mask))
goto out;
data = E1000_READ_REG(hw, E1000_FEXTNVM);
if (!(data & sw_cfg_mask))
goto out;
/* Wait for basic configuration completes before proceeding */
e1000_lan_init_done_ich8lan(hw);
/* Wait for basic configuration completes before proceeding */
e1000_lan_init_done_ich8lan(hw);
/*
* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
*/
data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
goto out;
cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
if (!cnf_size)
goto out;
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
#if defined(NAHUM4) || defined(NAHUM5)
if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
(hw->mac.type == e1000_pchlan)) {
/*
* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
data = E1000_READ_REG(hw, E1000_STRAP);
data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
reg_data);
if (ret_val)
goto out;
cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
if (!cnf_size)
data = E1000_READ_REG(hw, E1000_LEDCTL);
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
(u16)data);
if (ret_val)
goto out;
}
#endif /* defined(NAHUM4) || defined(NAHUM5) */
/* Configure LCD from extended configuration region. */
/* cnf_base_addr is in DWORD */
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
&reg_data);
if (ret_val)
goto out;
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1, &reg_addr);
if (ret_val)
goto out;
if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
(hw->mac.type == e1000_pchlan)) {
/*
* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
*/
data = E1000_READ_REG(hw, E1000_STRAP);
data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
reg_data);
if (ret_val)
goto out;
data = E1000_READ_REG(hw, E1000_LEDCTL);
ret_val = e1000_write_phy_reg_hv_locked(hw,
HV_LED_CONFIG,
(u16)data);
if (ret_val)
goto out;
/* Save off the PHY page for future writes. */
if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
phy_page = reg_data;
continue;
}
/* Configure LCD from extended configuration region. */
reg_addr &= PHY_REG_MASK;
reg_addr |= phy_page;
/* cnf_base_addr is in DWORD */
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
&reg_data);
if (ret_val)
goto out;
ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
1, &reg_addr);
if (ret_val)
goto out;
/* Save off the PHY page for future writes. */
if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
phy_page = reg_data;
continue;
}
reg_addr &= PHY_REG_MASK;
reg_addr |= phy_page;
ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
reg_data);
if (ret_val)
goto out;
}
ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
reg_data);
if (ret_val)
goto out;
}
out:
@ -1087,6 +1106,26 @@ s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
return hw->phy.ops.write_reg(hw, PHY_REG(768, 25), 0x0444);
}
/**
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
* @hw: pointer to the HW structure
**/
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
{
s32 ret_val;
u16 data;
ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
if (ret_val)
return ret_val;
data |= HV_KMRN_MDIO_SLOW;
ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
return ret_val;
}
/**
* e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
* done after every PHY reset.
@ -1094,10 +1133,18 @@ s32 e1000_hv_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 phy_data;
if (hw->mac.type != e1000_pchlan)
goto out;
/* Set MDIO slow mode before any other MDIO access */
if (hw->phy.type == e1000_phy_82577) {
ret_val = e1000_set_mdio_slow_mode_hv(hw);
if (ret_val)
goto out;
}
/* Hanksville M Phy init for IEEE. */
if ((hw->revision_id == 2) &&
(hw->phy.type == e1000_phy_82577) &&
@ -1186,16 +1233,32 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
hw->phy.addr = 1;
ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
hw->phy.ops.release(hw);
if (ret_val)
goto out;
hw->phy.ops.release(hw);
/*
* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
if (ret_val)
goto out;
/* Workaround for link disconnects on a busy hub in half duplex */
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
goto out;
ret_val = hw->phy.ops.read_reg_locked(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 17),
&phy_data);
if (ret_val)
goto release;
ret_val = hw->phy.ops.write_reg_locked(hw,
PHY_REG(BM_PORT_CTRL_PAGE, 17),
phy_data & 0x00FF);
release:
hw->phy.ops.release(hw);
out:
return ret_val;
}
@ -1256,10 +1319,15 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
/* Allow time for h/w to get to a quiescent state after reset */
msec_delay(10);
if (hw->mac.type == e1000_pchlan) {
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
goto out;
break;
default:
break;
}
/* Dummy read to clear the phy wakeup bit after lcd reset */
@ -1272,8 +1340,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Configure the LCD with the OEM bits in NVM */
if (hw->mac.type == e1000_pchlan)
ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
out:
return ret_val;
@ -1972,18 +2039,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
if (ret_val)
goto release;
} else {
old_bank_offset = nvm->flash_bank_size;
new_bank_offset = 0;
ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
if (ret_val)
goto release;
}
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
@ -2038,8 +2101,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
if (ret_val) {
DEBUGOUT("Flash commit failed.\n");
nvm->ops.release(hw);
goto out;
goto release;
}
/*
@ -2050,19 +2112,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
if (ret_val)
goto release;
data &= 0xBFFF;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset * 2 + 1,
(u8)(data >> 8));
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
if (ret_val)
goto release;
/*
* And invalidate the previously valid segment by setting
@ -2072,10 +2130,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
if (ret_val) {
nvm->ops.release(hw);
goto out;
}
if (ret_val)
goto release;
/* Great! Everything worked, we can now clear the cached entries. */
for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
@ -2083,14 +2139,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
release:
nvm->ops.release(hw);
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
nvm->ops.reload(hw);
msec_delay(10);
if (!ret_val) {
nvm->ops.reload(hw);
msec_delay(10);
}
out:
if (ret_val)
@ -2604,6 +2663,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
if (!ret_val)
e1000_release_swflag_ich8lan(hw);
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
case e1000_pchlan:
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
if (ret_val)
goto out;
break;
default:
break;
}
if (ctrl & E1000_CTRL_PHY_RST)
ret_val = hw->phy.ops.get_cfg_done(hw);
@ -2620,19 +2690,23 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
DEBUGOUT("Auto Read Done did not complete\n");
}
}
#if defined(NAHUM4) || defined(NAHUM5)
/* Dummy read to clear the phy wakeup bit after lcd reset */
#if defined(NAHUM4) && defined(NAHUM5)
if ((hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan))
#else
if (hw->mac.type == e1000_pchlan)
#endif
hw->phy.ops.read_reg(hw, BM_WUC, &reg);
#endif /* defined(NAHUM4) || defined(NAHUM5) */
ret_val = e1000_sw_lcd_config_ich8lan(hw);
if (ret_val)
goto out;
if (hw->mac.type == e1000_pchlan) {
ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
if (ret_val)
goto out;
}
ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
if (ret_val)
goto out;
/*
* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
@ -2648,9 +2722,6 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
kab |= E1000_KABGTXD_BGSQLBIAS;
E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
if (hw->mac.type == e1000_pchlan)
ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
out:
return ret_val;
}
@ -3225,17 +3296,14 @@ void e1000_disable_gig_wol_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_cleanup_led_ich8lan");
if (hw->phy.type == e1000_phy_ife)
ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
0);
else
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
0);
return ret_val;
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
return E1000_SUCCESS;
}
/**
@ -3246,17 +3314,14 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_led_on_ich8lan");
if (hw->phy.type == e1000_phy_ife)
ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
else
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
return ret_val;
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
return E1000_SUCCESS;
}
/**
@ -3267,18 +3332,14 @@ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_led_off_ich8lan");
if (hw->phy.type == e1000_phy_ife)
ret_val = hw->phy.ops.write_reg(hw,
IFE_PHY_SPECIAL_CONTROL_LED,
return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
else
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
return ret_val;
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
return E1000_SUCCESS;
}
/**

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -163,6 +163,10 @@
#define LCD_CFG_PHY_ADDR_BIT 0x0020 /* Phy address bit from LCD Config word */
/* KMRN Mode Control */
#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
#define HV_KMRN_MDIO_SLOW 0x0400
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
/*

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -78,7 +78,6 @@ void e1000_init_mac_ops_generic(struct e1000_hw *hw)
mac->ops.update_mc_addr_list = e1000_null_update_mc;
mac->ops.clear_vfta = e1000_null_mac_generic;
mac->ops.write_vfta = e1000_null_write_vfta;
mac->ops.mta_set = e1000_null_mta_set;
mac->ops.rar_set = e1000_rar_set_generic;
mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
}
@ -143,16 +142,6 @@ void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
return;
}
/**
* e1000_null_set_mta - No-op function, return void
* @hw: pointer to the HW structure
**/
void e1000_null_mta_set(struct e1000_hw *hw, u32 a)
{
DEBUGFUNC("e1000_null_mta_set");
return;
}
/**
* e1000_null_rar_set - No-op function, return void
* @hw: pointer to the HW structure
@ -481,42 +470,6 @@ void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
E1000_WRITE_FLUSH(hw);
}
/**
* e1000_mta_set_generic - Set multicast filter table address
* @hw: pointer to the HW structure
* @hash_value: determines the MTA register and bit to set
*
* The multicast table address is a register array of 32-bit registers.
* The hash_value is used to determine what register the bit is in, the
* current value is read, the new bit is OR'd in and the new value is
* written back into the register.
**/
void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg, mta;
DEBUGFUNC("e1000_mta_set_generic");
/*
* The MTA is a register array of 32-bit registers. It is
* treated like an array of (32*mta_reg_count) bits. We want to
* set bit BitArray[hash_value]. So we figure out what register
* the bit is in, read it, OR in the new bit, then write
* back the new value. The (hw->mac.mta_reg_count - 1) serves as a
* mask to bits 31:5 of the hash value which gives us the
* register we're modifying. The hash bit within that register
* is determined by the lower 5 bits of the hash value.
*/
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
mta |= (1 << hash_bit);
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
E1000_WRITE_FLUSH(hw);
}
/**
* e1000_update_mc_addr_list_generic - Update Multicast addresses
* @hw: pointer to the HW structure
@ -560,8 +513,7 @@ void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
* @mc_addr: pointer to a multicast address
*
* Generates a multicast address hash value which is used to determine
* the multicast filter table array address and new table value. See
* e1000_mta_set_generic()
* the multicast filter table array address and new table value.
**/
u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
{
@ -774,7 +726,7 @@ s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
e1000_config_collision_dist_generic(hw);
mac->ops.config_collision_dist(hw);
/*
* Configure Flow Control now that Auto-Neg has completed.
@ -1047,6 +999,7 @@ s32 e1000_setup_link_generic(struct e1000_hw *hw)
**/
s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 ctrl;
s32 ret_val = E1000_SUCCESS;
@ -1057,7 +1010,7 @@ s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
/* Take the link out of reset */
ctrl &= ~E1000_CTRL_LRST;
e1000_config_collision_dist_generic(hw);
mac->ops.config_collision_dist(hw);
ret_val = e1000_commit_fc_settings_generic(hw);
if (ret_val)
@ -1097,8 +1050,7 @@ s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
* Configures the collision distance to the default value and is used
* during link setup. Currently no func pointer exists and all
* implementations are handled in the generic version of this function.
* during link setup.
**/
void e1000_config_collision_dist_generic(struct e1000_hw *hw)
{
@ -1152,7 +1104,7 @@ s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
*/
ret_val = hw->mac.ops.check_for_link(hw);
ret_val = mac->ops.check_for_link(hw);
if (ret_val) {
DEBUGOUT("Error while checking for link\n");
goto out;
@ -1209,7 +1161,7 @@ s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of Rx Pause ONLY, we will
* advertise that we support both symmetric and asymmetric RX
* advertise that we support both symmetric and asymmetric Rx
* PAUSE. Later, we will disable the adapter's ability to send
* PAUSE frames.
*/
@ -1253,7 +1205,6 @@ s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
**/
s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 fcrtl = 0, fcrth = 0;
DEBUGFUNC("e1000_set_fc_watermarks_generic");
@ -1280,7 +1231,7 @@ s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
return ret_val;
return E1000_SUCCESS;
}
/**
@ -1519,7 +1470,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
} else {
hw->fc.current_mode = e1000_fc_rx_pause;
DEBUGOUT("Flow Control = "
"RX PAUSE frames only.\r\n");
"Rx PAUSE frames only.\r\n");
}
}
/*
@ -1535,7 +1486,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_tx_pause;
DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n");
}
/*
* For transmitting PAUSE frames ONLY.
@ -1550,7 +1501,7 @@ s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
hw->fc.current_mode = e1000_fc_rx_pause;
DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n");
} else {
/*
* Per the IEEE spec, at this point flow control
@ -1892,19 +1843,10 @@ s32 e1000_setup_led_generic(struct e1000_hw *hw)
**/
s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
DEBUGFUNC("e1000_cleanup_led_generic");
if (hw->mac.ops.cleanup_led != e1000_cleanup_led_generic) {
ret_val = -E1000_ERR_CONFIG;
goto out;
}
E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
out:
return ret_val;
return E1000_SUCCESS;
}
/**
@ -2063,7 +2005,6 @@ s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
if (!timeout) {
DEBUGOUT("Master requests are pending.\n");
ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
goto out;
}
out:

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -46,7 +46,6 @@ s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
bool e1000_null_mng_mode(struct e1000_hw *hw);
void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
void e1000_null_mta_set(struct e1000_hw *hw, u32 a);
void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
s32 e1000_blink_led_generic(struct e1000_hw *hw);
s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw);
@ -87,7 +86,6 @@ void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000_config_collision_dist_generic(struct e1000_hw *hw);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -74,7 +74,7 @@ s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
{
u32 hicr;
s32 ret_val = E1000_SUCCESS;
u8 i;
u8 i;
DEBUGFUNC("e1000_mng_enable_host_if_generic");
@ -112,11 +112,10 @@ s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
**/
bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
{
u32 fwsm;
u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
DEBUGFUNC("e1000_check_mng_mode_generic");
fwsm = E1000_READ_REG(hw, E1000_FWSM);
return (fwsm & E1000_FWSM_MODE_MASK) ==
(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
@ -136,13 +135,14 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
u32 offset;
s32 ret_val, hdr_csum, csum;
u8 i, len;
bool tx_filter = TRUE;
DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
hw->mac.tx_pkt_filtering = TRUE;
/* No manageability, no filtering */
if (!hw->mac.ops.check_mng_mode(hw)) {
tx_filter = FALSE;
hw->mac.tx_pkt_filtering = FALSE;
goto out;
}
@ -152,7 +152,7 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
*/
ret_val = hw->mac.ops.mng_enable_host_if(hw);
if (ret_val != E1000_SUCCESS) {
tx_filter = FALSE;
hw->mac.tx_pkt_filtering = FALSE;
goto out;
}
@ -171,18 +171,19 @@ bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
if (hdr_csum != csum)
goto out;
if (hdr->signature != E1000_IAMT_SIGNATURE)
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = TRUE;
goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
tx_filter = FALSE;
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = FALSE;
goto out;
}
out:
hw->mac.tx_pkt_filtering = tx_filter;
return tx_filter;
return hw->mac.tx_pkt_filtering;
}
/**
@ -342,10 +343,11 @@ s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
}
/**
* e1000_enable_mng_pass_thru - Enable processing of ARP's
* e1000_enable_mng_pass_thru - Check if management passthrough is needed
* @hw: pointer to the HW structure
*
* Verifies the hardware needs to allow ARPs to be processed by the host.
* Verifies the hardware needs to leave interface enabled so that frames can
* be directed to and from the management interface.
**/
bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
{
@ -360,8 +362,7 @@ bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
manc = E1000_READ_REG(hw, E1000_MANC);
if (!(manc & E1000_MANC_RCV_TCO_EN) ||
!(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
if (!(manc & E1000_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -39,8 +39,6 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
@ -82,7 +80,7 @@
/* Mutex used in the shared code */
#define E1000_MUTEX struct mtx
#define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \
"E1000 Shared Lock", MTX_DEF)
MTX_NETWORK_LOCK, MTX_DEF)
#define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex)
#define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex)
#define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex)

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -191,32 +191,9 @@ s32 e1000_get_phy_id(struct e1000_hw *hw)
if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
goto out;
/*
* If the PHY ID is still unknown, we may have an 82577
* without link. We will try again after setting Slow MDIC
* mode. No harm in trying again in this case since the PHY
* ID is unknown at this point anyway.
*/
ret_val = phy->ops.acquire(hw);
if (ret_val)
goto out;
ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
if (ret_val)
goto out;
phy->ops.release(hw);
retry_count++;
}
out:
/* Revert to MDIO fast mode, if applicable */
if (retry_count) {
ret_val = phy->ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val = e1000_set_mdio_slow_mode_hv(hw, FALSE);
phy->ops.release(hw);
}
return ret_val;
}
@ -262,6 +239,11 @@ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
DEBUGFUNC("e1000_read_phy_reg_mdic");
if (offset > MAX_PHY_REG_ADDRESS) {
DEBUGOUT1("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
@ -320,6 +302,11 @@ s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
DEBUGFUNC("e1000_write_phy_reg_mdic");
if (offset > MAX_PHY_REG_ADDRESS) {
DEBUGOUT1("PHY Address %d is out of range\n", offset);
return -E1000_ERR_PARAM;
}
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
@ -822,18 +809,17 @@ s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
**/
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_data;
DEBUGFUNC("e1000_copper_link_setup_82577");
if (phy->reset_disable) {
if (hw->phy.reset_disable) {
ret_val = E1000_SUCCESS;
goto out;
}
if (phy->type == e1000_phy_82580) {
if (hw->phy.type == e1000_phy_82580) {
ret_val = hw->phy.ops.reset(hw);
if (ret_val) {
DEBUGOUT("Error resetting the PHY.\n");
@ -842,7 +828,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
}
/* Enable CRS on TX. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
if (ret_val)
goto out;
@ -851,7 +837,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
/* Enable downshift */
phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
out:
return ret_val;
@ -877,7 +863,7 @@ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
goto out;
}
/* Enable CRS on TX. This must be set for half-duplex operation. */
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
goto out;
@ -3057,38 +3043,6 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
msec_delay(1);
}
/**
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
* @hw: pointer to the HW structure
* @slow: TRUE for slow mode, FALSE for normal mode
*
* Assumes semaphore already acquired.
**/
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow)
{
s32 ret_val = E1000_SUCCESS;
u16 data = 0;
/* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */
hw->phy.addr = 1;
ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
(BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
if (ret_val)
goto out;
ret_val = e1000_write_phy_reg_mdic(hw, BM_CS_CTRL1,
(0x2180 | (slow << 10)));
if (ret_val)
goto out;
/* dummy read when reverting to fast mode - throw away result */
if (!slow)
ret_val = e1000_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data);
out:
return ret_val;
}
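For reference, the constant written by the removed helper works out exactly as its comment states:

/*
 *   slow = 1:  0x2180 | (1 << 10) = 0x2180 | 0x0400 = 0x2580   (slow MDIO)
 *   slow = 0:  0x2180 | (0 << 10) = 0x2180                     (fast MDIO)
 * which matches the "0x2580==slow, 0x2180==fast" note in the register comment.
 */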
/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
@ -3106,9 +3060,8 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
bool in_slow_mode = FALSE;
DEBUGFUNC("e1000_read_phy_reg_hv");
DEBUGFUNC("__e1000_read_phy_reg_hv");
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@ -3116,16 +3069,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
return ret_val;
}
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
if (ret_val)
goto out;
in_slow_mode = TRUE;
}
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@ -3162,10 +3105,6 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
data);
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
if (!locked)
hw->phy.ops.release(hw);
@ -3217,9 +3156,8 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
s32 ret_val;
u16 page = BM_PHY_REG_PAGE(offset);
u16 reg = BM_PHY_REG_NUM(offset);
bool in_slow_mode = FALSE;
DEBUGFUNC("e1000_write_phy_reg_hv");
DEBUGFUNC("__e1000_write_phy_reg_hv");
if (!locked) {
ret_val = hw->phy.ops.acquire(hw);
@ -3227,16 +3165,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
return ret_val;
}
/* Workaround failure in MDIO access while cable is disconnected */
if ((hw->phy.type == e1000_phy_82577) &&
!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
ret_val = e1000_set_mdio_slow_mode_hv(hw, TRUE);
if (ret_val)
goto out;
in_slow_mode = TRUE;
}
/* Page 800 works differently than the rest so it has its own func */
if (page == BM_WUC_PAGE) {
ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset,
@ -3290,10 +3218,6 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
data);
out:
/* Revert to MDIO fast mode, if applicable */
if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
ret_val |= e1000_set_mdio_slow_mode_hv(hw, FALSE);
if (!locked)
hw->phy.ops.release(hw);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -96,7 +96,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow);
s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
s32 e1000_check_polarity_82577(struct e1000_hw *hw);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -518,5 +518,4 @@
/* PCIe Parity Status Register */
#define E1000_PCIEERRSTS 0x05BA8
#define E1000_ERFUSE 0x00000400
#endif

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -2487,8 +2487,10 @@ em_local_timer(void *arg)
EM_CORE_LOCK_ASSERT(adapter);
#ifndef DEVICE_POLLING
taskqueue_enqueue(adapter->tq,
&adapter->rxtx_task);
#endif
em_update_link_status(adapter);
em_update_stats_counters(adapter);
@ -3132,10 +3134,10 @@ em_setup_interface(device_t dev, struct adapter *adapter)
ifp->if_capabilities |= IFCAP_POLLING;
#endif
/* Enable All WOL methods by default */
/* Limit WOL to MAGIC; it is not clear the others are used */
if (adapter->wol) {
ifp->if_capabilities |= IFCAP_WOL;
ifp->if_capenable |= IFCAP_WOL;
ifp->if_capabilities |= IFCAP_WOL_MAGIC;
ifp->if_capenable |= IFCAP_WOL_MAGIC;
}
/*
@ -4346,7 +4348,7 @@ em_free_receive_structures(struct adapter *adapter)
static int
em_rxeof(struct adapter *adapter, int count)
{
struct ifnet *ifp = adapter->ifp;
struct ifnet *ifp = adapter->ifp;
struct mbuf *mp;
u8 status, accept_frame = 0, eop = 0;
u16 len, desc_len, prev_len_adj;

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -54,7 +54,7 @@
#define EM_MIN_TXD 80
#define EM_MAX_TXD_82543 256
#define EM_MAX_TXD 4096
#define EM_DEFAULT_TXD EM_MAX_TXD_82543
#define EM_DEFAULT_TXD 1024
/*
* EM_RXD - Maximum number of receive Descriptors
@ -72,7 +72,7 @@
#define EM_MIN_RXD 80
#define EM_MAX_RXD_82543 256
#define EM_MAX_RXD 4096
#define EM_DEFAULT_RXD EM_MAX_RXD_82543
#define EM_DEFAULT_RXD 1024
/*
* EM_TIDV - Transmit Interrupt Delay Value
@ -135,9 +135,9 @@
#define EM_RADV 64
/*
* This parameter controls the max duration of transmit watchdog.
* This parameter controls the duration of transmit watchdog.
*/
#define EM_WATCHDOG (5 * hz)
#define EM_WATCHDOG (10 * hz)
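Since hz is the kernel tick rate, (10 * hz) is a ten-second budget expressed in ticks. A rough sketch of the kind of check this feeds (field and function names are assumptions based on the structures below, not the driver's exact code):

if (adapter->watchdog_check &&
    (ticks - adapter->watchdog_time) > EM_WATCHDOG) {
        /* no Tx progress for ~10 seconds: count it and reinitialize */
        adapter->watchdog_events++;
        em_init_locked(adapter);        /* assumed recovery path */
}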
/*
* This parameter controls when the driver calls the routine to reclaim
@ -240,6 +240,7 @@
#define ETH_ZLEN 60
#define ETH_ADDR_LEN 6
#define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */
#define M_TSO_LEN 66
/*
* 82574 has a nonstandard address for EIAC
@ -282,111 +283,73 @@ struct em_int_delay_info {
int value; /* Current value in usecs */
};
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct em_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* Transmit ring: one per queue
*/
struct tx_ring {
struct adapter *adapter;
u32 me;
struct mtx tx_mtx;
char mtx_name[16];
struct em_dma_alloc txdma;
struct e1000_tx_desc *tx_base;
u32 next_avail_desc;
u32 next_to_clean;
volatile u16 tx_avail;
struct em_tx_buffer *tx_buffers;
#if __FreeBSD_version >= 800000
struct buf_ring *br;
struct buf_ring *br;
#endif
struct e1000_hw hw;
bus_dma_tag_t txtag;
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
u32 bytes;
u32 packets;
struct resource *memory;
struct resource *flash;
struct resource *msix;
struct resource *ioport;
int io_rid;
/* 82574 may use 3 int vectors */
struct resource *res[3];
void *tag[3];
int rid[3];
struct ifmedia media;
struct callout timer;
struct callout tx_fifo_timer;
bool watchdog_check;
int watchdog_time;
int msi;
int if_flags;
int max_frame_size;
int min_frame_size;
struct mtx core_mtx;
struct mtx tx_mtx;
struct mtx rx_mtx;
int em_insert_vlan_header;
/* Task for FAST handling */
struct task link_task;
struct task rxtx_task;
struct task rx_task;
struct task tx_task;
struct taskqueue *tq; /* private task queue */
#if __FreeBSD_version >= 700029
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u32 num_vlans;
#endif
/* Management and WOL features */
u32 wol;
bool has_manage;
bool has_amt;
/* Info about the board itself */
uint8_t link_active;
uint16_t link_speed;
uint16_t link_duplex;
uint32_t smartspeed;
struct em_int_delay_info tx_int_delay;
struct em_int_delay_info tx_abs_int_delay;
struct em_int_delay_info rx_int_delay;
struct em_int_delay_info rx_abs_int_delay;
/*
* Transmit definitions
*
* We have an array of num_tx_desc descriptors (handled
* by the controller) paired with an array of tx_buffers
* (at tx_buffer_area).
* The index of the next available descriptor is next_avail_tx_desc.
* The number of remaining tx_desc is num_tx_desc_avail.
*/
struct em_dma_alloc txdma; /* bus_dma glue for tx desc */
struct e1000_tx_desc *tx_desc_base;
uint32_t next_avail_tx_desc;
uint32_t next_tx_to_clean;
volatile uint16_t num_tx_desc_avail;
uint16_t num_tx_desc;
uint16_t last_hw_offload;
uint32_t txd_cmd;
struct em_buffer *tx_buffer_area;
bus_dma_tag_t txtag; /* dma tag for tx */
uint32_t tx_tso; /* last tx was tso */
/*
* Receive definitions
*
* we have an array of num_rx_desc rx_desc (handled by the
* controller), and paired with an array of rx_buffers
* (at rx_buffer_area).
* The next pair to check on receive is at offset next_rx_desc_to_check
*/
struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */
struct e1000_rx_desc *rx_desc_base;
uint32_t next_rx_desc_to_check;
uint32_t rx_buffer_len;
uint16_t num_rx_desc;
int rx_process_limit;
struct em_buffer *rx_buffer_area;
bus_dma_tag_t rxtag;
bus_dmamap_t rx_sparemap;
bool watchdog_check;
int watchdog_time;
u64 no_desc_avail;
u64 tx_packets;
};
/*
* Receive ring: one per queue
*/
struct rx_ring {
struct adapter *adapter;
u32 me;
struct em_dma_alloc rxdma;
union e1000_adv_rx_desc *rx_base;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool discard;
struct mtx rx_mtx;
char mtx_name[16];
u32 last_cleaned;
u32 next_to_check;
struct em_rx_buf *rx_buffers;
bus_dma_tag_t rx_htag; /* dma tag for rx head */
bus_dmamap_t rx_hspare_map;
bus_dma_tag_t rx_ptag; /* dma tag for rx packet */
bus_dmamap_t rx_pspare_map;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
@ -394,19 +357,88 @@ struct adapter {
struct mbuf *fmp;
struct mbuf *lmp;
/* Temporary stats used by AIM */
u32 bytes;
u32 packets;
/* Soft stats */
u64 rx_split_packets;
u64 rx_discarded;
u64 rx_packets;
u64 rx_bytes;
};
struct adapter {
struct ifnet *ifp;
struct e1000_hw hw;
struct e1000_osdep osdep;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
struct resource *res;
void *tag;
u32 eims_mask;
int linkvec;
int link_mask;
int link_irq;
struct ifmedia media;
struct callout timer;
int msix; /* total vectors allocated */
int if_flags;
int max_frame_size;
int min_frame_size;
struct mtx core_mtx;
int em_insert_vlan_header;
struct task rxtx_task;
struct taskqueue *tq; /* private task queue */
u16 num_queues;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u32 num_vlans;
/* Management and WOL features */
int wol;
int has_manage;
/* Info about the board itself */
u8 link_active;
u16 link_speed;
u16 link_duplex;
u32 smartspeed;
/* Interface queues */
struct em_queue *queues;
/*
* Transmit rings
*/
struct tx_ring *tx_rings;
u16 num_tx_desc;
/*
* Receive rings
*/
struct rx_ring *rx_rings;
bool rx_hdr_split;
u16 num_rx_desc;
int rx_process_limit;
u32 rx_mbuf_sz;
u32 rx_mask;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_alloc_failed;
unsigned long mbuf_cluster_failed;
unsigned long no_tx_desc_avail1;
unsigned long no_tx_desc_avail2;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long rx_overruns;
unsigned long rx_irq;
unsigned long tx_irq;
unsigned long link_irq;
/* 82547 workaround */
uint32_t tx_fifo_size;
@ -416,10 +448,9 @@ struct adapter {
uint64_t tx_fifo_wrk_cnt;
uint32_t tx_head_addr;
/* For 82544 PCIX Workaround */
boolean_t pcix_82544;
boolean_t in_detach;
/* For 82544 PCIX Workaround */
boolean_t pcix_82544;
boolean_t in_detach;
struct e1000_hw_stats stats;
};

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -48,7 +48,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_TXD 80
#define IGB_DEFAULT_TXD 1024
#define IGB_DEFAULT_TXD 256
#define IGB_MAX_TXD 4096
/*
@ -63,7 +63,7 @@
* (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
*/
#define IGB_MIN_RXD 80
#define IGB_DEFAULT_RXD 1024
#define IGB_DEFAULT_RXD 256
#define IGB_MAX_RXD 4096
/*
@ -173,10 +173,16 @@
#define IGB_SMARTSPEED_DOWNSHIFT 3
#define IGB_SMARTSPEED_MAX 15
#define IGB_MAX_LOOP 10
#define IGB_RX_PTHRESH 16
#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
#define IGB_RX_HTHRESH 8
#define IGB_RX_WTHRESH 1
#define IGB_TX_PTHRESH 8
#define IGB_TX_HTHRESH 1
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
adapter->msix_mem) ? 1 : 16)
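A hedged sketch of how these thresholds are typically consumed (the register layout is assumed from the 82575/82576 family; note the macros reference hw and adapter, so they only expand where those are in scope):

/* pack prefetch/host/write-back thresholds into the per-ring control registers */
u32 rxdctl = IGB_RX_PTHRESH | (IGB_RX_HTHRESH << 8) | (IGB_RX_WTHRESH << 16);
u32 txdctl = IGB_TX_PTHRESH | (IGB_TX_HTHRESH << 8) | (IGB_TX_WTHRESH << 16);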
#define MAX_NUM_MULTICAST_ADDRESSES 128
#define PCI_ANY_ID (~0U)
#define ETHER_ALIGN 2
@ -236,12 +242,16 @@
#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif
/*
* Interrupt Moderation parameters
*/
#define IGB_LOW_LATENCY 128
#define IGB_AVE_LATENCY 450
#define IGB_BULK_LATENCY 1200
/* Define the starting Interrupt rate per Queue */
#define IGB_INTS_PER_SEC 8000
#define IGB_DEFAULT_ITR 1000000000/(IGB_INTS_PER_SEC * 256)
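Worked out (assuming the 256 ns EITR granularity that the divisor implies):

/*
 *   IGB_DEFAULT_ITR = 1000000000 / (8000 * 256) = 488   (integer division)
 *   488 intervals * 256 ns ~= 125 us between interrupts ~= 8000 interrupts/sec
 */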
/* Header split codes for get_buf */
#define IGB_CLEAN_HEADER 0x01
#define IGB_CLEAN_PAYLOAD 0x02
#define IGB_CLEAN_BOTH (IGB_CLEAN_HEADER | IGB_CLEAN_PAYLOAD)
#define IGB_LINK_ITR 2000
/* Precision Time Sync (IEEE 1588) defines */
@ -264,19 +274,33 @@ struct igb_dma_alloc {
/*
* Transmit ring: one per tx queue
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct igb_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
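To illustrate the queue notion described in the commit message, here is a minimal sketch of how such a container is typically serviced; it is not the driver's actual handler, and igb_rxeof_sketch()/igb_txeof_sketch() are placeholders for the real ring-cleanup routines:

/* per-queue MSI-X interrupt: count it and defer to the queue's task */
static void
queue_intr_sketch(void *arg)
{
        struct igb_queue *que = arg;

        que->irqs++;
        taskqueue_enqueue(que->tq, &que->que_task);
}

/* deferred task: service the paired rings, then re-arm this queue's EIMS bit */
static void
queue_task_sketch(void *arg, int pending)
{
        struct igb_queue *que = arg;

        igb_rxeof_sketch(que->rxr);     /* placeholder rx cleanup */
        igb_txeof_sketch(que->txr);     /* placeholder tx cleanup */
        E1000_WRITE_REG(&que->adapter->hw, E1000_EIMS, que->eims);
}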
/*
* Transmit ring: one per queue
*/
struct tx_ring {
struct adapter *adapter;
u32 me;
u32 msix; /* This ring's MSIX vector */
u32 eims; /* This ring's EIMS bit */
struct mtx tx_mtx;
char mtx_name[16];
struct igb_dma_alloc txdma; /* bus_dma glue for tx desc */
struct igb_dma_alloc txdma;
struct e1000_tx_desc *tx_base;
struct task tx_task; /* cleanup tasklet */
struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
volatile u16 tx_avail;
@ -284,39 +308,38 @@ struct tx_ring {
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
bus_dma_tag_t txtag; /* dma tag for tx */
struct resource *res;
void *tag;
bus_dma_tag_t txtag;
u32 bytes;
u32 packets;
bool watchdog_check;
int watchdog_time;
u64 no_desc_avail;
u64 tx_irq;
u64 tx_packets;
};
/*
* Receive ring: one per rx queue
* Receive ring: one per queue
*/
struct rx_ring {
struct adapter *adapter;
u32 me;
u32 msix; /* This ring's MSIX vector */
u32 eims; /* This ring's EIMS bit */
struct igb_dma_alloc rxdma; /* bus_dma glue for tx desc */
struct igb_dma_alloc rxdma;
union e1000_adv_rx_desc *rx_base;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
struct task rx_task; /* cleanup tasklet */
struct taskqueue *tq;
bool discard;
struct mtx rx_mtx;
char mtx_name[16];
u32 last_cleaned;
u32 next_to_check;
struct igb_rx_buf *rx_buffers;
bus_dma_tag_t rxtag; /* dma tag for tx */
bus_dmamap_t spare_map;
bus_dma_tag_t rx_htag; /* dma tag for rx head */
bus_dmamap_t rx_hspare_map;
bus_dma_tag_t rx_ptag; /* dma tag for rx packet */
bus_dmamap_t rx_pspare_map;
/*
* First/last mbuf pointers, for
* collecting multisegment RX packets.
@ -325,14 +348,11 @@ struct rx_ring {
struct mbuf *lmp;
u32 bytes;
u32 eitr_setting;
struct resource *res;
void *tag;
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_discarded;
u64 rx_packets;
u64 rx_bytes;
};
@ -341,7 +361,6 @@ struct adapter {
struct ifnet *ifp;
struct e1000_hw hw;
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
struct device *dev;
@ -381,12 +400,14 @@ struct adapter {
u16 link_duplex;
u32 smartspeed;
/* Interface queues */
struct igb_queue *queues;
/*
* Transmit rings
*/
struct tx_ring *tx_rings;
u16 num_tx_desc;
u32 txd_cmd;
/*
* Receive rings
@ -446,22 +467,26 @@ struct igb_tx_buffer {
struct igb_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
bus_dmamap_t map; /* bus_dma map for packet */
bus_dmamap_t head_map; /* bus_dma map for header */
bus_dmamap_t pack_map; /* bus_dma map for packet */
};
#define IGB_CORE_LOCK_INIT(_sc, _name) \
mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
#define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx)
#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx)
#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx)
#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED)
#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx)
#define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx)
#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx)
#define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx)
#define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
#endif /* _IGB_H_DEFINED_ */