Revert r323008 and its conversion of e1000/iflib to using SX locks.

This seems to be missing something on the 82574L causing NFS root mounts
to hang.

Reported by:	kib
This commit is contained in:
sbruno 2017-08-30 18:56:24 +00:00
parent 5a5e5d4adb
commit 6330970227
13 changed files with 633 additions and 481 deletions

View File

@ -59,6 +59,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
@ -67,6 +68,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data);
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
@ -297,7 +299,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_phy_80003es2lan");
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
return e1000_acquire_swfw_sync(hw, mask);
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -313,7 +315,7 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_phy_80003es2lan");
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
e1000_release_swfw_sync(hw, mask);
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -331,7 +333,7 @@ static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
mask = E1000_SWFW_CSR_SM;
return e1000_acquire_swfw_sync(hw, mask);
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -348,7 +350,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
mask = E1000_SWFW_CSR_SM;
e1000_release_swfw_sync(hw, mask);
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -363,14 +365,14 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
if (ret_val)
return ret_val;
ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
return ret_val;
}
@ -386,7 +388,78 @@ static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_nvm_80003es2lan");
e1000_release_nvm_generic(hw);
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
/* Firmware tracks its ownership of the same resources in the upper 16 bits */
u32 fwmask = mask << 16;
s32 i = 0;
s32 timeout = 50;
DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
/* Poll until neither firmware nor another software agent owns the bits */
while (i < timeout) {
/* SW_FW_SYNC may only be read/modified while holding the HW semaphore */
if (e1000_get_hw_semaphore_generic(hw))
return -E1000_ERR_SWFW_SYNC;
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/* Firmware currently using resource (fwmask)
 * or other software thread using resource (swmask)
 */
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
return -E1000_ERR_SWFW_SYNC;
}
/* Claim the resource for SW, then drop the HW semaphore guarding SW_FW_SYNC */
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
return E1000_SUCCESS;
}
/**
* e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
/* Spin without a timeout: release must not fail, and the HW semaphore
 * is required before SW_FW_SYNC may be modified.
 */
while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
; /* Empty */
/* Clear only the ownership bits this caller acquired */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
/**

View File

@ -70,8 +70,11 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw,
@ -122,8 +125,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.write_reg = e1000_write_phy_reg_igp;
phy->ops.acquire = e1000_get_hw_semaphore;
phy->ops.release = e1000_put_hw_semaphore;
phy->ops.acquire = e1000_get_hw_semaphore_82571;
phy->ops.release = e1000_put_hw_semaphore_82571;
break;
case e1000_82573:
phy->type = e1000_phy_m88;
@ -135,11 +138,12 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.read_reg = e1000_read_phy_reg_m88;
phy->ops.write_reg = e1000_write_phy_reg_m88;
phy->ops.acquire = e1000_get_hw_semaphore;
phy->ops.release = e1000_put_hw_semaphore;
phy->ops.acquire = e1000_get_hw_semaphore_82571;
phy->ops.release = e1000_put_hw_semaphore_82571;
break;
case e1000_82574:
case e1000_82583:
E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex);
phy->type = e1000_phy_bm;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
@ -502,21 +506,99 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
}
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 sw_timeout = hw->nvm.word_size + 1;
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_82571");
/* If we have timed out 3 times on trying to acquire
 * the inter-port SMBI semaphore, there is old code
 * operating on the other port, and it is not
 * releasing SMBI. Modify the number of times that
 * we try for the semaphore to interwork with this
 * older code.
 */
if (hw->dev_spec._82571.smb_counter > 2)
sw_timeout = 1;
/* Get the SW semaphore */
while (i < sw_timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == sw_timeout) {
/* Deliberately falls through to still attempt the FW semaphore;
 * only the failure counter is bumped here.
 */
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
hw->dev_spec._82571.smb_counter++;
}
/* Get the FW semaphore. */
for (i = 0; i < fw_timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == fw_timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_82571(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore_82571 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;

/* Fix: debug tag previously read "e1000_put_hw_semaphore_generic"
 * (copy/paste leftover); report the actual function name so traces
 * point at the right code.
 */
DEBUGFUNC("e1000_put_hw_semaphore_82571");

/* Clearing SMBI and SWESMBI releases both the inter-port SW
 * semaphore and the SW/FW semaphore in a single register write.
 */
swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore during reset.
*
**/
static s32
e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
s32 i = 0;
/* XXX assert that mutex is held */
DEBUGFUNC("e1000_get_hw_semaphore_82573");
ASSERT_CTX_LOCK_HELD(hw);
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
do {
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@ -532,7 +614,7 @@ e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
if (i == MDIO_OWNERSHIP_TIMEOUT) {
/* Release semaphores */
e1000_put_hw_semaphore_82574(hw);
e1000_put_hw_semaphore_82573(hw);
DEBUGOUT("Driver can't access the PHY\n");
return -E1000_ERR_PHY;
}
@ -541,24 +623,58 @@ e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* e1000_put_hw_semaphore_82573 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used during reset.
*
**/
static void
e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
DEBUGFUNC("e1000_put_hw_semaphore_82574");
DEBUGFUNC("e1000_put_hw_semaphore_82573");
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
}
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM.
*
**/
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
{
s32 ret_val;
DEBUGFUNC("e1000_get_hw_semaphore_82574");
/* The mutex is held across the whole PHY/NVM access: released by
 * e1000_put_hw_semaphore_82574() on success, or right here on
 * failure so the caller never has to unwind a lock it does not own.
 */
E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
ret_val = e1000_get_hw_semaphore_82573(hw);
if (ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
return ret_val;
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*
**/
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_put_hw_semaphore_82574");
/* Drop MDIO ownership first, then the mutex taken by
 * e1000_get_hw_semaphore_82574() — reverse acquisition order.
 */
e1000_put_hw_semaphore_82573(hw);
E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
}
/**
* e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
@ -630,7 +746,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_82571");
ret_val = e1000_get_hw_semaphore(hw);
ret_val = e1000_get_hw_semaphore_82571(hw);
if (ret_val)
return ret_val;
@ -643,7 +759,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
}
if (ret_val)
e1000_put_hw_semaphore(hw);
e1000_put_hw_semaphore_82571(hw);
return ret_val;
}
@ -659,7 +775,7 @@ static void e1000_release_nvm_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_nvm_82571");
e1000_release_nvm_generic(hw);
e1000_put_hw_semaphore(hw);
e1000_put_hw_semaphore_82571(hw);
}
/**
@ -976,6 +1092,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
ret_val = e1000_get_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
ret_val = e1000_get_hw_semaphore_82574(hw);
@ -992,6 +1110,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
case e1000_82573:
/* Release mutex only if the hw semaphore is acquired */
if (!ret_val)
e1000_put_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
/* Release mutex only if the hw semaphore is acquired */
@ -999,7 +1121,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
e1000_put_hw_semaphore_82574(hw);
break;
default:
panic("unknown mac type %x\n", hw->mac.type);
break;
}

View File

@ -79,9 +79,11 @@ static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
u32 offset, u16 data);
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
u16 *speed, u16 *duplex);
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
@ -509,8 +511,12 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
/* acquire SW_FW sync */
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync;
mac->ops.release_swfw_sync = e1000_release_swfw_sync;
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
if (mac->type >= e1000_i210) {
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
}
/* set lan id for port to determine which phy lock to use */
hw->mac.ops.set_lan_id(hw);
@ -982,7 +988,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_82575");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
if (ret_val)
goto out;
@ -1013,7 +1019,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
out:
return ret_val;
@ -1032,7 +1038,83 @@ static void e1000_release_nvm_82575(struct e1000_hw *hw)
e1000_release_nvm_generic(hw);
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
/* Firmware's view of the same resources lives in the upper 16 bits */
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync_82575");
while (i < timeout) {
/* SW_FW_SYNC may only be touched while holding the HW semaphore */
if (e1000_get_hw_semaphore_generic(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
 * Firmware currently using resource (fwmask)
 * or other software thread using resource (swmask)
 */
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
/* Mark the resource as SW-owned, then release the guarding HW semaphore */
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync_82575 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_82575");
/* Spin without a timeout: release must not fail, and the HW semaphore
 * is required before SW_FW_SYNC may be modified.
 */
while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
; /* Empty */
/* Clear only the ownership bits this caller acquired */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
/**

View File

@ -934,6 +934,7 @@ struct e1000_dev_spec_82543 {
struct e1000_dev_spec_82571 {
bool laa_is_present;
u32 smb_counter;
E1000_MUTEX swflag_mutex;
};
struct e1000_dev_spec_80003es2lan {
@ -957,6 +958,8 @@ enum e1000_ulp_state {
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
bool disable_k1_off;
bool eee_disable;

View File

@ -37,6 +37,7 @@
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
@ -57,7 +58,7 @@ static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_i210");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
return ret_val;
}
@ -73,7 +74,152 @@ static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_release_nvm_i210");
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
/* Firmware's view of the same resources lives in the upper 16 bits */
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
DEBUGFUNC("e1000_acquire_swfw_sync_i210");
while (i < timeout) {
/* SW_FW_SYNC may only be touched while holding the HW semaphore */
if (e1000_get_hw_semaphore_i210(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
 * Firmware currently using resource (fwmask)
 * or other software thread using resource (swmask)
 */
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
/* Mark the resource as SW-owned, then release the guarding HW semaphore */
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync_i210 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_i210");
/* Spin without a timeout: release must not fail, and the HW semaphore
 * is required before SW_FW_SYNC may be modified.
 */
while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
; /* Empty */
/* Clear only the ownership bits this caller acquired */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
/**
* e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_i210");
/* Get the SW semaphore: wait for SMBI to be clear */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
/* In rare circumstances, the SW semaphore may already be held
 * unintentionally. Clear the semaphore once before giving up.
 */
if (hw->dev_spec._82575.clear_semaphore_once) {
/* One-shot: never force-clear twice */
hw->dev_spec._82575.clear_semaphore_once = FALSE;
e1000_put_hw_semaphore_generic(hw);
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
}
}
/* If we do not have the semaphore here, we have to give up. */
if (i == timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
}
/* Get the FW semaphore: write SWESMBI and check whether it latched */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_generic(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**

View File

@ -43,6 +43,8 @@ s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,

View File

@ -694,6 +694,9 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
nvm->ops.release = e1000_release_nvm_ich8lan;
@ -1844,7 +1847,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_acquire_nvm_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
return E1000_SUCCESS;
}
@ -1859,7 +1862,9 @@ static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_release_nvm_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
return;
}
/**
@ -1876,7 +1881,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_swflag_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
while (timeout) {
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
@ -1917,6 +1922,9 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
out:
if (ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
return ret_val;
}
@ -1941,6 +1949,10 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
} else {
DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
}
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
return;
}
/**
@ -5010,6 +5022,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
}
if (!ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);

View File

@ -1706,6 +1706,76 @@ s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSED
return E1000_SUCCESS;
}
/**
* e1000_get_hw_semaphore_generic - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_generic");
/* Get the SW semaphore: wait for the SMBI bit to read clear */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
/* Get the FW semaphore: write SWESMBI and check whether it latched */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_generic(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore_generic - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
DEBUGFUNC("e1000_put_hw_semaphore_generic");
/* Clearing SMBI and SWESMBI releases both the SW and SW/FW semaphores */
swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
/**
* e1000_get_auto_rd_done_generic - Check for auto read completion
* @hw: pointer to the HW structure
@ -2181,186 +2251,3 @@ s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
return E1000_SUCCESS;
}
/**
* e1000_get_hw_semaphore - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore");
#ifdef notyet
/* _82571 */
/* If we have timed out 3 times on trying to acquire
 * the inter-port SMBI semaphore, there is old code
 * operating on the other port, and it is not
 * releasing SMBI. Modify the number of times that
 * we try for the semaphore to interwork with this
 * older code.
 */
/* NOTE(review): sw_timeout is not declared in this function, so this
 * "notyet" block would not compile if enabled — TODO resolve before
 * enabling.
 */
if (hw->dev_spec._82571.smb_counter > 2)
sw_timeout = 1;
#endif
/* Get the SW semaphore: wait for the SMBI bit to read clear */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
#ifdef notyet
/*
 * XXX This sounds more like a driver bug whereby we either
 * recursed accidentally or missed clearing it previously
 */
/* In rare circumstances, the SW semaphore may already be held
 * unintentionally. Clear the semaphore once before giving up.
 */
if (hw->dev_spec._82575.clear_semaphore_once) {
hw->dev_spec._82575.clear_semaphore_once = FALSE;
e1000_put_hw_semaphore_generic(hw);
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
}
}
#endif
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
/* Get the FW semaphore: write SWESMBI and check whether it latched */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
/* Release the HW semaphore: clearing SMBI and SWESMBI in one write
 * drops both the SW and SW/FW semaphore bits.
 */
void e1000_put_hw_semaphore(struct e1000_hw *hw)
{
u32 swsm;

DEBUGFUNC("e1000_put_hw_semaphore");

swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI));
}
/**
* e1000_acquire_swfw_sync - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
s32
e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
/* Firmware's view of the same resources lives in the upper 16 bits */
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync");
/* Caller may sleep here; verify no incompatible locks are held */
ASSERT_NO_LOCKS();
while (i < timeout) {
/* SW_FW_SYNC may only be touched while holding the HW semaphore */
if (e1000_get_hw_semaphore(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
 * Firmware currently using resource (fwmask)
 * or other software thread using resource (swmask)
 */
e1000_put_hw_semaphore(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
/* Mark the resource as SW-owned, then release the guarding HW semaphore */
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
void
e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync");
/* Spin without a timeout: release must not fail, and the HW semaphore
 * is required before SW_FW_SYNC may be modified.
 */
while (e1000_get_hw_semaphore(hw) != E1000_SUCCESS)
; /* Empty */
/* Clear only the ownership bits this caller acquired */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore(hw);
}

View File

@ -60,6 +60,7 @@ s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
void e1000_set_lan_id_single_port(struct e1000_hw *hw);
void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
@ -84,15 +85,11 @@ void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000_reset_adaptive_generic(struct e1000_hw *hw);
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
void e1000_update_adaptive_generic(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
s32 e1000_get_hw_semaphore(struct e1000_hw *hw);
void e1000_put_hw_semaphore(struct e1000_hw *hw);
s32 e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask);
#endif

View File

@ -39,7 +39,6 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
@ -48,14 +47,6 @@
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/iflib.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
@ -67,41 +58,11 @@
#define ASSERT(x) if(!(x)) panic("EM: x")
#define us_scale(x) max(1, (x/(1000000/hz)))
/*
 * Convert a delay in milliseconds into scheduler ticks, scaling by the
 * kernel tick rate "hz"; the result is clamped to at least one tick.
 */
static inline int
ms_scale(int x) {
if (hz > 1000)
return (x * (hz / 1000));
if (hz == 1000)
return (x);
return (max(1, x / (1000 / hz)));
}
extern int cold;
static inline void
safe_pause_us(int x) {
/* During early boot ("cold") the scheduler is not running, so
 * sleeping is not possible; busy-wait with DELAY() instead.
 */
if (cold) {
DELAY(x);
} else {
/* NOTE(review): assumes hz <= 1000000 so the divisor is nonzero —
 * TODO confirm; max() keeps the sleep at least one tick long.
 */
pause("e1000_delay", max(1, x/(1000000/hz)));
}
}
static inline void
safe_pause_ms(int x) {
/* During early boot ("cold") the scheduler is not running, so
 * sleeping is not possible; busy-wait with DELAY() instead.
 */
if (cold) {
DELAY(x*1000);
} else {
/* ms_scale() converts milliseconds to ticks for the current hz */
pause("e1000_delay", ms_scale(x));
}
}
#define usec_delay(x) safe_pause_us(x)
#define usec_delay(x) DELAY(x)
#define usec_delay_irq(x) usec_delay(x)
#define msec_delay(x) safe_pause_ms(x)
#define msec_delay_irq(x) msec_delay(x)
#define msec_delay(x) DELAY(1000*(x))
#define msec_delay_irq(x) DELAY(1000*(x))
/* Enable/disable debugging statements in shared code */
#define DBG 0
@ -120,6 +81,16 @@ safe_pause_ms(int x) {
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
/* Mutex used in the shared code */
#define E1000_MUTEX struct mtx
#define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \
MTX_NETWORK_LOCK, \
MTX_DEF | MTX_DUPOK)
#define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex)
#define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex)
#define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex)
#define E1000_MUTEX_UNLOCK(mutex) mtx_unlock(mutex)
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
@ -145,12 +116,6 @@ typedef int8_t s8;
#endif
#endif /*__FreeBSD_version < 800000 */
#ifdef INVARIANTS
#define ASSERT_CTX_LOCK_HELD(hw) (sx_assert(iflib_ctx_lock_get(((struct e1000_osdep *)hw->back)->ctx), SX_XLOCKED))
#else
#define ASSERT_CTX_LOCK_HELD(hw)
#endif
#if defined(__i386__) || defined(__amd64__)
static __inline
void prefetch(void *x)
@ -170,7 +135,6 @@ struct e1000_osdep
bus_space_tag_t flash_bus_space_tag;
bus_space_handle_t flash_bus_space_handle;
device_t dev;
if_ctx_t ctx;
};
#define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \
@ -252,22 +216,5 @@ struct e1000_osdep
bus_space_write_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \
((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value)
#if defined(INVARIANTS)
#include <sys/proc.h>
#define ASSERT_NO_LOCKS() \
do { \
int unknown_locks = curthread->td_locks - mtx_owned(&Giant); \
if (unknown_locks > 0) { \
WITNESS_WARN(WARN_GIANTOK|WARN_SLEEPOK|WARN_PANIC, NULL, "unexpected non-sleepable lock"); \
} \
MPASS(curthread->td_rw_rlocks == 0); \
MPASS(curthread->td_lk_slocks == 0); \
} while (0)
#else
#define ASSERT_NO_LOCKS()
#endif
#endif /* _FREEBSD_OS_H_ */

View File

@ -717,7 +717,7 @@ em_if_attach_pre(if_ctx_t ctx)
return (ENXIO);
}
adapter->ctx = adapter->osdep.ctx = ctx;
adapter->ctx = ctx;
adapter->dev = adapter->osdep.dev = dev;
scctx = adapter->shared = iflib_get_softc_ctx(ctx);
adapter->media = iflib_get_media(ctx);
@ -1664,6 +1664,13 @@ em_if_timer(if_ctx_t ctx, uint16_t qid)
return;
iflib_admin_intr_deferred(ctx);
/* Reset LAA into RAR[0] on 82571 */
if ((adapter->hw.mac.type == e1000_82571) &&
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->hw.mac.type < em_mac_min)
lem_smartspeed(adapter);
/* Mask to use in the irq trigger */
if (adapter->intr_type == IFLIB_INTR_MSIX) {
@ -1774,14 +1781,6 @@ em_if_update_admin_status(if_ctx_t ctx)
}
em_update_stats_counters(adapter);
/* Reset LAA into RAR[0] on 82571 */
if ((adapter->hw.mac.type == e1000_82571) &&
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->hw.mac.type < em_mac_min)
lem_smartspeed(adapter);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC);
}

View File

@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <sys/limits.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
@ -156,7 +157,7 @@ struct iflib_ctx {
if_shared_ctx_t ifc_sctx;
struct if_softc_ctx ifc_softc_ctx;
struct sx ifc_sx;
struct mtx ifc_mtx;
uint16_t ifc_nhwtxqs;
uint16_t ifc_nhwrxqs;
@ -526,11 +527,12 @@ rxd_info_zero(if_rxd_info_t ri)
#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))
#define CTX_LOCK_INIT(_sc, _name) sx_init(&(_sc)->ifc_sx, _name)
#define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_mtx, _name, "iflib ctx lock", MTX_DEF)
#define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_mtx)
#define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_mtx)
#define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_mtx)
#define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_sx)
#define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_sx)
#define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_sx)
#define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
@ -687,14 +689,7 @@ iflib_debug_reset(void)
static void iflib_debug_reset(void) {}
#endif
typedef void async_gtask_fn_t(if_ctx_t ctx, void *arg);
struct async_task_arg {
async_gtask_fn_t *ata_fn;
if_ctx_t ata_ctx;
void *ata_arg;
struct grouptask *ata_gtask;
};
#define IFLIB_DEBUG 0
@ -716,12 +711,6 @@ static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static int async_if_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int iflib_config_async_gtask_dispatch(if_ctx_t ctx, async_gtask_fn_t *fn, char *name, void *arg);
static void iflib_admin_reset_deferred(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif
@ -1104,12 +1093,13 @@ iflib_netmap_intr(struct netmap_adapter *na, int onoff)
struct ifnet *ifp = na->ifp;
if_ctx_t ctx = ifp->if_softc;
/* XXX - do we need synchronization here?*/
CTX_LOCK(ctx);
if (onoff) {
IFDI_INTR_ENABLE(ctx);
} else {
IFDI_INTR_DISABLE(ctx);
}
CTX_UNLOCK(ctx);
}
@ -2100,25 +2090,6 @@ iflib_rx_sds_free(iflib_rxq_t rxq)
}
}
/* CONFIG context only */
static void
iflib_handle_hang(if_ctx_t ctx, void *arg)
{
iflib_txq_t txq = arg;
CTX_LOCK(ctx);
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
IFDI_WATCHDOG_RESET(ctx);
ctx->ifc_watchdog_events++;
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
}
/*
* MI independent logic
*
@ -2155,7 +2126,17 @@ iflib_timer(void *arg)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
return;
hung:
iflib_config_async_gtask_dispatch(ctx, iflib_handle_hang, "hang handler", txq);
CTX_LOCK(ctx);
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n",
txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
IFDI_WATCHDOG_RESET(ctx);
ctx->ifc_watchdog_events++;
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
}
static void
@ -2221,7 +2202,6 @@ iflib_init_locked(if_ctx_t ctx)
txq->ift_timer.c_cpu);
}
/* CONFIG context only */
static int
iflib_media_change(if_t ifp)
{
@ -2235,19 +2215,17 @@ iflib_media_change(if_t ifp)
return (err);
}
/* CONFIG context only */
static void
iflib_media_status(if_t ifp, struct ifmediareq *ifmr)
{
if_ctx_t ctx = if_getsoftc(ifp);
iflib_admin_intr_deferred(ctx);
CTX_LOCK(ctx);
IFDI_UPDATE_ADMIN_STATUS(ctx);
IFDI_MEDIA_STATUS(ctx, ifmr);
CTX_UNLOCK(ctx);
}
/* CONFIG context only */
static void
iflib_stop(if_ctx_t ctx)
{
@ -2262,7 +2240,9 @@ iflib_stop(if_ctx_t ctx)
if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
IFDI_INTR_DISABLE(ctx);
DELAY(1000);
IFDI_STOP(ctx);
DELAY(1000);
iflib_debug_reset();
/* Wait for current tx queue users to exit to disarm watchdog timer. */
@ -2601,7 +2581,10 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
return true;
return (iflib_rxd_avail(ctx, rxq, *cidxp, 1));
err:
iflib_admin_reset_deferred(ctx);
CTX_LOCK(ctx);
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
return (false);
}
@ -3564,16 +3547,19 @@ _task_fn_rx(void *context)
GROUPTASK_ENQUEUE(&rxq->ifr_task);
}
/* CONFIG context only */
static void
_task_fn_admin(void *context)
{
if_ctx_t ctx = context;
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx;
iflib_txq_t txq;
int i, running;
int i;
running = !!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING);
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) {
if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) {
return;
}
}
CTX_LOCK(ctx);
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
@ -3581,27 +3567,23 @@ _task_fn_admin(void *context)
callout_stop(&txq->ift_timer);
CALLOUT_UNLOCK(txq);
}
if (running) {
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer,
txq, txq->ift_timer.c_cpu);
IFDI_LINK_INTR_ENABLE(ctx);
}
IFDI_UPDATE_ADMIN_STATUS(ctx);
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
IFDI_LINK_INTR_ENABLE(ctx);
if (ctx->ifc_flags & IFC_DO_RESET) {
ctx->ifc_flags &= ~IFC_DO_RESET;
iflib_if_init_locked(ctx);
}
IFDI_UPDATE_ADMIN_STATUS(ctx);
CTX_UNLOCK(ctx);
if (LINK_ACTIVE(ctx) == 0 || !running)
if (LINK_ACTIVE(ctx) == 0)
return;
for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
}
/* CONFIG context only */
static void
_task_fn_iov(void *context)
{
@ -3731,7 +3713,6 @@ iflib_if_transmit(if_t ifp, struct mbuf *m)
return (err);
}
/* CONFIG context only */
static void
iflib_if_qflush(if_t ifp)
{
@ -3815,12 +3796,29 @@ iflib_if_ioctl(if_t ifp, u_long command, caddr_t data)
CTX_UNLOCK(ctx);
break;
case SIOCSIFFLAGS:
err = async_if_ioctl(ctx, command, data);
CTX_LOCK(ctx);
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
}
} else
reinit = 1;
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
iflib_stop(ctx);
}
ctx->ifc_if_flags = if_getflags(ifp);
CTX_UNLOCK(ctx);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
err = async_if_ioctl(ctx, command, data);
CTX_LOCK(ctx);
IFDI_INTR_DISABLE(ctx);
IFDI_MULTI_SET(ctx);
IFDI_INTR_ENABLE(ctx);
CTX_UNLOCK(ctx);
}
break;
case SIOCSIFMEDIA:
@ -3914,7 +3912,6 @@ iflib_if_get_counter(if_t ifp, ift_counter cnt)
*
**********************************************************************/
/* CONFIG context only */
static void
iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
{
@ -3934,7 +3931,6 @@ iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag)
CTX_UNLOCK(ctx);
}
/* CONFIG context only */
static void
iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
{
@ -3954,7 +3950,6 @@ iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag)
CTX_UNLOCK(ctx);
}
/* CONFIG context only */
static void
iflib_led_func(void *arg, int onoff)
{
@ -4099,10 +4094,8 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i];
}
}
CTX_LOCK(ctx);
err = IFDI_ATTACH_PRE(ctx);
CTX_UNLOCK(ctx);
if (err) {
if ((err = IFDI_ATTACH_PRE(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err);
return (err);
}
@ -4142,7 +4135,6 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
if (!powerof2(scctx->isc_nrxd[i])) {
/* round down instead? */
device_printf(dev, "# rx descriptors must be a power of 2\n");
err = EINVAL;
goto fail;
}
@ -4240,10 +4232,7 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
}
}
ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac);
CTX_LOCK(ctx);
err = IFDI_ATTACH_POST(ctx);
CTX_UNLOCK(ctx);
if (err) {
if ((err = IFDI_ATTACH_POST(ctx)) != 0) {
device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err);
goto fail_detach;
}
@ -4265,9 +4254,7 @@ fail_intr_free:
fail_queues:
/* XXX free queues */
fail:
CTX_LOCK(ctx);
IFDI_DETACH(ctx);
CTX_UNLOCK(ctx);
return (err);
}
@ -5125,22 +5112,6 @@ iflib_admin_intr_deferred(if_ctx_t ctx)
GROUPTASK_ENQUEUE(&ctx->ifc_admin_task);
}
/* CONFIG context only */
static void
iflib_handle_reset(if_ctx_t ctx, void *arg)
{
CTX_LOCK(ctx);
ctx->ifc_flags |= IFC_DO_RESET;
iflib_admin_intr_deferred(ctx);
CTX_UNLOCK(ctx);
}
static void
iflib_admin_reset_deferred(if_ctx_t ctx)
{
iflib_config_async_gtask_dispatch(ctx, iflib_handle_reset, "reset handler", NULL);
}
void
iflib_iov_intr_deferred(if_ctx_t ctx)
{
@ -5164,101 +5135,11 @@ iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn,
taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
}
static void
iflib_multi_set(if_ctx_t ctx, void *arg)
{
CTX_LOCK(ctx);
IFDI_INTR_DISABLE(ctx);
IFDI_MULTI_SET(ctx);
IFDI_INTR_ENABLE(ctx);
CTX_UNLOCK(ctx);
}
static void
iflib_flags_set(if_ctx_t ctx, void *arg)
{
int reinit, err;
if_t ifp = ctx->ifc_ifp;
err = reinit = 0;
CTX_LOCK(ctx);
if (if_getflags(ifp) & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
if ((if_getflags(ifp) ^ ctx->ifc_if_flags) &
(IFF_PROMISC | IFF_ALLMULTI)) {
err = IFDI_PROMISC_SET(ctx, if_getflags(ifp));
}
} else
reinit = 1;
} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
iflib_stop(ctx);
}
ctx->ifc_if_flags = if_getflags(ifp);
if (reinit)
iflib_if_init_locked(ctx);
CTX_UNLOCK(ctx);
if (err)
log(LOG_WARNING, "IFDI_PROMISC_SET returned %d\n", err);
}
static void
async_gtask(void *ctx)
{
struct async_task_arg *at_arg = ctx;
if_ctx_t if_ctx = at_arg->ata_ctx;
void *arg = at_arg->ata_arg;
at_arg->ata_fn(if_ctx, arg);
taskqgroup_detach(qgroup_if_config_tqg, at_arg->ata_gtask);
free(at_arg->ata_gtask, M_IFLIB);
}
static int
iflib_config_async_gtask_dispatch(if_ctx_t ctx, async_gtask_fn_t *fn, char *name, void *arg)
{
struct grouptask *gtask;
struct async_task_arg *at_arg;
if ((gtask = malloc(sizeof(struct grouptask) + sizeof(struct async_task_arg), M_IFLIB, M_NOWAIT|M_ZERO)) == NULL)
return (ENOMEM);
at_arg = (struct async_task_arg *)(gtask + 1);
at_arg->ata_fn = fn;
at_arg->ata_ctx = ctx;
at_arg->ata_arg = arg;
at_arg->ata_gtask = gtask;
GROUPTASK_INIT(gtask, 0, async_gtask, at_arg);
taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name);
GROUPTASK_ENQUEUE(gtask);
return (0);
}
static int
async_if_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
{
int rc;
switch (command) {
case SIOCADDMULTI:
case SIOCDELMULTI:
rc = iflib_config_async_gtask_dispatch(ctx, iflib_multi_set, "async_if_multi", NULL);
break;
case SIOCSIFFLAGS:
rc = iflib_config_async_gtask_dispatch(ctx, iflib_flags_set, "async_if_flags", NULL);
break;
default:
panic("unknown command %lx", command);
}
return (rc);
}
void
iflib_config_gtask_deinit(struct grouptask *gtask)
{
taskqgroup_detach(qgroup_if_config_tqg, gtask);
taskqgroup_detach(qgroup_if_config_tqg, gtask);
}
void
@ -5325,11 +5206,11 @@ iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name,
info, 0, iflib_sysctl_int_delay, "I", description);
}
struct sx *
struct mtx *
iflib_ctx_lock_get(if_ctx_t ctx)
{
return (&ctx->ifc_sx);
return (&ctx->ifc_mtx);
}
static int

View File

@ -381,7 +381,7 @@ int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, i
void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count);
struct sx *iflib_ctx_lock_get(if_ctx_t);
struct mtx *iflib_ctx_lock_get(if_ctx_t);
struct mtx *iflib_qset_lock_get(if_ctx_t, uint16_t);
void iflib_led_create(if_ctx_t ctx);