Revert r323516 (iflib rollup)

This was really too big a commit even if everything had worked, but the
one huge commit introduced multiple new issues, so it's not worth
keeping until they're fixed.

I'll work on splitting this up into logical chunks and introduce them one
at a time over the next week or two.

Approved by:	sbruno (mentor)
Sponsored by:	Limelight Networks
Stephen Hurd 2017-09-16 02:41:38 +00:00
parent 9fb35c8d7b
commit ab2e3f7958
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=323635
19 changed files with 1017 additions and 1534 deletions

View File

@ -1640,8 +1640,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
}
for (i=0; i<softc->scctx->isc_ntxqsets; i++)
/* TODO: Benchmark and see if tying to the RX irqs helps */
iflib_softirq_alloc_generic(ctx, -1, IFLIB_INTR_TX, NULL, i,
iflib_softirq_alloc_generic(ctx, i + 1, IFLIB_INTR_TX, NULL, i,
"tx_cp");
return rc;

View File

@ -59,6 +59,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
@ -67,6 +68,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data);
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
@ -297,7 +299,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_phy_80003es2lan");
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
return e1000_acquire_swfw_sync(hw, mask);
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -313,7 +315,7 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_phy_80003es2lan");
mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
e1000_release_swfw_sync(hw, mask);
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -331,7 +333,7 @@ static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
mask = E1000_SWFW_CSR_SM;
return e1000_acquire_swfw_sync(hw, mask);
return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -348,7 +350,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
mask = E1000_SWFW_CSR_SM;
e1000_release_swfw_sync(hw, mask);
e1000_release_swfw_sync_80003es2lan(hw, mask);
}
/**
@ -363,14 +365,14 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
if (ret_val)
return ret_val;
ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
return ret_val;
}
@ -386,7 +388,78 @@ static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_nvm_80003es2lan");
e1000_release_nvm_generic(hw);
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 i = 0;
s32 timeout = 50;
DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
while (i < timeout) {
if (e1000_get_hw_semaphore_generic(hw))
return -E1000_ERR_SWFW_SYNC;
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
return -E1000_ERR_SWFW_SYNC;
}
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
return E1000_SUCCESS;
}
/**
* e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
; /* Empty */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
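Taken together, the acquire/release pair above implements the shared code's two-level lock: the SMBI/SWESMBI hardware semaphore guards the SW_FW_SYNC register itself, whose low 16 bits carry software ownership flags and whose high 16 bits mirror them for firmware (hence swmask = mask and fwmask = mask << 16 above). A minimal caller sketch, hypothetical and not part of this commit:

/*
 * Illustrative only: how a hypothetical helper in this file would
 * bracket NVM access with the per-port SW/FW semaphore restored above.
 */
static s32
example_read_nvm_word(struct e1000_hw *hw, u16 offset, u16 *data)
{
	s32 ret_val;

	/* Claim the EEPROM flag (E1000_SWFW_EEP_SM) for software. */
	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		return ret_val;

	ret_val = hw->nvm.ops.read(hw, offset, 1, data);

	/* Always release, so firmware and the other port can get in. */
	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
	return ret_val;
}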
/**

View File

@ -70,8 +70,11 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw,
@ -122,8 +125,8 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
phy->ops.read_reg = e1000_read_phy_reg_igp;
phy->ops.write_reg = e1000_write_phy_reg_igp;
phy->ops.acquire = e1000_get_hw_semaphore;
phy->ops.release = e1000_put_hw_semaphore;
phy->ops.acquire = e1000_get_hw_semaphore_82571;
phy->ops.release = e1000_put_hw_semaphore_82571;
break;
case e1000_82573:
phy->type = e1000_phy_m88;
@ -135,11 +138,12 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.read_reg = e1000_read_phy_reg_m88;
phy->ops.write_reg = e1000_write_phy_reg_m88;
phy->ops.acquire = e1000_get_hw_semaphore;
phy->ops.release = e1000_put_hw_semaphore;
phy->ops.acquire = e1000_get_hw_semaphore_82571;
phy->ops.release = e1000_put_hw_semaphore_82571;
break;
case e1000_82574:
case e1000_82583:
E1000_MUTEX_INIT(&hw->dev_spec._82571.swflag_mutex);
phy->type = e1000_phy_bm;
phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
@ -502,21 +506,99 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
}
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 sw_timeout = hw->nvm.word_size + 1;
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_82571");
/* If we have timed out 3 times on trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
* we try for the semaphore to interwork with this
* older code.
*/
if (hw->dev_spec._82571.smb_counter > 2)
sw_timeout = 1;
/* Get the SW semaphore */
while (i < sw_timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == sw_timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
hw->dev_spec._82571.smb_counter++;
}
/* Get the FW semaphore. */
for (i = 0; i < fw_timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == fw_timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_82571(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore_82571 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
DEBUGFUNC("e1000_put_hw_semaphore_generic");
swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore during reset.
*
**/
static s32
e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
s32 i = 0;
/* XXX assert that mutex is held */
DEBUGFUNC("e1000_get_hw_semaphore_82573");
ASSERT_CTX_LOCK_HELD(hw);
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
do {
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@ -532,7 +614,7 @@ e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
if (i == MDIO_OWNERSHIP_TIMEOUT) {
/* Release semaphores */
e1000_put_hw_semaphore_82574(hw);
e1000_put_hw_semaphore_82573(hw);
DEBUGOUT("Driver can't access the PHY\n");
return -E1000_ERR_PHY;
}
@ -541,24 +623,58 @@ e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* e1000_put_hw_semaphore_82573 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used during reset.
*
**/
static void
e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
DEBUGFUNC("e1000_put_hw_semaphore_82574");
DEBUGFUNC("e1000_put_hw_semaphore_82573");
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
}
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM.
*
**/
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
{
s32 ret_val;
DEBUGFUNC("e1000_get_hw_semaphore_82574");
E1000_MUTEX_LOCK(&hw->dev_spec._82571.swflag_mutex);
ret_val = e1000_get_hw_semaphore_82573(hw);
if (ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
return ret_val;
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*
**/
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_put_hw_semaphore_82574");
e1000_put_hw_semaphore_82573(hw);
E1000_MUTEX_UNLOCK(&hw->dev_spec._82571.swflag_mutex);
}
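The 82574/82583 pair above simply wraps the 82573 MDIO-ownership helpers in the swflag_mutex initialized in e1000_init_phy_params_82571. Assuming phy->ops.acquire/release point at these helpers for those MACs, as the surrounding init code suggests, a PHY register read ends up serialized like this hedged sketch, which is not code from the diff:

	s32 ret_val;
	u16 phy_data;

	ret_val = hw->phy.ops.acquire(hw);	/* mutex, then MDIO ownership flag */
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_phy_reg_mdic(hw, PHY_STATUS, &phy_data);
	hw->phy.ops.release(hw);		/* ownership flag, then mutex */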
/**
* e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
@ -630,7 +746,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_82571");
ret_val = e1000_get_hw_semaphore(hw);
ret_val = e1000_get_hw_semaphore_82571(hw);
if (ret_val)
return ret_val;
@ -643,7 +759,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
}
if (ret_val)
e1000_put_hw_semaphore(hw);
e1000_put_hw_semaphore_82571(hw);
return ret_val;
}
@ -659,7 +775,7 @@ static void e1000_release_nvm_82571(struct e1000_hw *hw)
DEBUGFUNC("e1000_release_nvm_82571");
e1000_release_nvm_generic(hw);
e1000_put_hw_semaphore(hw);
e1000_put_hw_semaphore_82571(hw);
}
/**
@ -976,6 +1092,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_82573:
ret_val = e1000_get_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
ret_val = e1000_get_hw_semaphore_82574(hw);
@ -992,6 +1110,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
case e1000_82573:
/* Release mutex only if the hw semaphore is acquired */
if (!ret_val)
e1000_put_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
/* Release mutex only if the hw semaphore is acquired */
@ -999,7 +1121,6 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
e1000_put_hw_semaphore_82574(hw);
break;
default:
panic("unknown mac type %x\n", hw->mac.type);
break;
}

View File

@ -79,9 +79,11 @@ static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
u32 offset, u16 data);
static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
u16 *speed, u16 *duplex);
static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
@ -509,8 +511,12 @@ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
/* link info */
mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
/* acquire SW_FW sync */
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync;
mac->ops.release_swfw_sync = e1000_release_swfw_sync;
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575;
mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575;
if (mac->type >= e1000_i210) {
mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210;
mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210;
}
/* set lan id for port to determine which phy lock to use */
hw->mac.ops.set_lan_id(hw);
@ -982,7 +988,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_82575");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
if (ret_val)
goto out;
@ -1013,7 +1019,7 @@ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
ret_val = e1000_acquire_nvm_generic(hw);
if (ret_val)
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
out:
return ret_val;
@ -1032,7 +1038,83 @@ static void e1000_release_nvm_82575(struct e1000_hw *hw)
e1000_release_nvm_generic(hw);
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync_82575");
while (i < timeout) {
if (e1000_get_hw_semaphore_generic(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync_82575 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_82575");
while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
; /* Empty */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
/**

View File

@ -934,6 +934,7 @@ struct e1000_dev_spec_82543 {
struct e1000_dev_spec_82571 {
bool laa_is_present;
u32 smb_counter;
E1000_MUTEX swflag_mutex;
};
struct e1000_dev_spec_80003es2lan {
@ -957,6 +958,8 @@ enum e1000_ulp_state {
struct e1000_dev_spec_ich8lan {
bool kmrn_lock_loss_workaround_enabled;
struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
bool disable_k1_off;
bool eee_disable;

View File

@ -37,6 +37,7 @@
static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
static void e1000_release_nvm_i210(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
@ -57,7 +58,7 @@ static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_nvm_i210");
ret_val = e1000_acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
return ret_val;
}
@ -73,7 +74,152 @@ static void e1000_release_nvm_i210(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_release_nvm_i210");
e1000_release_swfw_sync(hw, E1000_SWFW_EEP_SM);
e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
/**
* e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
DEBUGFUNC("e1000_acquire_swfw_sync_i210");
while (i < timeout) {
if (e1000_get_hw_semaphore_i210(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000_put_hw_semaphore_generic(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync_i210 - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync_i210");
while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
; /* Empty */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore_generic(hw);
}
/**
* e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_i210");
/* Get the SW semaphore */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
/* In rare circumstances, the SW semaphore may already be held
* unintentionally. Clear the semaphore once before giving up.
*/
if (hw->dev_spec._82575.clear_semaphore_once) {
hw->dev_spec._82575.clear_semaphore_once = FALSE;
e1000_put_hw_semaphore_generic(hw);
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
}
}
/* If we do not have the semaphore here, we have to give up. */
if (i == timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_generic(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**

View File

@ -43,6 +43,8 @@ s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,

View File

@ -694,6 +694,9 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
dev_spec->shadow_ram[i].value = 0xFFFF;
}
E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
nvm->ops.release = e1000_release_nvm_ich8lan;
@ -1844,7 +1847,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_acquire_nvm_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
return E1000_SUCCESS;
}
@ -1859,7 +1862,9 @@ static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
{
DEBUGFUNC("e1000_release_nvm_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
return;
}
/**
@ -1876,7 +1881,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
DEBUGFUNC("e1000_acquire_swflag_ich8lan");
ASSERT_CTX_LOCK_HELD(hw);
E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
while (timeout) {
extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
@ -1917,6 +1922,9 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
}
out:
if (ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
return ret_val;
}
@ -1941,6 +1949,10 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
} else {
DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
}
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
return;
}
/**
@ -5010,6 +5022,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
}
if (!ret_val)
E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
if (ctrl & E1000_CTRL_PHY_RST) {
ret_val = hw->phy.ops.get_cfg_done(hw);
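One subtlety in the hunks above: e1000_acquire_swflag_ich8lan() returns holding swflag_mutex (it unlocks only on failure), and while e1000_release_swflag_ich8lan() is the normal unlock path, the reset path in e1000_reset_hw_ich8lan() drops the mutex itself because the MAC reset has already cleared the hardware flag. A condensed, illustrative flow, simplified from the hunks rather than literal code:

	ret_val = e1000_acquire_swflag_ich8lan(hw);	/* locks swflag_mutex, sets HW swflag */
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);	/* reset clears HW swflag */
	/* ... post-reset work ... */
	if (!ret_val)	/* only the mutex is still held; drop it */
		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);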

View File

@ -1706,6 +1706,76 @@ s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSED
return E1000_SUCCESS;
}
/**
* e1000_get_hw_semaphore_generic - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore_generic");
/* Get the SW semaphore */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_generic(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore_generic - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
{
u32 swsm;
DEBUGFUNC("e1000_put_hw_semaphore_generic");
swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
/**
* e1000_get_auto_rd_done_generic - Check for auto read completion
* @hw: pointer to the HW structure
@ -2181,186 +2251,3 @@ s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
return E1000_SUCCESS;
}
/**
* e1000_get_hw_semaphore - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
s32 e1000_get_hw_semaphore(struct e1000_hw *hw)
{
u32 swsm;
s32 timeout = hw->nvm.word_size + 1;
s32 i = 0;
DEBUGFUNC("e1000_get_hw_semaphore");
#ifdef notyet
/* _82571 */
/* If we have timed out 3 times on trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
* we try for the semaphore to interwork with this
* older code.
*/
if (hw->dev_spec._82571.smb_counter > 2)
sw_timeout = 1;
#endif
/* Get the SW semaphore */
while (i < timeout) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
i++;
}
if (i == timeout) {
#ifdef notyet
/*
* XXX This sounds more like a driver bug whereby we either
* recursed accidentally or missed clearing it previously
*/
/* In rare circumstances, the SW semaphore may already be held
* unintentionally. Clear the semaphore once before giving up.
*/
if (hw->dev_spec._82575.clear_semaphore_once) {
hw->dev_spec._82575.clear_semaphore_once = FALSE;
e1000_put_hw_semaphore_generic(hw);
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
usec_delay(50);
}
}
#endif
DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
return -E1000_ERR_NVM;
}
/* Get the FW semaphore. */
for (i = 0; i < timeout; i++) {
swsm = E1000_READ_REG(hw, E1000_SWSM);
E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
break;
usec_delay(50);
}
if (i == timeout) {
/* Release semaphores */
e1000_put_hw_semaphore(hw);
DEBUGOUT("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return E1000_SUCCESS;
}
/**
* e1000_put_hw_semaphore - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
void e1000_put_hw_semaphore(struct e1000_hw *hw)
{
u32 swsm;
DEBUGFUNC("e1000_put_hw_semaphore");
swsm = E1000_READ_REG(hw, E1000_SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}
/**
* e1000_acquire_swfw_sync - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
*
* Acquire the SW/FW semaphore to access the PHY or NVM. The mask
* will also specify which port we're acquiring the lock for.
**/
s32
e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = E1000_SUCCESS;
s32 i = 0, timeout = 200;
DEBUGFUNC("e1000_acquire_swfw_sync");
ASSERT_NO_LOCKS();
while (i < timeout) {
if (e1000_get_hw_semaphore(hw)) {
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
if (!(swfw_sync & (fwmask | swmask)))
break;
/*
* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000_put_hw_semaphore(hw);
msec_delay_irq(5);
i++;
}
if (i == timeout) {
DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
ret_val = -E1000_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore(hw);
out:
return ret_val;
}
/**
* e1000_release_swfw_sync - Release SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to release
*
* Release the SW/FW semaphore used to access the PHY or NVM. The mask
* will also specify which port we're releasing the lock for.
**/
void
e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
DEBUGFUNC("e1000_release_swfw_sync");
while (e1000_get_hw_semaphore(hw) != E1000_SUCCESS)
; /* Empty */
swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
e1000_put_hw_semaphore(hw);
}

View File

@ -60,6 +60,7 @@ s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
void e1000_set_lan_id_single_port(struct e1000_hw *hw);
void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
u16 *duplex);
s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
@ -84,15 +85,11 @@ void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
void e1000_clear_vfta_generic(struct e1000_hw *hw);
void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
void e1000_reset_adaptive_generic(struct e1000_hw *hw);
void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
void e1000_update_adaptive_generic(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
s32 e1000_get_hw_semaphore(struct e1000_hw *hw);
void e1000_put_hw_semaphore(struct e1000_hw *hw);
s32 e1000_acquire_swfw_sync(struct e1000_hw *hw, u16 mask);
void e1000_release_swfw_sync(struct e1000_hw *hw, u16 mask);
#endif

View File

@ -39,7 +39,6 @@
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
@ -48,14 +47,6 @@
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/iflib.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
@ -67,40 +58,11 @@
#define ASSERT(x) if(!(x)) panic("EM: x")
#define us_scale(x) max(1, (x/(1000000/hz)))
static inline int
ms_scale(int x) {
if (hz == 1000) {
return (x);
} else if (hz > 1000) {
return (x*(hz/1000));
} else {
return (max(1, x/(1000/hz)));
}
}
static inline void
safe_pause_us(int x) {
if (cold) {
DELAY(x);
} else {
pause("e1000_delay", max(1, x/(1000000/hz)));
}
}
static inline void
safe_pause_ms(int x) {
if (cold) {
DELAY(x*1000);
} else {
pause("e1000_delay", ms_scale(x));
}
}
#define usec_delay(x) safe_pause_us(x)
#define usec_delay(x) DELAY(x)
#define usec_delay_irq(x) usec_delay(x)
#define msec_delay(x) safe_pause_ms(x)
#define msec_delay_irq(x) msec_delay(x)
#define msec_delay(x) DELAY(1000*(x))
#define msec_delay_irq(x) DELAY(1000*(x))
/* Enable/disable debugging statements in shared code */
#define DBG 0
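For reference, the ms_scale() helper removed above converted milliseconds into pause(9) ticks; a few worked cases (arithmetic only, not code from the tree):

	/* ms_scale(5) by kernel tick rate:
	 *   hz == 1000: 5				(one tick per millisecond)
	 *   hz == 2000: 5 * (2000/1000) == 10		(finer ticks, so more of them)
	 *   hz ==  100: max(1, 5 / (1000/100)) == max(1, 0) == 1  (never sleep zero ticks)
	 */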
@ -119,6 +81,16 @@ safe_pause_ms(int x) {
#define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */
#define PCI_COMMAND_REGISTER PCIR_COMMAND
/* Mutex used in the shared code */
#define E1000_MUTEX struct mtx
#define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \
MTX_NETWORK_LOCK, \
MTX_DEF | MTX_DUPOK)
#define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex)
#define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex)
#define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex)
#define E1000_MUTEX_UNLOCK(mutex) mtx_unlock(mutex)
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
@ -144,12 +116,6 @@ typedef int8_t s8;
#endif
#endif /*__FreeBSD_version < 800000 */
#ifdef INVARIANTS
#define ASSERT_CTX_LOCK_HELD(hw) (sx_assert(iflib_ctx_lock_get(((struct e1000_osdep *)hw->back)->ctx), SX_XLOCKED))
#else
#define ASSERT_CTX_LOCK_HELD(hw)
#endif
#if defined(__i386__) || defined(__amd64__)
static __inline
void prefetch(void *x)
@ -169,7 +135,6 @@ struct e1000_osdep
bus_space_tag_t flash_bus_space_tag;
bus_space_handle_t flash_bus_space_handle;
device_t dev;
if_ctx_t ctx;
};
#define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \
@ -251,22 +216,5 @@ struct e1000_osdep
bus_space_write_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \
((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value)
#if defined(INVARIANTS)
#include <sys/proc.h>
#define ASSERT_NO_LOCKS() \
do { \
int unknown_locks = curthread->td_locks - mtx_owned(&Giant); \
if (unknown_locks > 0) { \
WITNESS_WARN(WARN_GIANTOK|WARN_SLEEPOK|WARN_PANIC, NULL, "unexpected non-sleepable lock"); \
} \
MPASS(curthread->td_rw_rlocks == 0); \
MPASS(curthread->td_lk_slocks == 0); \
} while (0)
#else
#define ASSERT_NO_LOCKS()
#endif
#endif /* _FREEBSD_OS_H_ */
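The E1000_MUTEX wrappers restored in this header are thin aliases over mtx(9); as the 82571 and ich8lan hunks use them, the lifecycle reduces to the following sketch (hypothetical variable name):

	E1000_MUTEX nvm_mutex;			/* struct mtx */

	E1000_MUTEX_INIT(&nvm_mutex);		/* mtx_init(..., MTX_DEF | MTX_DUPOK) */
	E1000_MUTEX_LOCK(&nvm_mutex);
	/* ... serialized PHY/NVM access ... */
	E1000_MUTEX_UNLOCK(&nvm_mutex);
	E1000_MUTEX_DESTROY(&nvm_mutex);	/* mtx_destroy() */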

View File

@ -66,7 +66,6 @@ static void em_receive_checksum(uint32_t status, if_rxd_info_t ri);
static int em_determine_rsstype(u32 pkt_info);
extern int em_intr(void *arg);
struct if_txrx em_txrx = {
em_isc_txd_encap,
em_isc_txd_flush,
@ -75,7 +74,7 @@ struct if_txrx em_txrx = {
em_isc_rxd_pkt_get,
em_isc_rxd_refill,
em_isc_rxd_flush,
em_intr,
em_intr
};
struct if_txrx lem_txrx = {
@ -86,7 +85,7 @@ struct if_txrx lem_txrx = {
lem_isc_rxd_pkt_get,
lem_isc_rxd_refill,
em_isc_rxd_flush,
em_intr,
em_intr
};
extern if_shared_ctx_t em_sctx;
@ -524,8 +523,8 @@ em_isc_rxd_refill(void *arg, if_rxd_update_t iru)
for (i = 0, next_pidx = pidx; i < count; i++) {
rxd = &rxr->rx_base[next_pidx];
rxd->read.buffer_addr = htole64(paddrs[i]);
/* Zero out rx desc status */
rxd->wb.upper.status_error &= htole32(~0xFF);
/* DD bits must be cleared */
rxd->wb.upper.status_error = 0;
if (++next_pidx == scctx->isc_nrxd[0])
next_pidx = 0;
@ -552,9 +551,14 @@ lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
struct e1000_rx_desc *rxd;
u32 staterr = 0;
int cnt, i;
budget = min(budget, scctx->isc_nrxd[0]);
for (cnt = 0, i = idx; cnt <= budget;) {
if (budget == 1) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[idx];
staterr = rxd->status;
return (staterr & E1000_RXD_STAT_DD);
}
for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = (struct e1000_rx_desc *)&rxr->rx_base[i];
staterr = rxd->status;
@ -567,7 +571,6 @@ lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
if (staterr & E1000_RXD_STAT_EOP)
cnt++;
}
MPASS(cnt <= scctx->isc_nrxd[0]);
return (cnt);
}
@ -581,9 +584,14 @@ em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
union e1000_rx_desc_extended *rxd;
u32 staterr = 0;
int cnt, i;
budget = min(budget, scctx->isc_nrxd[0]);
for (cnt = 0, i = idx; cnt <= budget;) {
if (budget == 1) {
rxd = &rxr->rx_base[idx];
staterr = le32toh(rxd->wb.upper.status_error);
return (staterr & E1000_RXD_STAT_DD);
}
for (cnt = 0, i = idx; cnt < scctx->isc_nrxd[0] && cnt <= budget;) {
rxd = &rxr->rx_base[i];
staterr = le32toh(rxd->wb.upper.status_error);
@ -598,7 +606,6 @@ em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
cnt++;
}
MPASS(cnt <= scctx->isc_nrxd[0]);
return (cnt);
}
@ -687,8 +694,7 @@ em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
pkt_info = le32toh(rxd->wb.lower.mrq);
/* Error Checking then decrement count */
KASSERT(staterr & E1000_RXD_STAT_DD,
("cidx=%d i=%d iri_len=%d", cidx, i, ri->iri_len));
MPASS((staterr & E1000_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
ri->iri_len += len;

View File

@ -483,7 +483,7 @@ static struct if_shared_ctx em_sctx_init = {
.isc_vendor_info = em_vendor_info_array,
.isc_driver_version = em_driver_version,
.isc_driver = &em_if_driver,
.isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
@ -511,7 +511,7 @@ static struct if_shared_ctx igb_sctx_init = {
.isc_vendor_info = igb_vendor_info_array,
.isc_driver_version = em_driver_version,
.isc_driver = &em_if_driver,
.isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
@ -723,7 +723,7 @@ em_if_attach_pre(if_ctx_t ctx)
return (ENXIO);
}
adapter->ctx = adapter->osdep.ctx = ctx;
adapter->ctx = ctx;
adapter->dev = adapter->osdep.dev = dev;
scctx = adapter->shared = iflib_get_softc_ctx(ctx);
adapter->media = iflib_get_media(ctx);
@ -1405,9 +1405,7 @@ em_msix_link(void *arg)
{
struct adapter *adapter = arg;
u32 reg_icr;
int is_igb;
is_igb = (adapter->hw.mac.type >= igb_mac_min);
++adapter->link_irq;
MPASS(adapter->hw.back != NULL);
reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
@ -1415,29 +1413,26 @@ em_msix_link(void *arg)
if (reg_icr & E1000_ICR_RXO)
adapter->rx_overruns++;
if (is_igb) {
if (reg_icr & E1000_ICR_LSC)
em_handle_link(adapter->ctx);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
em_handle_link(adapter->ctx);
} else {
if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
em_handle_link(adapter->ctx);
}
E1000_WRITE_REG(&adapter->hw, E1000_IMS,
EM_MSIX_LINK | E1000_IMS_LSC);
/*
* Because we must read the ICR for this interrupt
* it may clear other causes using autoclear, for
* this reason we simply create a soft interrupt
* for all these vectors.
*/
if (reg_icr) {
E1000_WRITE_REG(&adapter->hw,
E1000_ICS, adapter->ims);
}
EM_MSIX_LINK | E1000_IMS_LSC);
if (adapter->hw.mac.type >= igb_mac_min)
E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
}
/*
* Because we must read the ICR for this interrupt
* it may clear other causes using autoclear, for
* this reason we simply create a soft interrupt
* for all these vectors.
*/
if (reg_icr && adapter->hw.mac.type < igb_mac_min) {
E1000_WRITE_REG(&adapter->hw,
E1000_ICS, adapter->ims);
}
return (FILTER_HANDLED);
}
@ -1675,6 +1670,13 @@ em_if_timer(if_ctx_t ctx, uint16_t qid)
return;
iflib_admin_intr_deferred(ctx);
/* Reset LAA into RAR[0] on 82571 */
if ((adapter->hw.mac.type == e1000_82571) &&
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->hw.mac.type < em_mac_min)
lem_smartspeed(adapter);
/* Mask to use in the irq trigger */
if (adapter->intr_type == IFLIB_INTR_MSIX) {
@ -1785,14 +1787,6 @@ em_if_update_admin_status(if_ctx_t ctx)
}
em_update_stats_counters(adapter);
/* Reset LAA into RAR[0] on 82571 */
if ((adapter->hw.mac.type == e1000_82571) &&
e1000_get_laa_state_82571(&adapter->hw))
e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
if (adapter->hw.mac.type < em_mac_min)
lem_smartspeed(adapter);
E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC);
}
@ -1908,87 +1902,6 @@ em_allocate_pci_resources(if_ctx_t ctx)
return (0);
}
static int
igb_intr_assign(if_ctx_t ctx, int msix)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rx_que = adapter->rx_queues;
struct em_tx_queue *tx_que = adapter->tx_queues;
int error, rid, i, vector = 0, rx_vectors;
char buf[16];
/* First set up ring resources */
for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX,
em_msix_que, rx_que, rx_que->me, buf);
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d\n", i, error);
adapter->rx_num_queues = i;
goto fail;
}
rx_que->msix = vector;
/*
* Set the bit to enable interrupt
* in E1000_IMS -- bits 20 and 21
* are for RX0 and RX1, note this has
* NOTHING to do with the MSIX vector
*/
if (adapter->hw.mac.type == e1000_82574) {
rx_que->eims = 1 << (20 + i);
adapter->ims |= rx_que->eims;
adapter->ivars |= (8 | rx_que->msix) << (i * 4);
} else if (adapter->hw.mac.type == e1000_82575)
rx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
else
rx_que->eims = 1 << vector;
}
rx_vectors = vector;
vector = 0;
for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
snprintf(buf, sizeof(buf), "txq%d", i);
tx_que = &adapter->tx_queues[i];
tx_que->msix = adapter->rx_queues[i % adapter->rx_num_queues].msix;
rid = rman_get_start(adapter->rx_queues[i % adapter->rx_num_queues].que_irq.ii_res);
iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->me, buf);
if (adapter->hw.mac.type == e1000_82574) {
tx_que->eims = 1 << (22 + i);
adapter->ims |= tx_que->eims;
adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
} else if (adapter->hw.mac.type == e1000_82575) {
tx_que->eims = E1000_EICR_TX_QUEUE0 << (i % adapter->tx_num_queues);
} else {
tx_que->eims = 1 << (i % adapter->tx_num_queues);
}
}
/* Link interrupt */
rid = rx_vectors + 1;
error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
goto fail;
}
adapter->linkvec = rx_vectors;
if (adapter->hw.mac.type < igb_mac_min) {
adapter->ivars |= (8 | rx_vectors) << 16;
adapter->ivars |= 0x80000000;
}
return (0);
fail:
iflib_irq_free(ctx, &adapter->irq);
rx_que = adapter->rx_queues;
for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
iflib_irq_free(ctx, &rx_que->que_irq);
return (error);
}
/*********************************************************************
*
* Setup the MSIX Interrupt handlers
@ -2000,18 +1913,14 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rx_que = adapter->rx_queues;
struct em_tx_queue *tx_que = adapter->tx_queues;
int error, rid, i, vector = 0;
int error, rid, i, vector = 0, rx_vectors;
char buf[16];
if (adapter->hw.mac.type >= igb_mac_min) {
return igb_intr_assign(ctx, msix);
}
/* First set up ring resources */
for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, em_msix_que, rx_que, rx_que->me, buf);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
adapter->rx_num_queues = i + 1;
@ -2035,19 +1944,16 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
else
rx_que->eims = 1 << vector;
}
rx_vectors = vector;
vector = 0;
for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "txq%d", i);
tx_que = &adapter->tx_queues[i];
iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->me, buf);
error = iflib_irq_alloc_generic(ctx, &tx_que->que_irq, rid, IFLIB_INTR_TX, em_msix_que, tx_que, tx_que->me, buf);
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
adapter->tx_num_queues = i + 1;
goto fail;
}
tx_que->msix = vector;
tx_que->msix = (vector % adapter->tx_num_queues);
/*
* Set the bit to enable interrupt
@ -2060,24 +1966,23 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
adapter->ims |= tx_que->eims;
adapter->ivars |= (8 | tx_que->msix) << (8 + (i * 4));
} else if (adapter->hw.mac.type == e1000_82575) {
tx_que->eims = E1000_EICR_TX_QUEUE0 << vector;
tx_que->eims = E1000_EICR_TX_QUEUE0 << (i % adapter->tx_num_queues);
} else {
tx_que->eims = 1 << vector;
tx_que->eims = 1 << (i % adapter->tx_num_queues);
}
}
/* Link interrupt */
rid = vector + 1;
rid = rx_vectors + 1;
error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, adapter, 0, "aq");
if (error) {
device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
goto fail;
}
adapter->linkvec = vector;
adapter->linkvec = rx_vectors;
if (adapter->hw.mac.type < igb_mac_min) {
adapter->ivars |= (8 | vector) << 16;
adapter->ivars |= (8 | rx_vectors) << 16;
adapter->ivars |= 0x80000000;
}
return (0);
@ -2234,24 +2139,15 @@ static void
em_free_pci_resources(if_ctx_t ctx)
{
struct adapter *adapter = iflib_get_softc(ctx);
struct em_rx_queue *rxque = adapter->rx_queues;
struct em_tx_queue *txque = adapter->tx_queues;
struct em_rx_queue *que = adapter->rx_queues;
device_t dev = iflib_get_dev(ctx);
int is_igb;
is_igb = (adapter->hw.mac.type >= igb_mac_min);
/* Release all msix queue resources */
if (adapter->intr_type == IFLIB_INTR_MSIX)
iflib_irq_free(ctx, &adapter->irq);
for (int i = 0; i < adapter->rx_num_queues; i++, rxque++) {
iflib_irq_free(ctx, &rxque->que_irq);
}
if (!is_igb) {
for (int i = 0; i < adapter->tx_num_queues; i++, txque++) {
iflib_irq_free(ctx, &txque->que_irq);
}
for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
iflib_irq_free(ctx, &que->que_irq);
}
/* First release all the interrupt resources */

View File

@ -434,7 +434,6 @@ struct em_tx_queue {
u32 eims; /* This queue's EIMS bit */
u32 me;
struct tx_ring txr;
struct if_irq que_irq;
};
struct em_rx_queue {
@ -444,7 +443,7 @@ struct em_rx_queue {
u32 eims;
struct rx_ring rxr;
u64 irqs;
struct if_irq que_irq;
struct if_irq que_irq;
};
/* Our adapter structure */

View File

@ -48,26 +48,17 @@ __FBSDID("$FreeBSD$");
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues");
static void gtaskqueue_thread_enqueue(void *);
static void gtaskqueue_thread_loop(void *arg);
static int _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1, false, PI_SOFT);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);
struct gtaskqueue_busy {
struct gtask *tb_running;
TAILQ_ENTRY(gtaskqueue_busy) tb_link;
};
struct gt_intr_thread {
int git_flags; /* (j) IT_* flags. */
int git_need; /* Needs service. */
};
/* Interrupt thread flags kept in it_flags */
#define IT_DEAD 0x000001 /* Thread is waiting to exit. */
#define IT_WAIT 0x000002 /* Thread is waiting for completion. */
static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
struct gtaskqueue {
@ -78,7 +69,6 @@ struct gtaskqueue {
TAILQ_HEAD(, gtaskqueue_busy) tq_active;
struct mtx tq_mutex;
struct thread **tq_threads;
struct gt_intr_thread *tq_gt_intrs;
int tq_tcount;
int tq_spin;
int tq_flags;
@ -90,7 +80,6 @@ struct gtaskqueue {
#define TQ_FLAGS_ACTIVE (1 << 0)
#define TQ_FLAGS_BLOCKED (1 << 1)
#define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)
#define TQ_FLAGS_INTR (1 << 3)
#define DT_CALLOUT_ARMED (1 << 0)
@ -191,32 +180,6 @@ gtaskqueue_free(struct gtaskqueue *queue)
free(queue, M_GTASKQUEUE);
}
static void
schedule_ithread(struct gtaskqueue *queue)
{
struct proc *p;
struct thread *td;
struct gt_intr_thread *git;
MPASS(queue->tq_tcount == 1);
td = queue->tq_threads[0];
git = &queue->tq_gt_intrs[0];
p = td->td_proc;
atomic_store_rel_int(&git->git_need, 1);
thread_lock(td);
if (TD_AWAITING_INTR(td)) {
CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
td->td_name);
TD_CLR_IWAIT(td);
sched_add(td, SRQ_INTR);
} else {
CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
__func__, p->p_pid, td->td_name, git->git_need, td->td_state);
}
thread_unlock(td);
}
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
@ -234,13 +197,8 @@ grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
gtask->ta_flags |= TASK_ENQUEUED;
TQ_UNLOCK(queue);
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0) {
if (queue->tq_flags & TQ_FLAGS_INTR) {
schedule_ithread(queue);
} else {
queue->tq_enqueue(queue->tq_context);
}
}
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
queue->tq_enqueue(queue->tq_context);
return (0);
}
@ -445,7 +403,7 @@ gtaskqueue_drain_all(struct gtaskqueue *queue)
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
cpuset_t *mask, bool intr, const char *name, va_list ap)
cpuset_t *mask, const char *name, va_list ap)
{
char ktname[MAXCOMLEN + 1];
struct thread *td;
@ -464,12 +422,6 @@ _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
printf("%s: no memory for %s threads\n", __func__, ktname);
return (ENOMEM);
}
tq->tq_gt_intrs = malloc(sizeof(struct gt_intr_thread) * count, M_GTASKQUEUE,
M_NOWAIT | M_ZERO);
if (tq->tq_gt_intrs == NULL) {
printf("%s: no memory for %s intr info\n", __func__, ktname);
return (ENOMEM);
}
for (i = 0; i < count; i++) {
if (count == 1)
@ -487,9 +439,6 @@ _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
} else
tq->tq_tcount++;
}
if (intr)
tq->tq_flags |= TQ_FLAGS_INTR;
for (i = 0; i < count; i++) {
if (tq->tq_threads[i] == NULL)
continue;
@ -509,14 +458,7 @@ _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
}
thread_lock(td);
sched_prio(td, pri);
if (intr) {
/* we need to schedule the thread from the interrupt handler for this to work */
TD_SET_IWAIT(td);
sched_class(td, PRI_ITHD);
td->td_pflags |= TDP_ITHREAD;
} else {
sched_add(td, SRQ_BORING);
}
sched_add(td, SRQ_BORING);
thread_unlock(td);
}
@ -525,13 +467,13 @@ _gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
static int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
bool intr, const char *name, ...)
const char *name, ...)
{
va_list ap;
int error;
va_start(ap, name);
error = _gtaskqueue_start_threads(tqp, count, pri, NULL, intr, name, ap);
error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
va_end(ap);
return (error);
}
@ -549,58 +491,16 @@ gtaskqueue_run_callback(struct gtaskqueue *tq,
}
static void
intr_thread_loop(struct gtaskqueue *tq)
gtaskqueue_thread_loop(void *arg)
{
struct gt_intr_thread *git;
struct thread *td;
git = &tq->tq_gt_intrs[0];
td = tq->tq_threads[0];
MPASS(tq->tq_tcount == 1);
struct gtaskqueue **tqp, *tq;
tqp = arg;
tq = *tqp;
gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
TQ_LOCK(tq);
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
THREAD_NO_SLEEPING();
while (atomic_cmpset_acq_int(&git->git_need, 1, 0) != 0) {
gtaskqueue_run_locked(tq);
}
THREAD_SLEEPING_OK();
/*
* Because taskqueue_run() can drop tq_mutex, we need to
* check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
* meantime, which means we missed a wakeup.
*/
if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
break;
TQ_UNLOCK(tq);
WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
mtx_assert(&Giant, MA_NOTOWNED);
thread_lock(td);
if (atomic_load_acq_int(&git->git_need) == 0 &&
(git->git_flags & (IT_DEAD | IT_WAIT)) == 0) {
TD_SET_IWAIT(td);
mi_switch(SW_VOL | SWT_IWAIT, NULL);
}
#if 0
/* XXX is this something we want? */
if (git->git_flags & IT_WAIT) {
wake = 1;
git->git_flags &= ~IT_WAIT;
}
#endif
thread_unlock(td);
TQ_LOCK(tq);
}
THREAD_NO_SLEEPING();
gtaskqueue_run_locked(tq);
THREAD_SLEEPING_OK();
}
static void
timeshare_thread_loop(struct gtaskqueue *tq)
{
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
/* XXX ? */
gtaskqueue_run_locked(tq);
/*
* Because taskqueue_run() can drop tq_mutex, we need to
@ -612,23 +512,6 @@ timeshare_thread_loop(struct gtaskqueue *tq)
TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
}
gtaskqueue_run_locked(tq);
}
static void
gtaskqueue_thread_loop(void *arg)
{
struct gtaskqueue **tqp, *tq;
tqp = arg;
tq = *tqp;
gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
TQ_LOCK(tq);
if (curthread->td_pflags & TDP_ITHREAD) {
intr_thread_loop(tq);
} else {
timeshare_thread_loop(tq);
}
/*
* This thread is on its way out, so just drop the lock temporarily
* in order to call the shutdown callback. This allows the callback
@ -675,17 +558,11 @@ struct taskqgroup_cpu {
struct taskqgroup {
struct taskqgroup_cpu tqg_queue[MAXCPU];
struct mtx tqg_lock;
void (*adjust_func)(void*);
char * tqg_name;
int tqg_adjusting;
int tqg_stride;
int tqg_cnt;
int tqg_pri;
int tqg_flags;
bool tqg_intr;
};
#define TQG_NEED_ADJUST 0x1
#define TQG_ADJUSTED 0x2
struct taskq_bind_task {
struct gtask bt_task;
@ -693,16 +570,16 @@ struct taskq_bind_task {
};
static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu, bool intr, int pri)
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
struct taskqgroup_cpu *qcpu;
qcpu = &qgroup->tqg_queue[idx];
LIST_INIT(&qcpu->tgc_tasks);
qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK | M_ZERO,
qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
taskqueue_thread_enqueue, &qcpu->tgc_taskq);
gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, pri,
intr, "%s_%d", qgroup->tqg_name, idx);
gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
"%s_%d", qgroup->tqg_name, idx);
qcpu->tgc_cpu = cpu;
}
@ -786,20 +663,12 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
void *uniq, int irq, char *name)
{
cpuset_t mask;
int qid, error;
int qid;
gtask->gt_uniq = uniq;
gtask->gt_name = name;
gtask->gt_irq = irq;
gtask->gt_cpu = -1;
mtx_lock(&qgroup->tqg_lock);
qgroup->tqg_flags |= TQG_NEED_ADJUST;
mtx_unlock(&qgroup->tqg_lock);
if (tqg_smp_started && !(qgroup->tqg_flags & TQG_ADJUSTED))
qgroup->adjust_func(NULL);
mtx_lock(&qgroup->tqg_lock);
qid = taskqgroup_find(qgroup, uniq);
qgroup->tqg_queue[qid].tgc_cnt++;
@ -810,9 +679,7 @@ taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
CPU_ZERO(&mask);
CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
mtx_unlock(&qgroup->tqg_lock);
error = intr_setaffinity(irq, CPU_WHICH_INTRHANDLER, &mask);
if (error)
printf("taskqgroup_attach: setaffinity failed: %d\n", error);
intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
} else
mtx_unlock(&qgroup->tqg_lock);
}
@ -821,7 +688,7 @@ static void
taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
cpuset_t mask;
int qid, cpu, error;
int qid, cpu;
mtx_lock(&qgroup->tqg_lock);
qid = taskqgroup_find(qgroup, gtask->gt_uniq);
@ -831,10 +698,9 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_INTRHANDLER, &mask);
intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
mtx_lock(&qgroup->tqg_lock);
if (error)
printf("taskqgroup_attach_deferred: setaffinity failed: %d\n", error);
}
qgroup->tqg_queue[qid].tgc_cnt++;
@ -845,79 +711,27 @@ taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
mtx_unlock(&qgroup->tqg_lock);
}
static int
taskqgroup_adjust_deferred(struct taskqgroup *qgroup, int cpu)
{
int i, error = 0, cpu_max = -1;
mtx_lock(&qgroup->tqg_lock);
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu > cpu_max)
cpu_max = qgroup->tqg_queue[i].tgc_cpu;
if (cpu_max >= cpu) {
mtx_unlock(&qgroup->tqg_lock);
return (0);
}
MPASS(cpu <= mp_maxid);
error = _taskqgroup_adjust(qgroup, cpu + 1, qgroup->tqg_stride,
qgroup->tqg_intr, qgroup->tqg_pri);
if (error) {
printf("%s: _taskqgroup_adjust(%p, %d, %d, %d, %d) => %d\n\n",
__func__, qgroup, cpu + 1, qgroup->tqg_stride, qgroup->tqg_intr,
qgroup->tqg_pri, error);
goto out;
}
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu > cpu_max)
cpu_max = qgroup->tqg_queue[i].tgc_cpu;
MPASS(cpu_max >= cpu);
out:
mtx_unlock(&qgroup->tqg_lock);
return (error);
}
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
void *uniq, int cpu, int irq, char *name)
{
cpuset_t mask;
int i, error, qid;
int i, qid;
qid = -1;
gtask->gt_uniq = uniq;
gtask->gt_name = name;
gtask->gt_irq = irq;
gtask->gt_cpu = cpu;
MPASS(cpu >= 0);
mtx_lock(&qgroup->tqg_lock);
qgroup->tqg_flags |= TQG_NEED_ADJUST;
mtx_unlock(&qgroup->tqg_lock);
if (tqg_smp_started && !(qgroup->tqg_flags & TQG_ADJUSTED)) {
uintptr_t cpuid = cpu + 1;
qgroup->adjust_func((void *)cpuid);
}
if ((error = taskqgroup_adjust_deferred(qgroup, cpu)))
return (error);
mtx_lock(&qgroup->tqg_lock);
if (tqg_smp_started) {
for (i = 0; i < qgroup->tqg_cnt; i++) {
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
qid = i;
break;
}
#ifdef INVARIANTS
else
printf("qgroup->tqg_queue[%d].tgc_cpu=0x%x tgc_cnt=0x%x\n",
i, qgroup->tqg_queue[i].tgc_cpu, qgroup->tqg_queue[i].tgc_cnt);
#endif
}
if (qid == -1) {
mtx_unlock(&qgroup->tqg_lock);
printf("%s: qid not found for cpu=%d\n", __func__, cpu);
return (EINVAL);
}
} else
@ -930,11 +744,8 @@ taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
if (irq != -1 && tqg_smp_started) {
error = intr_setaffinity(irq, CPU_WHICH_INTRHANDLER, &mask);
if (error)
printf("taskqgroup_attach_cpu: setaffinity failed: %d\n", error);
}
if (irq != -1 && tqg_smp_started)
intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
return (0);
}
@ -942,18 +753,13 @@ static int
taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
cpuset_t mask;
int i, qid, irq, cpu, error;
int i, qid, irq, cpu;
qid = -1;
irq = gtask->gt_irq;
cpu = gtask->gt_cpu;
MPASS(tqg_smp_started);
if ((error = taskqgroup_adjust_deferred(qgroup, cpu)))
return (error);
mtx_lock(&qgroup->tqg_lock);
/* adjust as needed */
MPASS(cpu <= mp_maxid);
for (i = 0; i < qgroup->tqg_cnt; i++)
if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
qid = i;
@ -961,7 +767,6 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtas
}
if (qid == -1) {
mtx_unlock(&qgroup->tqg_lock);
printf("%s: qid not found for cpu=%d\n", __func__, cpu);
return (EINVAL);
}
qgroup->tqg_queue[qid].tgc_cnt++;
@ -973,11 +778,8 @@ taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup, struct grouptask *gtas
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
if (irq != -1) {
error = intr_setaffinity(irq, CPU_WHICH_INTRHANDLER, &mask);
if (error)
printf("taskqgroup_attach_cpu: setaffinity failed: %d\n", error);
}
if (irq != -1)
intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
return (0);
}
@ -1016,25 +818,8 @@ taskqgroup_binder(void *ctx)
printf("taskqgroup_binder: setaffinity failed: %d\n",
error);
free(gtask, M_DEVBUF);
}
static void
taskqgroup_ithread_binder(void *ctx)
{
struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
cpuset_t mask;
int error;
CPU_ZERO(&mask);
CPU_SET(gtask->bt_cpuid, &mask);
error = cpuset_setthread(curthread->td_tid, &mask);
if (error)
printf("taskqgroup_binder: setaffinity failed: %d\n",
error);
free(gtask, M_DEVBUF);
}
static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
@ -1050,10 +835,7 @@ taskqgroup_bind(struct taskqgroup *qgroup)
for (i = 0; i < qgroup->tqg_cnt; i++) {
gtask = malloc(sizeof (*gtask), M_DEVBUF, M_WAITOK);
if (qgroup->tqg_intr)
GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_ithread_binder, gtask);
else
GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
&gtask->bt_task);
@ -1061,7 +843,7 @@ taskqgroup_bind(struct taskqgroup *qgroup)
}
static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri)
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
struct grouptask *gtask;
@ -1076,22 +858,14 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread,
return (EINVAL);
}
if (qgroup->tqg_adjusting) {
printf("%s: failed: adjusting\n", __func__);
printf("taskqgroup_adjust failed: adjusting\n");
return (EBUSY);
}
/* No work to be done */
if (qgroup->tqg_cnt == cnt)
return (0);
qgroup->tqg_adjusting = 1;
old_cnt = qgroup->tqg_cnt;
old_cpu = 0;
if (old_cnt < cnt) {
int old_max_idx = max(0, old_cnt-1);
old_cpu = qgroup->tqg_queue[old_max_idx].tgc_cpu;
if (old_cnt > 0)
for (k = 0; k < stride; k++)
old_cpu = CPU_NEXT(old_cpu);
}
if (old_cnt < cnt)
old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
mtx_unlock(&qgroup->tqg_lock);
/*
* Set up queue for tasks added before boot.
@ -1107,7 +881,7 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread,
*/
cpu = old_cpu;
for (i = old_cnt; i < cnt; i++) {
taskqgroup_cpu_create(qgroup, i, cpu, ithread, pri);
taskqgroup_cpu_create(qgroup, i, cpu);
for (k = 0; k < stride; k++)
cpu = CPU_NEXT(cpu);
@ -1115,8 +889,6 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread,
mtx_lock(&qgroup->tqg_lock);
qgroup->tqg_cnt = cnt;
qgroup->tqg_stride = stride;
qgroup->tqg_intr = ithread;
qgroup->tqg_pri = pri;
/*
* Adjust drivers to use new taskqs.
@ -1162,34 +934,12 @@ _taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread,
}
int
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri)
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
int error;
mtx_lock(&qgroup->tqg_lock);
error = _taskqgroup_adjust(qgroup, cnt, stride, ithread, pri);
mtx_unlock(&qgroup->tqg_lock);
return (error);
}
void
taskqgroup_set_adjust(struct taskqgroup *qgroup, void (*adjust_func)(void*))
{
qgroup->adjust_func = adjust_func;
}
int
taskqgroup_adjust_once(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri)
{
int error = 0;
mtx_lock(&qgroup->tqg_lock);
if ((qgroup->tqg_flags & (TQG_ADJUSTED|TQG_NEED_ADJUST)) == TQG_NEED_ADJUST) {
qgroup->tqg_flags |= TQG_ADJUSTED;
error = _taskqgroup_adjust(qgroup, cnt, stride, ithread, pri);
MPASS(error == 0);
}
error = _taskqgroup_adjust(qgroup, cnt, stride);
mtx_unlock(&qgroup->tqg_lock);
return (error);
@ -1204,9 +954,7 @@ taskqgroup_create(char *name)
mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
qgroup->tqg_name = name;
LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);
MPASS(qgroup->tqg_queue[0].tgc_cnt == 0);
MPASS(qgroup->tqg_queue[0].tgc_cpu == 0);
MPASS(qgroup->tqg_queue[0].tgc_taskq == 0);
return (qgroup);
}
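
For reference, the interface restored above is the pre-rollup one: a grouptask carries an opaque uniq key plus an optional irq, and is wired up with taskqgroup_attach(). A minimal sketch of a caller, assuming a hypothetical handler my_intr and softc pointer sc (neither appears in this diff):

static struct grouptask my_gtask;

static void
my_intr(void *ctx)
{
	/* service the queue owned by softc 'ctx' */
}

static void
my_attach(void *sc, int irq)
{
	/* priority 0; 'sc' doubles as the uniq lookup key in this sketch */
	GROUPTASK_INIT(&my_gtask, 0, my_intr, sc);
	taskqgroup_attach(qgroup_softirq, &my_gtask, sc, irq, "my_task");
}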

File diff suppressed because it is too large

View File

@ -119,7 +119,6 @@ typedef struct if_pkt_info {
qidx_t ipi_pidx; /* start pidx for encap */
qidx_t ipi_new_pidx; /* next available pidx post-encap */
/* offload handling */
caddr_t ipi_hdr_data; /* raw header */
uint8_t ipi_ehdrlen; /* ether header length */
uint8_t ipi_ip_hlen; /* ip header length */
uint8_t ipi_tcp_hlen; /* tcp header length */
@ -184,7 +183,6 @@ typedef struct if_txrx {
void (*ift_rxd_refill) (void * , if_rxd_update_t iru);
void (*ift_rxd_flush) (void *, uint16_t qsidx, uint8_t flidx, qidx_t pidx);
int (*ift_legacy_intr) (void *);
int (*ift_txd_errata) (void *, struct mbuf **mp);
} *if_txrx_t;
typedef struct if_softc_ctx {
@ -296,9 +294,9 @@ typedef enum {
*/
#define IFLIB_HAS_TXCQ 0x08
/*
*
* Interface does checksum in place
*/
#define IFLIB_UNUSED___0 0x10
#define IFLIB_NEED_SCRATCH 0x10
/*
* Interface doesn't expect in_pseudo for th_sum
*/
@ -307,10 +305,6 @@ typedef enum {
* Interface doesn't align IP header
*/
#define IFLIB_DO_RX_FIXUP 0x40
/*
* Driver needs csum zeroed for offloading
*/
#define IFLIB_NEED_ZERO_CSUM 0x80
@ -387,7 +381,7 @@ int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, i
void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count);
struct sx *iflib_ctx_lock_get(if_ctx_t);
struct mtx *iflib_ctx_lock_get(if_ctx_t);
struct mtx *iflib_qset_lock_get(if_ctx_t, uint16_t);
void iflib_led_create(if_ctx_t ctx);
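
The flag hunk above swaps IFLIB_UNUSED___0 back to IFLIB_NEED_SCRATCH and drops IFLIB_NEED_ZERO_CSUM. As a sketch of where these bits live, a driver advertises them in its shared context at attach time ("xx" is a placeholder driver name and the field values are illustrative, not taken from this diff):

static struct if_shared_ctx xx_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	/* does checksum in place, and uses a separate TX completion queue */
	.isc_flags = IFLIB_NEED_SCRATCH | IFLIB_HAS_TXCQ,
	/* remaining fields elided */
};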

View File

@ -226,15 +226,11 @@ drain_ring_lockless(struct ifmp_ring *r, union ring_state os, uint16_t prev, int
if (cidx != pidx && pending < 64 && total < budget)
continue;
critical_enter();
os.state = ns.state = r->state;
ns.cidx = cidx;
ns.flags = state_to_flags(ns, total >= budget);
while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0) {
cpu_spinwait();
do {
os.state = ns.state = r->state;
ns.cidx = cidx;
ns.flags = state_to_flags(ns, total >= budget);
}
} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
critical_exit();
if (ns.flags == ABDICATED)
@ -458,12 +454,18 @@ ifmp_ring_enqueue(struct ifmp_ring *r, void **items, int n, int budget)
do {
os.state = ns.state = r->state;
ns.pidx_tail = pidx_stop;
if (os.flags == IDLE)
ns.flags = ABDICATED;
ns.flags = BUSY;
} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
critical_exit();
counter_u64_add(r->enqueues, n);
/*
* Turn into a consumer if some other thread isn't active as a consumer
* already.
*/
if (os.flags != BUSY)
drain_ring_lockless(r, ns, os.flags, budget);
return (0);
}
#endif
@ -474,9 +476,7 @@ ifmp_ring_check_drainage(struct ifmp_ring *r, int budget)
union ring_state os, ns;
os.state = r->state;
if ((os.flags != STALLED && os.flags != ABDICATED) || // Only continue in STALLED and ABDICATED
os.pidx_head != os.pidx_tail || // Require work to be available
(os.flags != ABDICATED && r->can_drain(r) == 0)) // Can either drain, or everyone left
if (os.flags != STALLED || os.pidx_head != os.pidx_tail || r->can_drain(r) == 0)
return;
MPASS(os.cidx != os.pidx_tail); /* implied by STALLED */

View File

@ -58,9 +58,7 @@ int taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *grptask,
void taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask);
struct taskqgroup *taskqgroup_create(char *name);
void taskqgroup_destroy(struct taskqgroup *qgroup);
int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri);
int taskqgroup_adjust_once(struct taskqgroup *qgroup, int cnt, int stride, bool ithread, int pri);
void taskqgroup_set_adjust(struct taskqgroup *qgroup, void (*adjust_func)(void*));
int taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride);
#define TASK_ENQUEUED 0x1
#define TASK_SKIP_WAKEUP 0x2
@ -82,40 +80,27 @@ void taskqgroup_set_adjust(struct taskqgroup *qgroup, void (*adjust_func)(void*)
#define TASKQGROUP_DECLARE(name) \
extern struct taskqgroup *qgroup_##name
#define TASKQGROUP_DEFINE(name, cnt, stride, intr, pri) \
#define TASKQGROUP_DEFINE(name, cnt, stride) \
\
struct taskqgroup *qgroup_##name; \
\
static void \
taskqgroup_adjust_##name(void *arg) \
{ \
int max = (intr) ? 1 : (cnt); \
if (arg != NULL) { \
uintptr_t maxcpu = (uintptr_t) arg; \
max = maxcpu; \
} \
\
taskqgroup_adjust_once(qgroup_##name, max, (stride), (intr), (pri)); \
} \
\
SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \
taskqgroup_adjust_##name, NULL); \
\
static void \
taskqgroup_define_##name(void *arg) \
{ \
qgroup_##name = taskqgroup_create(#name); \
taskqgroup_set_adjust(qgroup_##name, taskqgroup_adjust_##name); \
} \
\
SYSINIT(taskqgroup_##name, SI_SUB_TASKQ, SI_ORDER_FIRST, \
taskqgroup_define_##name, NULL)
taskqgroup_define_##name, NULL); \
\
static void \
taskqgroup_adjust_##name(void *arg) \
{ \
taskqgroup_adjust(qgroup_##name, (cnt), (stride)); \
} \
\
SYSINIT(taskqgroup_adj_##name, SI_SUB_SMP, SI_ORDER_ANY, \
taskqgroup_adjust_##name, NULL)
TASKQGROUP_DECLARE(net);
TASKQGROUP_DECLARE(softirq);
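
With the adjust SYSINIT restored to run after the define SYSINIT (SI_SUB_TASKQ first, then SI_SUB_SMP), instantiating a group is back to three arguments. The groups declared above would typically be defined in the corresponding .c files along these lines; the counts shown are illustrative, not part of this header:

TASKQGROUP_DEFINE(net, mp_ncpus, 1);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);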