/******************************************************************************
  SPDX-License-Identifier: BSD-3-Clause

  Copyright (c) 2001-2020, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include "e1000_hw.h"
#include "e1000_82575.h"
#include "e1000_mac.h"
#include "e1000_base.h"
#include "e1000_manage.h"

/**
 * e1000_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.
 **/
s32 e1000_acquire_phy_base(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_acquire_phy_base");

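	/* Each PCI function uses its own SW/FW PHY semaphore bit; select
	 * the mask for the PHY that belongs to this function.
	 */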
	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * e1000_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.
 **/
void e1000_release_phy_base(struct e1000_hw *hw)
{
	u16 mask = E1000_SWFW_PHY0_SM;

	DEBUGFUNC("e1000_release_phy_base");

	if (hw->bus.func == E1000_FUNC_1)
		mask = E1000_SWFW_PHY1_SM;
	else if (hw->bus.func == E1000_FUNC_2)
		mask = E1000_SWFW_PHY2_SM;
	else if (hw->bus.func == E1000_FUNC_3)
		mask = E1000_SWFW_PHY3_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * e1000_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware, readying it for operation.
 **/
s32 e1000_init_hw_base(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_base");

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/* Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_base_generic(hw);

	return ret_val;
}

/**
 * e1000_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * When the PHY is powered down to save power, or to turn off link during a
 * driver unload, and wake on LAN is not enabled, remove the link.
 **/
void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;

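	/* Nothing to do if this PHY does not provide a check_reset_block op */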
	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (phy->ops.check_reset_block(hw))
		e1000_power_down_phy_copper(hw);
}

/**
 * e1000_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO. This
 * function clears the FIFOs and flushes any packets that came in as Rx was
 * being enabled.
 **/
void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("e1000_rx_fifo_flush_base");

	/* Disable IPv6 options as per hardware errata */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

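	/* With manageability not consuming packets (RCV_TCO disabled), no
	 * stray data has been placed in the Rx FIFO, so there is nothing
	 * to flush.
	 */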
	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
				rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set RCTL.EN and wait 2ms so that
	 * any packet that was arriving while RCTL.EN was being set is flushed.
	 */
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
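	/* ROC, RNBC and MPC are clear-on-read statistics registers, so the
	 * reads below simply discard the counts accumulated during the flush.
	 */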
	E1000_READ_REG(hw, E1000_ROC);
	E1000_READ_REG(hw, E1000_RNBC);
	E1000_READ_REG(hw, E1000_MPC);
}