net/qede/base: use passed ptt handler

Use the PTT (PF translation table) handler that is passed as a parameter
rather than using the main PTT from the HW function.
In ecore_hw_get_resc()'s error flow, release the MFW generic resource lock
only if needed.
Change the verbosity level of GRC timeout from DP_INFO() to DP_NOTICE().
Reduce verbosity of print in ecore_hw_bar_size().

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
This commit is contained in:
Rasesh Mody 2017-09-18 18:29:56 -07:00 committed by Ferruh Yigit
parent 3c6a3cf607
commit 739a5b2f2b
19 changed files with 186 additions and 119 deletions

View File

@ -23,13 +23,14 @@
/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
struct ecore_ptt;
struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
union ecore_mcp_protocol_stats;
enum ecore_hw_err_type;
void qed_link_update(struct ecore_hwfn *hwfn);
void qed_link_update(struct ecore_hwfn *hwfn, struct ecore_ptt *ptt);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#undef __BIG_ENDIAN
@ -327,7 +328,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
#define OSAL_LINK_UPDATE(hwfn, ptt) qed_link_update(hwfn, ptt)
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
/* SR-IOV channel */

View File

@ -1422,7 +1422,7 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_qm_iids iids;
@ -1430,7 +1430,7 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
@ -1785,9 +1785,9 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
ecore_cdu_init_common(p_hwfn);
}
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
ecore_qm_init_pf(p_hwfn);
ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);

View File

@ -98,15 +98,17 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly

View File

@ -56,7 +56,9 @@ enum BAR_ID {
BAR_ID_1 /* Used for doorbells */
};
static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
@ -70,7 +72,7 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
return 1 << 17;
}
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
val = ecore_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@ -79,14 +81,12 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
* In older MFW versions they are set to 0 which means disabled.
*/
if (p_hwfn->p_dev->num_hwfns > 1) {
DP_NOTICE(p_hwfn, false,
"BAR size not configured. Assuming BAR size of 256kB"
" for GRC and 512kB for DB\n");
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
} else {
DP_NOTICE(p_hwfn, false,
"BAR size not configured. Assuming BAR size of 512kB"
" for GRC and 512kB for DB\n");
DP_INFO(p_hwfn,
"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
val = 512 * 1024;
}
@ -777,7 +777,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
ecore_qm_init_pf(p_hwfn);
ecore_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@ -1036,7 +1036,7 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_l2_setup(p_hwfn);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_iov_setup(p_hwfn);
}
}
@ -1327,11 +1327,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
ecore_gtt_init(p_hwfn);
ecore_gtt_init(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev)) {
rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
rc = ecore_hw_init_chip(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
@ -1637,7 +1637,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
if (p_hwfn->p_dev->num_hwfns > 1)
db_bar_size /= 2;
@ -1808,7 +1808,7 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* Update rate limit once we'll actually have a link */
p_hwfn->qm_info.pf_rl = 100000;
}
ecore_cxt_hw_init_pf(p_hwfn);
ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
ecore_int_igu_init_rt(p_hwfn);
@ -1877,7 +1877,8 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
/* send function start command */
rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
p_hwfn->p_dev->mf_mode,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
@ -2394,18 +2395,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
return rc2;
}
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
{
int j;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
struct ecore_ptt *p_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_AGAIN;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
@ -2427,15 +2431,22 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
ecore_ptt_release(p_hwfn, p_ptt);
}
return ECORE_SUCCESS;
}
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev))
return;
return ECORE_SUCCESS;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_AGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
@ -2448,8 +2459,11 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
}
/* Re-open incoming traffic */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
ecore_ptt_release(p_hwfn, p_ptt);
return ECORE_SUCCESS;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@ -2589,12 +2603,14 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
static enum _ecore_status_t
__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
enum ecore_resources res_id, u32 resc_max_val,
struct ecore_ptt *p_ptt,
enum ecore_resources res_id,
u32 resc_max_val,
u32 *p_mcp_resp)
{
enum _ecore_status_t rc;
rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
@ -2612,7 +2628,8 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
@ -2632,7 +2649,7 @@ ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
continue;
}
rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc != ECORE_SUCCESS)
return rc;
@ -2821,6 +2838,7 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
{
struct ecore_resc_unlock_params resc_unlock_params;
@ -2858,7 +2876,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
} else if (rc == ECORE_NOTIMPL) {
@ -2870,7 +2888,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
rc = ECORE_BUSY;
goto unlock_and_exit;
} else {
rc = ecore_hw_set_soft_resc_size(p_hwfn);
rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
DP_NOTICE(p_hwfn, false,
"Failed to set the max values of the soft resources\n");
@ -2878,7 +2896,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
} else if (rc == ECORE_NOTIMPL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@ -2891,7 +2909,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@ -2938,7 +2956,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
}
/* This will also learn the number of SBs from MFW */
if (ecore_int_igu_reset_cam(p_hwfn, p_hwfn->p_main_ptt))
if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
return ECORE_INVAL;
ecore_hw_set_feat(p_hwfn);
@ -2954,7 +2972,9 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
unlock_and_exit:
ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
return rc;
}
@ -3486,7 +3506,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
rc = ECORE_SUCCESS;
p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
@ -3495,9 +3515,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_dev *p_dev = p_hwfn->p_dev;
u16 device_id_mask;
u32 tmp;
@ -3522,16 +3543,15 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
return ECORE_ABORTED;
}
p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_REV);
p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_NUM);
p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, p_dev->chip_rev);
/* Learn number of HW-functions */
tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
@ -3551,10 +3571,10 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
}
#endif
p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
p_dev->chip_bond_id = ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_ptt,
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
@ -3571,12 +3591,10 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
if (CHIP_REV_IS_EMUL(p_dev)) {
tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_ECO_RESERVED);
tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
if (tmp & (1 << 29)) {
DP_NOTICE(p_hwfn, false,
"Emulation: Running on a FULL build\n");
@ -3656,7 +3674,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = ecore_get_dev_info(p_dev);
rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
@ -3785,11 +3803,15 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
/* adjust bar offset for second engine */
addr = (u8 OSAL_IOMEM *)p_dev->regview +
ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
ecore_hw_bar_size(p_hwfn,
p_hwfn->p_main_ptt,
BAR_ID_0) / 2;
p_regview = (void OSAL_IOMEM *)addr;
addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
ecore_hw_bar_size(p_hwfn,
p_hwfn->p_main_ptt,
BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
/* prepare second hw function */

View File

@ -142,8 +142,9 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
#ifndef LINUX_REMOVE
/**
@ -160,10 +161,11 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev);
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
* @param p_dev
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,

View File

@ -71,8 +71,10 @@ enum _dmae_cmd_crc_mask {
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured

View File

@ -525,7 +525,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
return rc;
}
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 gtt_base;
u32 i;
@ -543,7 +544,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
@ -552,7 +553,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);

View File

@ -107,5 +107,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#endif /* __ECORE_INIT_OPS__ */

View File

@ -248,21 +248,21 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
DP_INFO(p_hwfn->p_dev,
"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
" [PF: %02x %s %02x]\n",
tmp2, tmp,
(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
grc_timeout_attn_master_to_str((tmp &
ECORE_GRC_ATTENTION_MASTER_MASK) >>
ECORE_GRC_ATTENTION_MASTER_SHIFT),
(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
DP_NOTICE(p_hwfn->p_dev, false,
"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
tmp2, tmp,
(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
: "Read from",
(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
grc_timeout_attn_master_to_str(
(tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
ECORE_GRC_ATTENTION_MASTER_SHIFT),
(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
out:
/* Regardless of anything else, clean the validity bit */

View File

@ -1196,7 +1196,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
OSAL_LINK_UPDATE(p_hwfn);
OSAL_LINK_UPDATE(p_hwfn, p_ptt);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
@ -1832,14 +1832,13 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
struct ecore_ptt *p_ptt;
/* TODO - Add support for VFs */
if (IS_VF(p_dev))
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
@ -1847,16 +1846,15 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_BUSY;
}
*p_media_type = MEDIA_UNSPECIFIED;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
*p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port, media_type));
ecore_ptt_release(p_hwfn, p_ptt);
if (!p_ptt) {
*p_media_type = MEDIA_UNSPECIFIED;
return ECORE_INVAL;
} else {
*p_media_type = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
media_type));
}
return ECORE_SUCCESS;
}

View File

@ -608,14 +608,16 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
 * @param p_hwfn - ecore hwfn pointer
 * @param p_ptt
 * @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
u32 *media_type);
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.

View File

@ -49,6 +49,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
* for a physical function (PF).
*
* @param p_hwfn
* @param p_ptt
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
@ -58,6 +59,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);

View File

@ -232,6 +232,7 @@ static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
@ -241,14 +242,14 @@ static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
}
if (p_tunn->vxlan_port.b_update_port)
ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
ecore_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@ -294,6 +295,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
@ -390,7 +392,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
&p_hwfn->p_dev->tunnel);
return rc;
}
@ -465,6 +468,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@ -505,7 +509,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
return rc;
}

View File

@ -59,6 +59,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param p_ptt
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
@ -68,6 +69,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);

View File

@ -87,6 +87,7 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct ecore_spq_comp_done *comp_done;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@ -103,8 +104,13 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_AGAIN;
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
rc = ecore_mcp_drain(p_hwfn, p_ptt);
ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;

View File

@ -601,7 +601,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
@ -2387,7 +2387,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
if (b_update_required) {
u16 geneve_port;
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
if (rc != ECORE_SUCCESS)

View File

@ -238,10 +238,8 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_free - free sriov related resources

View File

@ -2167,11 +2167,15 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
QEDE_VXLAN_DEF_PORT;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
struct ecore_ptt *p_ptt = IS_PF(edev) ?
ecore_ptt_acquire(p_hwfn) : NULL;
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Unable to config UDP port %u\n",
tunn.vxlan_port.port);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
}
@ -2318,11 +2322,15 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
qede_set_cmn_tunn_param(&tunn, clss, true, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
struct ecore_ptt *p_ptt = IS_PF(edev) ?
ecore_ptt_acquire(p_hwfn) : NULL;
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
&tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev, "Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn, p_ptt);
}
}
qdev->num_tunn_filters++; /* Filter added successfully */
@ -2352,12 +2360,17 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
qede_set_cmn_tunn_param(&tunn, clss, false, true);
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
ECORE_SPQ_MODE_CB, NULL);
struct ecore_ptt *p_ptt = IS_PF(edev) ?
ecore_ptt_acquire(p_hwfn) : NULL;
rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
&tunn, ECORE_SPQ_MODE_CB, NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev,
"Failed to update tunn_clss %u\n",
tunn.vxlan.tun_cls);
if (IS_PF(edev))
ecore_ptt_release(p_hwfn,
p_ptt);
break;
}
}

View File

@ -178,7 +178,7 @@ static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
qed_link_update(hwfn, NULL);
}
static void qede_vf_task(void *arg)
@ -489,6 +489,7 @@ qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
}
static void qed_fill_link(struct ecore_hwfn *hwfn,
__rte_unused struct ecore_ptt *ptt,
struct qed_link_output *if_link)
{
struct ecore_mcp_link_params params;
@ -559,12 +560,22 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
static void
qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link)
{
qed_fill_link(&edev->hwfns[0], if_link);
struct ecore_hwfn *hwfn;
struct ecore_ptt *ptt;
#ifdef CONFIG_QED_SRIOV
for_each_hwfn(cdev, i)
qed_inform_vf_link_state(&cdev->hwfns[i]);
#endif
hwfn = &edev->hwfns[0];
if (IS_PF(edev)) {
ptt = ecore_ptt_acquire(hwfn);
if (!ptt)
DP_NOTICE(hwfn, true, "Failed to fill link; No PTT\n");
qed_fill_link(hwfn, ptt, if_link);
if (ptt)
ecore_ptt_release(hwfn, ptt);
} else {
qed_fill_link(hwfn, NULL, if_link);
}
}
static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
@ -614,11 +625,11 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
return rc;
}
void qed_link_update(struct ecore_hwfn *hwfn)
void qed_link_update(struct ecore_hwfn *hwfn, struct ecore_ptt *ptt)
{
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
qed_fill_link(hwfn, ptt, &if_link);
}
static int qed_drain(struct ecore_dev *edev)