net/qede/base: refine error handling

Adjust the verbosity of the log messages and add preventive checks for
errors.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
Author:    Rasesh Mody
Date:      2018-04-08 21:48:05 -07:00
Committed: Ferruh Yigit
commit 98abf84ee0
parent 2d52085e4d
10 changed files with 173 additions and 112 deletions
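Two patterns repeat throughout the hunks below. First, allocation-failure messages flip the second DP_NOTICE() argument from true to false; in the qede base code that argument marks whether the notice is severe enough to also assert, so failures that the callers already handle are downgraded to plain log messages (this reading of the macro is inferred from the surrounding code, not stated by the commit). Second, lock allocations under CONFIG_ECORE_LOCK_ALLOC, whose OSAL_SPIN_LOCK_ALLOC()/OSAL_MUTEX_ALLOC() return values were previously ignored, are now checked, and any partially initialized state is unwound before propagating ECORE_NOMEM. A minimal, self-contained C sketch of the check-and-unwind pattern, using stand-in names (status_t, lock_alloc, info_init) rather than the driver's real OSAL types:

/* Hedged sketch, not driver code: the "check the lock allocation and unwind on
 * failure" pattern, using stand-in names instead of the real OSAL types. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { STATUS_SUCCESS = 0, STATUS_NOMEM = 1 } status_t;

struct spin_lock { int token; };

/* Stand-in for OSAL_SPIN_LOCK_ALLOC(): allocation can fail, so return a status. */
static status_t lock_alloc(struct spin_lock **pp_lock)
{
	*pp_lock = malloc(sizeof(**pp_lock));
	return *pp_lock ? STATUS_SUCCESS : STATUS_NOMEM;
}

static void lock_free(struct spin_lock *p_lock)
{
	free(p_lock);
}

struct info {
	struct spin_lock *cmd_lock;
	struct spin_lock *link_lock;
};

/* Mirrors the shape of the ecore_mcp_cmd_init() change: every allocation is
 * checked, and anything acquired so far is released before returning NOMEM. */
static status_t info_init(struct info *p_info)
{
	if (lock_alloc(&p_info->cmd_lock) != STATUS_SUCCESS) {
		fprintf(stderr, "Failed to allocate cmd lock\n"); /* notice, no assert */
		return STATUS_NOMEM;
	}
	if (lock_alloc(&p_info->link_lock) != STATUS_SUCCESS) {
		fprintf(stderr, "Failed to allocate link lock\n");
		lock_free(p_info->cmd_lock); /* unwind the partial init */
		p_info->cmd_lock = NULL;
		return STATUS_NOMEM;
	}
	return STATUS_SUCCESS;
}

int main(void)
{
	struct info info = { NULL, NULL };
	return info_init(&info) == STATUS_SUCCESS ? 0 : 1;
}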


@ -834,7 +834,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->t2_num_pages *
sizeof(struct ecore_dma_mem));
if (!p_mngr->t2) {
DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
@ -919,6 +919,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
if (p_mngr->ilt_shadow == OSAL_NULL)
return;
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
@ -931,6 +934,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
p_dma->p_virt = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
p_mngr->ilt_shadow = OSAL_NULL;
}
static enum _ecore_status_t
@ -1000,8 +1004,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
size * sizeof(struct ecore_dma_mem));
if (!p_mngr->ilt_shadow) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate ilt shadow table\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
@ -1044,12 +1047,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].cid_map = OSAL_NULL;
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@ -1126,8 +1131,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_cxt_mngr'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
return ECORE_NOMEM;
}
@ -1189,21 +1193,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate the T2 table */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = ecore_cid_map_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}


@ -910,7 +910,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_dcbx_info'");
return ECORE_NOMEM;
}


@ -39,7 +39,7 @@
* there's more than a single compiled ecore component in system].
*/
static osal_spinlock_t qm_lock;
static bool qm_lock_init;
static u32 qm_lock_ref_cnt;
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
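This first hunk replaces the one-shot qm_lock_init flag with a reference count. With CONFIG_ECORE_LOCK_ALLOC, the count lets ecore_hw_init() allocate the shared lock only for its first user (and fail cleanly if that allocation fails) and lets ecore_hw_stop() deallocate it once the last user is gone, as the later hunks show. A minimal sketch of that reference-counted pattern, with stand-in names (shared_lock_get/shared_lock_put) in place of the OSAL macros:

/* Hedged sketch with stand-in names: a lock shared by several users, allocated
 * on first use and freed by the last release, mirroring qm_lock_ref_cnt. */
#include <stdlib.h>

struct spin_lock { int token; };

static struct spin_lock *g_shared_lock;  /* stand-in for the static qm_lock */
static unsigned int g_shared_lock_refs;  /* stand-in for qm_lock_ref_cnt    */

static int shared_lock_get(void)
{
	if (!g_shared_lock_refs) {
		g_shared_lock = malloc(sizeof(*g_shared_lock));
		if (!g_shared_lock)
			return -1; /* report the failure instead of ignoring it */
	}
	++g_shared_lock_refs;
	return 0;
}

static void shared_lock_put(void)
{
	if (!--g_shared_lock_refs) {
		free(g_shared_lock); /* last user deallocates the shared lock */
		g_shared_lock = NULL;
	}
}

int main(void)
{
	if (shared_lock_get() || shared_lock_get())
		return 1;
	shared_lock_put();
	shared_lock_put(); /* second put drops the count to zero and frees */
	return 0;
}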
@ -227,7 +227,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
return ECORE_NOMEM;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
p_hwfn->db_recovery_info.db_recovery_counter = 0;
@ -411,7 +412,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
}
}
void ecore_init_struct(struct ecore_dev *p_dev)
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
u8 i;
@ -423,7 +424,8 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->b_active = false;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
goto handle_err;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
}
@ -433,6 +435,17 @@ void ecore_init_struct(struct ecore_dev *p_dev)
/* set the default cache alignment to 128 (may be overridden later) */
p_dev->cache_shift = 7;
return ECORE_SUCCESS;
#ifdef CONFIG_ECORE_LOCK_ALLOC
handle_err:
while (--i) {
struct ecore_hwfn *p_hwfn = OSAL_NULL;
p_hwfn = &p_dev->hwfns[i];
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
}
return ECORE_NOMEM;
#endif
}
static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
@ -1289,16 +1302,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate memory for dmae_info"
" structure\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
/* DCBX initialization */
rc = ecore_dcbx_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
@ -1307,7 +1318,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
}
@ -2211,42 +2222,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
} else {
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
(1 << 2));
ecore_wr(p_hwfn, p_ptt,
PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
0x100);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH registers after start PFn\n");
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TCP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_UDP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt,
PRS_REG_SEARCH_TCP_FIRST_FRAG);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
return rc;
}
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
(1 << 2));
ecore_wr(p_hwfn, p_ptt,
PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
0x100);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH registers after start PFn\n");
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TCP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_UDP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt,
PRS_REG_SEARCH_TCP_FIRST_FRAG);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
}
return rc;
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
@ -2419,7 +2431,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed sending a LOAD_REQ command\n");
return rc;
}
@ -2452,10 +2464,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
if (!qm_lock_init) {
if (!qm_lock_ref_cnt) {
#ifdef CONFIG_ECORE_LOCK_ALLOC
rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
if (rc) {
DP_ERR(p_hwfn, "qm_lock allocation failed\n");
goto qm_lock_fail;
}
#endif
OSAL_SPIN_LOCK_INIT(&qm_lock);
qm_lock_init = true;
}
++qm_lock_ref_cnt;
/* Clean up chip from previous driver if such remains exist.
* This is not needed when the PF is the first one on the
@ -2510,15 +2529,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
goto load_err;
}
rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Sending load done failed, rc = %d\n", rc);
if (rc == ECORE_NOMEM) {
DP_NOTICE(p_hwfn, false,
"Sending load done was failed due to memory allocation failure\n");
goto load_err;
}
return rc;
}
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
@ -2528,7 +2555,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
&param);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX attention request\n");
return rc;
}
@ -2561,6 +2588,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
return rc;
load_err:
--qm_lock_ref_cnt;
#ifdef CONFIG_ECORE_LOCK_ALLOC
if (!qm_lock_ref_cnt)
OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
qm_lock_fail:
#endif
/* The MFW load lock should be released regardless of success or failure
* of initialization.
* TODO: replace this with an attempt to send cancel_load.
@ -2595,8 +2628,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
if (i < ECORE_HW_STOP_RETRY_LIMIT)
return;
DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
" [Connection %02x Tasks %02x]\n",
DP_NOTICE(p_hwfn, false,
"Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
@ -2661,7 +2694,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
if (!p_dev->recov_in_prog) {
rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_REQ command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@ -2676,7 +2709,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc = ecore_sp_pf_stop(p_hwfn);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@ -2730,10 +2763,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
--qm_lock_ref_cnt;
#ifdef CONFIG_ECORE_LOCK_ALLOC
if (!qm_lock_ref_cnt)
OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
#endif
if (!p_dev->recov_in_prog) {
ecore_mcp_unload_done(p_hwfn, p_ptt);
rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
if (rc == ECORE_NOMEM) {
DP_NOTICE(p_hwfn, false,
"Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
}
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_DONE command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@ -2984,7 +3028,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"MFW response failure for a max value setting of resource %d [%s]\n",
res_id, ecore_hw_get_resc_name(res_id));
return rc;
@ -4085,7 +4129,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
@ -4110,7 +4154,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Initialize MCP structure */
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
@ -4120,7 +4164,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
p_params->personality, p_params);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
goto err2;
}
@ -4163,7 +4207,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
@ -4253,8 +4297,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
ecore_mcp_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
} else {
DP_NOTICE(p_dev, true,
"What do we need to free when VF hwfn1 init fails\n");
DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
}
return rc;
}
@ -4416,7 +4459,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
DP_NOTICE(p_dev, true,
DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@ -4449,7 +4492,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@ -4473,7 +4516,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
DP_NOTICE(p_dev, true,
DP_NOTICE(p_dev, false,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
@ -4497,7 +4540,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
return ECORE_NOMEM;
}
@ -4505,7 +4548,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
DP_NOTICE(p_dev, true,
DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@ -4545,7 +4588,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
page_cnt);
if (rc) {
DP_NOTICE(p_dev, true,
DP_NOTICE(p_dev, false,
"Cannot allocate a chain with the given arguments:\n"
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);


@ -32,7 +32,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
*
* @param p_dev
*/
void ecore_init_struct(struct ecore_dev *p_dev);
enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -


@ -38,6 +38,12 @@ struct ecore_ptt_pool {
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
}
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
@ -65,10 +71,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
__ecore_ptt_pool_free(p_hwfn);
return ECORE_NOMEM;
}
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
}
@ -89,7 +97,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
__ecore_ptt_pool_free(p_hwfn);
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@ -569,7 +577,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
if (*p_comp == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `p_completion_word'\n");
goto err;
}
@ -578,7 +586,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct dmae_cmd'\n");
goto err;
}
@ -587,7 +595,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(u32) * DMAE_MAX_RW_SIZE);
if (*p_buff == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `intermediate_buffer'\n");
goto err;
}


@ -1406,8 +1406,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true,
"Failed to allocate `struct ecore_sb_attn_info'\n");
DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@ -1415,8 +1414,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_dev, true,
"Failed to allocate status block (attentions)\n");
DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@ -1795,8 +1793,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_sb_info'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
@ -1804,7 +1801,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_phys, SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
return ECORE_NOMEM;
}


@ -77,7 +77,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
return ECORE_NOMEM;
#endif
return ECORE_SUCCESS;
@ -110,6 +111,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
break;
OSAL_VFREE(p_hwfn->p_dev,
p_hwfn->p_l2_info->pp_qid_usage[i]);
p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
@ -119,6 +121,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
#endif
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
out_l2_info:
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);


@ -240,15 +240,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
/* Allocate mcp_info structure */
p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->mcp_info));
if (!p_hwfn->mcp_info)
goto err;
sizeof(*p_hwfn->mcp_info));
if (!p_hwfn->mcp_info) {
DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
return ECORE_NOMEM;
}
p_info = p_hwfn->mcp_info;
/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_NOMEM;
}
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_NOMEM;
}
#endif
OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
@ -272,7 +281,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
err:
DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
ecore_mcp_free(p_hwfn);
return ECORE_NOMEM;
}


@ -401,7 +401,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
/* Allocate EQ struct */
p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_eq'\n");
return ECORE_NOMEM;
}
@ -414,7 +414,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
num_elem,
sizeof(union event_ring_element),
&p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
@ -559,8 +559,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_spq'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
return ECORE_NOMEM;
}
@ -572,7 +571,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain, OSAL_NULL)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@ -588,7 +587,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_phys = p_phys;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
goto spq_allocate_fail;
#endif
p_hwfn->p_spq = p_spq;
@ -642,9 +642,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate an SPQ entry for a pending"
" ramrod\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = ECORE_NOMEM;
goto out_unlock;
}
@ -1025,7 +1023,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
p_consq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_consq'\n");
return ECORE_NOMEM;
}
@ -1038,7 +1036,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_PAGE_SIZE / 0x80,
0x80,
&p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
goto consq_allocate_fail;
}


@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_sriov'\n");
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->p_iov_info));
if (!p_dev->p_iov_info) {
DP_NOTICE(p_hwfn, true,
DP_NOTICE(p_hwfn, false,
"Can't support IOV due to lack of memory\n");
return ECORE_NOMEM;
}