net/qede/base: fix code flow and remove unused code

Remove unused code to address Coverity issues and fix a
code flow issue.

Coverity issue: 1379468
Coverity issue: 1379521
Coverity issue: 1379522
Coverity issue: 1379523
Coverity issue: 1423918
Fixes: 86a2265e59 ("qede: add SRIOV support")
Fixes: ec94dbc573 ("qede: add base driver")
Fixes: 2ea6f76aff ("qede: add core driver")
Fixes: 29540be7ef ("net/qede: support LRO/TSO offloads")
Cc: stable@dpdk.org

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
Author:    Rasesh Mody <rasesh.mody@cavium.com>
Date:      2017-04-25 00:28:43 -07:00
Committer: Ferruh Yigit
Commit:    c2069af8f3 (parent 738f56d41a)
7 changed files with 9 additions and 144 deletions
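
For context on the removals that follow: both of the deleted base-driver helpers, ecore_cxt_get_tid_mem_info() and ecore_cxt_get_task_ctx(), open with a personality switch whose only case is "default: return ECORE_INVAL;", so nothing after the switch can execute and the locals proto and seg are read without ever being assigned. That is presumably the kind of construct the Coverity IDs above flag, and since the helpers have no callers they are deleted outright. Below is a minimal, hypothetical C sketch of that pattern; none of these names come from the driver.

#include <stdio.h>

enum status { SUCCESS = 0, INVAL = -1 };
enum personality { PERSONALITY_ETH, PERSONALITY_ROCE };

/* A switch whose only case is "default: return ..." makes everything after
 * it unreachable; static analyzers report the dead code and the use of the
 * never-assigned locals below.
 */
static enum status get_tid_info(enum personality pers, unsigned int *out)
{
	unsigned int proto, seg;

	switch (pers) {
	default:
		return INVAL;	/* every caller ends up here */
	}

	/* unreachable: proto and seg were never assigned */
	*out = proto + seg;
	return SUCCESS;
}

int main(void)
{
	unsigned int v = 0;

	printf("status = %d, v = %u\n", get_tid_info(PERSONALITY_ETH, &v), v);
	return 0;
}

Deleting such helpers, as this patch does, is simpler than keeping unreachable lookup code alive until a protocol personality actually needs it.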


@@ -2014,47 +2014,6 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
struct ecore_tid_mem *p_info)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 proto, seg, total_lines, i, shadow_line;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_fl_seg;
struct ecore_tid_seg *p_seg_info;
/* Verify the personality */
switch (p_hwfn->hw_info.personality) {
default:
return ECORE_INVAL;
}
p_cli = &p_mngr->clients[ILT_CLI_CDUT];
if (!p_cli->active)
return ECORE_INVAL;
p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
if (!p_seg_info->has_fl_mem)
return ECORE_INVAL;
p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
p_fl_seg->real_size_in_page);
for (i = 0; i < total_lines; i++) {
shadow_line = i + p_fl_seg->start_line -
p_hwfn->p_cxt_mngr->pf_start_line;
p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
}
p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
p_fl_seg->real_size_in_page;
p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
p_info->tid_size;
return ECORE_SUCCESS;
}
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
@@ -2292,52 +2251,3 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
return rc;
}
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u32 tid,
u8 ctx_type, void **pp_task_ctx)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_seg;
struct ecore_tid_seg *p_seg_info;
u32 proto, seg;
u32 total_lines;
u32 tid_size, ilt_idx;
u32 num_tids_per_block;
/* Verify the personality */
switch (p_hwfn->hw_info.personality) {
default:
return ECORE_INVAL;
}
p_cli = &p_mngr->clients[ILT_CLI_CDUT];
if (!p_cli->active)
return ECORE_INVAL;
p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
if (ctx_type == ECORE_CTX_WORKING_MEM) {
p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
} else if (ctx_type == ECORE_CTX_FL_MEM) {
if (!p_seg_info->has_fl_mem)
return ECORE_INVAL;
p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
} else {
return ECORE_INVAL;
}
total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
tid_size = p_mngr->task_type_size[p_seg_info->type];
num_tids_per_block = p_seg->real_size_in_page / tid_size;
if (total_lines < tid / num_tids_per_block)
return ECORE_INVAL;
ilt_idx = tid / num_tids_per_block + p_seg->start_line -
p_mngr->pf_start_line;
*pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
(tid % num_tids_per_block) * tid_size;
return ECORE_SUCCESS;
}
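
Although ecore_cxt_get_task_ctx() is removed as unused, the lookup it performed is a plain paged-table split: the task id picks an ILT shadow page by integer division and a byte offset inside that page by the remainder, exactly the div/mod arithmetic in the lines above. Here is a self-contained sketch of that calculation with made-up sizes and no driver types; it ignores the start_line/pf_start_line adjustments the real code applied.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_B	4096u	/* bytes per ILT page (made-up value) */
#define TID_SIZE_B	64u	/* bytes of context per task id (made-up value) */
#define NUM_PAGES	8u

int main(void)
{
	uint8_t *shadow[NUM_PAGES];	/* one buffer per ILT line/page */
	uint32_t tids_per_page = PAGE_SIZE_B / TID_SIZE_B;
	uint32_t tid = 100;		/* task id to resolve */
	uint32_t line, offset, i;
	uint8_t *ctx;

	for (i = 0; i < NUM_PAGES; i++)
		shadow[i] = calloc(1, PAGE_SIZE_B);

	line = tid / tids_per_page;			/* which page holds the tid */
	offset = (tid % tids_per_page) * TID_SIZE_B;	/* where inside that page */
	ctx = shadow[line] + offset;

	printf("tid %u -> line %u, offset %u, ctx %p\n",
	       tid, line, offset, (void *)ctx);

	for (i = 0; i < NUM_PAGES; i++)
		free(shadow[i]);
	return 0;
}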


@@ -197,9 +197,5 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u32 tid,
u8 ctx_type,
void **task_ctx);
#endif /* _ECORE_CID_ */


@@ -37,15 +37,4 @@ struct ecore_tid_mem {
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
/**
* @brief ecore_cxt_get_tid_mem_info
*
* @param p_hwfn
* @param p_info
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
struct ecore_tid_mem *p_info);
#endif


@@ -693,17 +693,6 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
* @return - rate in Mbps
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief - Configure min rate for VF's vport.
* @param p_dev
* @param vfid
* @param - rate in Mbps
*
* @return
*/
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
#endif
/**


@@ -4386,30 +4386,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate)
{
struct ecore_vf_info *vf;
u8 vport_id;
int i;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn, true,
"SR-IOV sanity check failed,"
" can't set min rate\n");
return ECORE_INVAL;
}
}
vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
vport_id = vf->vport_id;
return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,


@@ -265,7 +265,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed to allocate stream memory\n");
goto err2;
goto err1;
}
}
@@ -306,7 +306,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed sending drv version command\n");
return rc;
goto err3;
}
}
@@ -314,8 +314,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
return 0;
err3:
ecore_hw_stop(edev);
err2:
qed_stop_iov_task(edev);
#ifdef CONFIG_ECORE_ZIPPED_FW
qed_free_stream_mem(edev);
err1:
#endif
ecore_resc_free(edev);
err:
#ifdef CONFIG_ECORE_BINARY_FW
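
The qed_slowpath_start() hunks above are the code-flow part of the fix: the stream-memory failure now jumps to a label matching what had actually been set up at that point, and the drv-version failure no longer returns without cleanup but unwinds through err3 so the hardware is stopped too. A generic sketch of that goto-ladder idiom follows, with hypothetical resources; none of these functions are the driver's.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for firmware load, stream memory, hardware init, etc. */
static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *p) { printf("release %s\n", what); free(p); }

static int slowpath_start(int fail_at)
{
	void *res = NULL, *stream = NULL, *hw = NULL;

	res = acquire("resources");
	if (fail_at == 1)
		goto err1;	/* nothing beyond 'res' to undo yet */

	stream = acquire("stream memory");
	if (fail_at == 2)
		goto err2;

	hw = acquire("hw");
	if (fail_at == 3)
		goto err3;	/* a late failure must also stop the hw */

	return 0;

	/* Labels unwind in reverse order of acquisition; falling through from
	 * errN releases everything acquired before step N.
	 */
err3:
	release("hw", hw);
err2:
	release("stream memory", stream);
err1:
	release("resources", res);
	return -1;
}

int main(void)
{
	return slowpath_start(3) ? 1 : 0;
}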


@@ -1459,7 +1459,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bd3 = NULL;
hdr_size = 0;
mbuf = *tx_pkts;
mbuf = *tx_pkts++;
assert(mbuf);
/* Check minimum TX BDS availability against available BDs */
@@ -1501,7 +1501,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
*tx_pkts++;
txq->sw_tx_ring[idx].mbuf = mbuf;
/* BD1 */
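
In the qede_xmit_pkts() hunks above, the old code loaded the buffer with "mbuf = *tx_pkts;" and advanced the array later through a standalone "*tx_pkts++;", a statement whose dereferenced value is simply discarded; the fix folds the advance into the load and drops the stray statement. A tiny sketch of the corrected burst-loop shape with placeholder types; nothing here is the driver's data structure.

#include <stdint.h>
#include <stdio.h>

struct mbuf { int id; };

/* Consume up to nb_pkts buffers, advancing the array pointer exactly once
 * per packet, at the point where the buffer is taken.
 */
static uint16_t xmit_pkts(struct mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	while (sent < nb_pkts) {
		struct mbuf *mbuf = *tx_pkts++;	/* read and advance together */

		printf("sending mbuf %d\n", mbuf->id);
		sent++;
	}
	return sent;
}

int main(void)
{
	struct mbuf a = { 1 }, b = { 2 }, c = { 3 };
	struct mbuf *burst[] = { &a, &b, &c };

	return xmit_pkts(burst, 3) == 3 ? 0 : 1;
}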