net/qede/base: update FW to 8.40.25.0
This patch updates the FW to 8.40.25.0 and makes the corresponding base driver changes. It also updates the PMD version to 2.11.0.1. The FW update consists of the enhancements and fixes described below.

- VF RX queue start ramrod can get stuck due to a completion error. Return EQ completion with error when loading VF data fails. Use the VF FID in the RX queue start ramrod.
- Fix big receive buffer (BRB) initialization for 100G to address a failure leading to a BRB hardware assertion.
- GRE tunnel traffic does not run when a non-L2 ethernet protocol is enabled; fix FW to not forward tunneled SYN packets to LL2.
- Fix the FW assert caused during vport_update when tx-switching is enabled.
- Add initial FW support for VF representors.
- Add ecore_get_hsi_def_val() API to get default HSI values.
- Move the following from .c to .h files: TSTORM_QZONE_START and MSTORM_QZONE_START; enum ilt_clients; struct ecore_dma_mem, renamed to phys_mem_desc.
- Add ecore_cxt_set_cli() and ecore_cxt_set_blk() APIs to set client config and block details.
- Use the SET_FIELD() macro where appropriate.
- Address spell-check and code-alignment issues.

Signed-off-by: Rasesh Mody <rmody@marvell.com>
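As a quick illustration of the new HSI-defaults accessor introduced by this patch (a minimal sketch; the helper function name is hypothetical and not part of the patch), the device-limit macros in ecore.h now resolve through ecore_get_hsi_def_val() at run time instead of the ECORE_IS_BB()/K2 compile-time ternaries:

    /* Hypothetical helper - shows how the reworked limit macros are used.
     * NUM_OF_L2_QUEUES() now expands to
     * ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES).
     */
    static u32 example_total_l2_queues(struct ecore_dev *p_dev)
    {
    	return NUM_OF_L2_QUEUES(p_dev);
    }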
parent 7ed1cd53db
commit 3b307c55f2
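Similarly, a hedged sketch of the new ILT bookkeeping helpers (the fragment mirrors calls that appear in the ecore_cxt.c hunks below; it is not a complete function): ecore_cxt_set_cli() resets a client's active/first/last state and ecore_cxt_set_blk() clears a block's total_size, each returning the pointer so the reset and the assignment happen in one step:

    /* Illustrative fragment, as used in ecore_cxt_cfg_ilt_compute() */
    p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
    p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);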
@@ -28,8 +28,8 @@
#include "mcp_public.h"

#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 37
#define ECORE_REVISION_VERSION 20
#define ECORE_MINOR_VERSION 40
#define ECORE_REVISION_VERSION 18
#define ECORE_ENGINEERING_VERSION 0

#define ECORE_VERSION \
@@ -467,6 +467,8 @@ struct ecore_wfq_data {
bool configured;
};

#define OFLD_GRP_SIZE 4

struct ecore_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
@@ -513,6 +515,8 @@ struct ecore_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
const u32 *fw_overlays;
u32 fw_overlays_len;
u32 init_ops_size;
};

@@ -592,6 +596,7 @@ struct ecore_hwfn {

u8 num_funcs_on_engine;
u8 enabled_func_idx;
u8 num_funcs_on_port;

/* BAR access */
void OSAL_IOMEM *regview;
@@ -745,7 +750,6 @@ struct ecore_dev {
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))

u16 vendor_id;
u16 device_id;
@@ -893,6 +897,7 @@ struct ecore_dev {

#ifndef ASIC_ONLY
bool b_is_emul_full;
bool b_is_emul_mac;
#endif
/* LLH info */
u8 ppfid_bitmap;
@@ -911,16 +916,52 @@ struct ecore_dev {
u8 engine_for_debug;
};

#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
enum ecore_hsi_def_type {
ECORE_HSI_DEF_MAX_NUM_VFS,
ECORE_HSI_DEF_MAX_NUM_L2_QUEUES,
ECORE_HSI_DEF_MAX_NUM_PORTS,
ECORE_HSI_DEF_MAX_SB_PER_PATH,
ECORE_HSI_DEF_MAX_NUM_PFS,
ECORE_HSI_DEF_MAX_NUM_VPORTS,
ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE,
ECORE_HSI_DEF_MAX_QM_TX_QUEUES,
ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS,
ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS,
ECORE_HSI_DEF_MAX_PBF_CMD_LINES,
ECORE_HSI_DEF_MAX_BTB_BLOCKS,
ECORE_NUM_HSI_DEFS
};

u32 ecore_get_hsi_def_val(struct ecore_dev *p_dev,
enum ecore_hsi_def_type type);

#define NUM_OF_VFS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
ecore_get_hsi_def_val(dev, ECORE_HSI_DEF_MAX_BTB_BLOCKS)

#define CRC8_TABLE_SIZE 256

@@ -948,7 +989,6 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
}

#define PKT_LB_TC 9
#define MAX_NUM_VOQS_E4 20

int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@@ -1023,4 +1063,9 @@ enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);

#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A

#define MSTORM_QZONE_START(dev) \
(TSTORM_QZONE_START + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

#endif /* __ECORE_H */

@ -33,6 +33,10 @@
|
||||
/* Searcher constants */
|
||||
#define SRC_MIN_NUM_ELEMS 256
|
||||
|
||||
/* GFS constants */
|
||||
#define RGFS_MIN_NUM_ELEMS 256
|
||||
#define TGFS_MIN_NUM_ELEMS 256
|
||||
|
||||
/* Timers constants */
|
||||
#define TM_SHIFT 7
|
||||
#define TM_ALIGN (1 << TM_SHIFT)
|
||||
@ -114,16 +118,6 @@ struct ecore_conn_type_cfg {
|
||||
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
|
||||
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
|
||||
|
||||
enum ilt_clients {
|
||||
ILT_CLI_CDUC,
|
||||
ILT_CLI_CDUT,
|
||||
ILT_CLI_QM,
|
||||
ILT_CLI_TM,
|
||||
ILT_CLI_SRC,
|
||||
ILT_CLI_TSDM,
|
||||
ILT_CLI_MAX
|
||||
};
|
||||
|
||||
struct ilt_cfg_pair {
|
||||
u32 reg;
|
||||
u32 val;
|
||||
@ -133,6 +127,7 @@ struct ecore_ilt_cli_blk {
|
||||
u32 total_size; /* 0 means not active */
|
||||
u32 real_size_in_page;
|
||||
u32 start_line;
|
||||
u32 dynamic_line_offset;
|
||||
u32 dynamic_line_cnt;
|
||||
};
|
||||
|
||||
@ -153,17 +148,6 @@ struct ecore_ilt_client_cfg {
|
||||
u32 vf_total_lines;
|
||||
};
|
||||
|
||||
/* Per Path -
|
||||
* ILT shadow table
|
||||
* Protocol acquired CID lists
|
||||
* PF start line in ILT
|
||||
*/
|
||||
struct ecore_dma_mem {
|
||||
dma_addr_t p_phys;
|
||||
void *p_virt;
|
||||
osal_size_t size;
|
||||
};
|
||||
|
||||
#define MAP_WORD_SIZE sizeof(unsigned long)
|
||||
#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
|
||||
|
||||
@ -173,6 +157,13 @@ struct ecore_cid_acquired_map {
|
||||
unsigned long *cid_map;
|
||||
};
|
||||
|
||||
struct ecore_src_t2 {
|
||||
struct phys_mem_desc *dma_mem;
|
||||
u32 num_pages;
|
||||
u64 first_free;
|
||||
u64 last_free;
|
||||
};
|
||||
|
||||
struct ecore_cxt_mngr {
|
||||
/* Per protocl configuration */
|
||||
struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
|
||||
@ -193,17 +184,14 @@ struct ecore_cxt_mngr {
|
||||
struct ecore_cid_acquired_map *acquired_vf[MAX_CONN_TYPES];
|
||||
|
||||
/* ILT shadow table */
|
||||
struct ecore_dma_mem *ilt_shadow;
|
||||
struct phys_mem_desc *ilt_shadow;
|
||||
u32 pf_start_line;
|
||||
|
||||
/* Mutex for a dynamic ILT allocation */
|
||||
osal_mutex_t mutex;
|
||||
|
||||
/* SRC T2 */
|
||||
struct ecore_dma_mem *t2;
|
||||
u32 t2_num_pages;
|
||||
u64 first_free;
|
||||
u64 last_free;
|
||||
struct ecore_src_t2 src_t2;
|
||||
|
||||
/* The infrastructure originally was very generic and context/task
|
||||
* oriented - per connection-type we would set how many of those
|
||||
@ -280,15 +268,17 @@ struct ecore_tm_iids {
|
||||
u32 per_vf_tids;
|
||||
};
|
||||
|
||||
static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
|
||||
static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_cxt_mngr *p_mngr,
|
||||
struct ecore_tm_iids *iids)
|
||||
{
|
||||
struct ecore_conn_type_cfg *p_cfg;
|
||||
bool tm_vf_required = false;
|
||||
bool tm_required = false;
|
||||
u32 i, j;
|
||||
|
||||
for (i = 0; i < MAX_CONN_TYPES; i++) {
|
||||
struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
|
||||
p_cfg = &p_mngr->conn_cfg[i];
|
||||
|
||||
if (tm_cid_proto(i) || tm_required) {
|
||||
if (p_cfg->cid_count)
|
||||
@ -490,43 +480,84 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
|
||||
p_blk->start_line);
|
||||
}
|
||||
|
||||
static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
|
||||
enum ilt_clients ilt_client)
|
||||
static void ecore_ilt_get_dynamic_line_range(struct ecore_hwfn *p_hwfn,
|
||||
enum ilt_clients ilt_client,
|
||||
u32 *dynamic_line_offset,
|
||||
u32 *dynamic_line_cnt)
|
||||
{
|
||||
u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
|
||||
struct ecore_ilt_client_cfg *p_cli;
|
||||
u32 lines_to_skip = 0;
|
||||
struct ecore_conn_type_cfg *p_cfg;
|
||||
u32 cxts_per_p;
|
||||
|
||||
/* TBD MK: ILT code should be simplified once PROTO enum is changed */
|
||||
|
||||
*dynamic_line_offset = 0;
|
||||
*dynamic_line_cnt = 0;
|
||||
|
||||
if (ilt_client == ILT_CLI_CDUC) {
|
||||
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
|
||||
p_cfg = &p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE];
|
||||
|
||||
cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
|
||||
(u32)CONN_CXT_SIZE(p_hwfn);
|
||||
|
||||
lines_to_skip = cid_count / cxts_per_p;
|
||||
*dynamic_line_cnt = p_cfg->cid_count / cxts_per_p;
|
||||
}
|
||||
}
|
||||
|
||||
static struct ecore_ilt_client_cfg *
|
||||
ecore_cxt_set_cli(struct ecore_ilt_client_cfg *p_cli)
|
||||
{
|
||||
p_cli->active = false;
|
||||
p_cli->first.val = 0;
|
||||
p_cli->last.val = 0;
|
||||
return p_cli;
|
||||
}
|
||||
|
||||
static struct ecore_ilt_cli_blk *
|
||||
ecore_cxt_set_blk(struct ecore_ilt_cli_blk *p_blk)
|
||||
{
|
||||
p_blk->total_size = 0;
|
||||
return p_blk;
|
||||
}
|
||||
|
||||
return lines_to_skip;
|
||||
static u32
|
||||
ecore_cxt_src_elements(struct ecore_cxt_mngr *p_mngr)
|
||||
{
|
||||
struct ecore_src_iids src_iids;
|
||||
u32 elem_num = 0;
|
||||
|
||||
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
|
||||
ecore_cxt_src_iids(p_mngr, &src_iids);
|
||||
|
||||
/* Both the PF and VFs searcher connections are stored in the per PF
|
||||
* database. Thus sum the PF searcher cids and all the VFs searcher
|
||||
* cids.
|
||||
*/
|
||||
elem_num = src_iids.pf_cids +
|
||||
src_iids.per_vf_cids * p_mngr->vf_count;
|
||||
if (elem_num == 0)
|
||||
return elem_num;
|
||||
|
||||
elem_num = OSAL_MAX_T(u32, elem_num, SRC_MIN_NUM_ELEMS);
|
||||
elem_num = OSAL_ROUNDUP_POW_OF_TWO(elem_num);
|
||||
|
||||
return elem_num;
|
||||
}
|
||||
|
||||
enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
u32 curr_line, total, i, task_size, line, total_size, elem_size;
|
||||
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
|
||||
u32 curr_line, total, i, task_size, line;
|
||||
struct ecore_ilt_client_cfg *p_cli;
|
||||
struct ecore_ilt_cli_blk *p_blk;
|
||||
struct ecore_cdu_iids cdu_iids;
|
||||
struct ecore_src_iids src_iids;
|
||||
struct ecore_qm_iids qm_iids;
|
||||
struct ecore_tm_iids tm_iids;
|
||||
struct ecore_tid_seg *p_seg;
|
||||
|
||||
OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
|
||||
OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
|
||||
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
|
||||
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
|
||||
|
||||
p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
|
||||
@ -536,7 +567,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
|
||||
|
||||
/* CDUC */
|
||||
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
|
||||
|
||||
curr_line = p_mngr->pf_start_line;
|
||||
|
||||
@ -546,7 +577,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
/* get the counters for the CDUC,CDUC and QM clients */
|
||||
ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
|
||||
|
||||
p_blk = &p_cli->pf_blks[CDUC_BLK];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
|
||||
|
||||
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
|
||||
|
||||
@ -556,11 +587,12 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
|
||||
p_cli->pf_total_lines = curr_line - p_blk->start_line;
|
||||
|
||||
p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
|
||||
ILT_CLI_CDUC);
|
||||
ecore_ilt_get_dynamic_line_range(p_hwfn, ILT_CLI_CDUC,
|
||||
&p_blk->dynamic_line_offset,
|
||||
&p_blk->dynamic_line_cnt);
|
||||
|
||||
/* CDUC VF */
|
||||
p_blk = &p_cli->vf_blks[CDUC_BLK];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
|
||||
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
|
||||
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
@ -574,7 +606,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ILT_CLI_CDUC);
|
||||
|
||||
/* CDUT PF */
|
||||
p_cli = &p_mngr->clients[ILT_CLI_CDUT];
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
|
||||
p_cli->first.val = curr_line;
|
||||
|
||||
/* first the 'working' task memory */
|
||||
@ -583,7 +615,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
if (!p_seg || p_seg->count == 0)
|
||||
continue;
|
||||
|
||||
p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
|
||||
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
|
||||
p_mngr->task_type_size[p_seg->type]);
|
||||
@ -598,7 +630,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
if (!p_seg || p_seg->count == 0)
|
||||
continue;
|
||||
|
||||
p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
|
||||
p_blk =
|
||||
ecore_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
|
||||
|
||||
if (!p_seg->has_fl_mem) {
|
||||
/* The segment is active (total size pf 'working'
|
||||
@ -631,7 +664,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_CDUT);
|
||||
}
|
||||
p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
|
||||
p_cli->pf_total_lines = curr_line - p_cli->first.val;
|
||||
|
||||
/* CDUT VF */
|
||||
p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
|
||||
@ -643,7 +676,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
/* 'working' memory */
|
||||
total = p_seg->count * p_mngr->task_type_size[p_seg->type];
|
||||
|
||||
p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk,
|
||||
curr_line, total,
|
||||
p_mngr->task_type_size[p_seg->type]);
|
||||
@ -652,7 +685,8 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ILT_CLI_CDUT);
|
||||
|
||||
/* 'init' memory */
|
||||
p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
|
||||
p_blk =
|
||||
ecore_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
|
||||
if (!p_seg->has_fl_mem) {
|
||||
/* see comment above */
|
||||
line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
|
||||
@ -664,15 +698,17 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_CDUT);
|
||||
}
|
||||
p_cli->vf_total_lines = curr_line -
|
||||
p_cli->vf_blks[0].start_line;
|
||||
p_cli->vf_total_lines = curr_line - (p_cli->first.val +
|
||||
p_cli->pf_total_lines);
|
||||
|
||||
/* Now for the rest of the VFs */
|
||||
for (i = 1; i < p_mngr->vf_count; i++) {
|
||||
/* don't set p_blk i.e. don't clear total_size */
|
||||
p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_CDUT);
|
||||
|
||||
/* don't set p_blk i.e. don't clear total_size */
|
||||
p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_CDUT);
|
||||
@ -680,13 +716,19 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
}
|
||||
|
||||
/* QM */
|
||||
p_cli = &p_mngr->clients[ILT_CLI_QM];
|
||||
p_blk = &p_cli->pf_blks[0];
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
|
||||
|
||||
/* At this stage, after the first QM configuration, the PF PQs amount
|
||||
* is the highest possible. Save this value at qm_info->ilt_pf_pqs to
|
||||
* detect overflows in the future.
|
||||
* Even though VF PQs amount can be larger than VF count, use vf_count
|
||||
* because each VF requires only the full amount of CIDs.
|
||||
*/
|
||||
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
|
||||
total = ecore_qm_pf_mem_size(qm_iids.cids,
|
||||
total = ecore_qm_pf_mem_size(p_hwfn, qm_iids.cids,
|
||||
qm_iids.vf_cids, qm_iids.tids,
|
||||
p_hwfn->qm_info.num_pqs,
|
||||
p_hwfn->qm_info.num_pqs + OFLD_GRP_SIZE,
|
||||
p_hwfn->qm_info.num_vf_pqs);
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
|
||||
@ -701,39 +743,15 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
|
||||
p_cli->pf_total_lines = curr_line - p_blk->start_line;
|
||||
|
||||
/* SRC */
|
||||
p_cli = &p_mngr->clients[ILT_CLI_SRC];
|
||||
ecore_cxt_src_iids(p_mngr, &src_iids);
|
||||
|
||||
/* Both the PF and VFs searcher connections are stored in the per PF
|
||||
* database. Thus sum the PF searcher cids and all the VFs searcher
|
||||
* cids.
|
||||
*/
|
||||
total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
|
||||
if (total) {
|
||||
u32 local_max = OSAL_MAX_T(u32, total,
|
||||
SRC_MIN_NUM_ELEMS);
|
||||
|
||||
total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
|
||||
|
||||
p_blk = &p_cli->pf_blks[0];
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
total * sizeof(struct src_ent),
|
||||
sizeof(struct src_ent));
|
||||
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_SRC);
|
||||
p_cli->pf_total_lines = curr_line - p_blk->start_line;
|
||||
}
|
||||
|
||||
/* TM PF */
|
||||
p_cli = &p_mngr->clients[ILT_CLI_TM];
|
||||
ecore_cxt_tm_iids(p_mngr, &tm_iids);
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
|
||||
ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
|
||||
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
|
||||
if (total) {
|
||||
p_blk = &p_cli->pf_blks[0];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
|
||||
total * TM_ELEM_SIZE,
|
||||
TM_ELEM_SIZE);
|
||||
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_TM);
|
||||
@ -743,7 +761,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
/* TM VF */
|
||||
total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
|
||||
if (total) {
|
||||
p_blk = &p_cli->vf_blks[0];
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->vf_blks[0]);
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
total * TM_ELEM_SIZE, TM_ELEM_SIZE);
|
||||
|
||||
@ -757,12 +775,28 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
}
|
||||
}
|
||||
|
||||
/* SRC */
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
|
||||
total = ecore_cxt_src_elements(p_mngr);
|
||||
|
||||
if (total) {
|
||||
total_size = total * sizeof(struct src_ent);
|
||||
elem_size = sizeof(struct src_ent);
|
||||
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
total_size, elem_size);
|
||||
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
|
||||
ILT_CLI_SRC);
|
||||
p_cli->pf_total_lines = curr_line - p_blk->start_line;
|
||||
}
|
||||
|
||||
/* TSDM (SRQ CONTEXT) */
|
||||
total = ecore_cxt_get_srq_count(p_hwfn);
|
||||
|
||||
if (total) {
|
||||
p_cli = &p_mngr->clients[ILT_CLI_TSDM];
|
||||
p_blk = &p_cli->pf_blks[SRQ_BLK];
|
||||
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
|
||||
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
|
||||
ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
|
||||
total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
|
||||
|
||||
@ -783,29 +817,60 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
|
||||
|
||||
static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
|
||||
struct ecore_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
|
||||
u32 i;
|
||||
|
||||
if (!p_mngr->t2)
|
||||
if (!p_t2 || !p_t2->dma_mem)
|
||||
return;
|
||||
|
||||
for (i = 0; i < p_mngr->t2_num_pages; i++)
|
||||
if (p_mngr->t2[i].p_virt)
|
||||
for (i = 0; i < p_t2->num_pages; i++)
|
||||
if (p_t2->dma_mem[i].virt_addr)
|
||||
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
|
||||
p_mngr->t2[i].p_virt,
|
||||
p_mngr->t2[i].p_phys,
|
||||
p_mngr->t2[i].size);
|
||||
p_t2->dma_mem[i].virt_addr,
|
||||
p_t2->dma_mem[i].phys_addr,
|
||||
p_t2->dma_mem[i].size);
|
||||
|
||||
OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
|
||||
OSAL_FREE(p_hwfn->p_dev, p_t2->dma_mem);
|
||||
p_t2->dma_mem = OSAL_NULL;
|
||||
}
|
||||
|
||||
static enum _ecore_status_t
|
||||
ecore_cxt_t2_alloc_pages(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_src_t2 *p_t2,
|
||||
u32 total_size, u32 page_size)
|
||||
{
|
||||
void **p_virt;
|
||||
u32 size, i;
|
||||
|
||||
if (!p_t2 || !p_t2->dma_mem)
|
||||
return ECORE_INVAL;
|
||||
|
||||
for (i = 0; i < p_t2->num_pages; i++) {
|
||||
size = OSAL_MIN_T(u32, total_size, page_size);
|
||||
p_virt = &p_t2->dma_mem[i].virt_addr;
|
||||
|
||||
*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
|
||||
&p_t2->dma_mem[i].phys_addr,
|
||||
size);
|
||||
if (!p_t2->dma_mem[i].virt_addr)
|
||||
return ECORE_NOMEM;
|
||||
|
||||
OSAL_MEM_ZERO(*p_virt, size);
|
||||
p_t2->dma_mem[i].size = size;
|
||||
total_size -= size;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
|
||||
u32 conn_num, total_size, ent_per_page, psz, i;
|
||||
struct phys_mem_desc *p_t2_last_page;
|
||||
struct ecore_ilt_client_cfg *p_src;
|
||||
struct ecore_src_iids src_iids;
|
||||
struct ecore_dma_mem *p_t2;
|
||||
struct ecore_src_t2 *p_t2;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
|
||||
@ -823,49 +888,39 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
|
||||
|
||||
/* use the same page size as the SRC ILT client */
|
||||
psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
|
||||
p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
|
||||
p_t2 = &p_mngr->src_t2;
|
||||
p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
|
||||
|
||||
/* allocate t2 */
|
||||
p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
|
||||
p_mngr->t2_num_pages *
|
||||
sizeof(struct ecore_dma_mem));
|
||||
if (!p_mngr->t2) {
|
||||
p_t2->dma_mem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
|
||||
p_t2->num_pages *
|
||||
sizeof(struct phys_mem_desc));
|
||||
if (!p_t2->dma_mem) {
|
||||
DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
|
||||
rc = ECORE_NOMEM;
|
||||
goto t2_fail;
|
||||
}
|
||||
|
||||
/* allocate t2 pages */
|
||||
for (i = 0; i < p_mngr->t2_num_pages; i++) {
|
||||
u32 size = OSAL_MIN_T(u32, total_size, psz);
|
||||
void **p_virt = &p_mngr->t2[i].p_virt;
|
||||
|
||||
*p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
|
||||
&p_mngr->t2[i].p_phys, size);
|
||||
if (!p_mngr->t2[i].p_virt) {
|
||||
rc = ECORE_NOMEM;
|
||||
goto t2_fail;
|
||||
}
|
||||
OSAL_MEM_ZERO(*p_virt, size);
|
||||
p_mngr->t2[i].size = size;
|
||||
total_size -= size;
|
||||
}
|
||||
rc = ecore_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
|
||||
if (rc)
|
||||
goto t2_fail;
|
||||
|
||||
/* Set the t2 pointers */
|
||||
|
||||
/* entries per page - must be a power of two */
|
||||
ent_per_page = psz / sizeof(struct src_ent);
|
||||
|
||||
p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
|
||||
p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
|
||||
|
||||
p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
|
||||
p_mngr->last_free = (u64)p_t2->p_phys +
|
||||
((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
|
||||
p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
|
||||
p_t2->last_free = (u64)p_t2_last_page->phys_addr +
|
||||
((conn_num - 1) & (ent_per_page - 1)) *
|
||||
sizeof(struct src_ent);
|
||||
|
||||
for (i = 0; i < p_mngr->t2_num_pages; i++) {
|
||||
for (i = 0; i < p_t2->num_pages; i++) {
|
||||
u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
|
||||
struct src_ent *entries = p_mngr->t2[i].p_virt;
|
||||
u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
|
||||
struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
|
||||
u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
|
||||
u32 j;
|
||||
|
||||
for (j = 0; j < ent_num - 1; j++) {
|
||||
@ -873,8 +928,8 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
|
||||
entries[j].next = OSAL_CPU_TO_BE64(val);
|
||||
}
|
||||
|
||||
if (i < p_mngr->t2_num_pages - 1)
|
||||
val = (u64)p_mngr->t2[i + 1].p_phys;
|
||||
if (i < p_t2->num_pages - 1)
|
||||
val = (u64)p_t2->dma_mem[i + 1].phys_addr;
|
||||
else
|
||||
val = 0;
|
||||
entries[j].next = OSAL_CPU_TO_BE64(val);
|
||||
@ -921,13 +976,13 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
|
||||
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
|
||||
|
||||
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
|
||||
struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
|
||||
struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
|
||||
|
||||
if (p_dma->p_virt)
|
||||
if (p_dma->virt_addr)
|
||||
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
|
||||
p_dma->p_virt,
|
||||
p_dma->p_phys, p_dma->size);
|
||||
p_dma->p_virt = OSAL_NULL;
|
||||
p_dma->phys_addr, p_dma->size);
|
||||
p_dma->virt_addr = OSAL_NULL;
|
||||
}
|
||||
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
|
||||
p_mngr->ilt_shadow = OSAL_NULL;
|
||||
@ -938,28 +993,33 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ilt_cli_blk *p_blk,
|
||||
enum ilt_clients ilt_client, u32 start_line_offset)
|
||||
{
|
||||
struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
|
||||
u32 lines, line, sz_left, lines_to_skip = 0;
|
||||
struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
|
||||
u32 lines, line, sz_left, lines_to_skip, first_skipped_line;
|
||||
|
||||
/* Special handling for RoCE that supports dynamic allocation */
|
||||
if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
|
||||
return ECORE_SUCCESS;
|
||||
|
||||
lines_to_skip = p_blk->dynamic_line_cnt;
|
||||
|
||||
if (!p_blk->total_size)
|
||||
return ECORE_SUCCESS;
|
||||
|
||||
sz_left = p_blk->total_size;
|
||||
lines_to_skip = p_blk->dynamic_line_cnt;
|
||||
lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
|
||||
line = p_blk->start_line + start_line_offset -
|
||||
p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
|
||||
p_hwfn->p_cxt_mngr->pf_start_line;
|
||||
first_skipped_line = line + p_blk->dynamic_line_offset;
|
||||
|
||||
for (; lines; lines--) {
|
||||
while (lines) {
|
||||
dma_addr_t p_phys;
|
||||
void *p_virt;
|
||||
u32 size;
|
||||
|
||||
if (lines_to_skip && (line == first_skipped_line)) {
|
||||
line += lines_to_skip;
|
||||
continue;
|
||||
}
|
||||
|
||||
size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
|
||||
|
||||
/* @DPDK */
|
||||
@ -971,8 +1031,8 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
|
||||
return ECORE_NOMEM;
|
||||
OSAL_MEM_ZERO(p_virt, size);
|
||||
|
||||
ilt_shadow[line].p_phys = p_phys;
|
||||
ilt_shadow[line].p_virt = p_virt;
|
||||
ilt_shadow[line].phys_addr = p_phys;
|
||||
ilt_shadow[line].virt_addr = p_virt;
|
||||
ilt_shadow[line].size = size;
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
|
||||
@ -982,6 +1042,7 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
sz_left -= size;
|
||||
line++;
|
||||
lines--;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
@ -997,7 +1058,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
|
||||
|
||||
size = ecore_cxt_ilt_shadow_size(clients);
|
||||
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
|
||||
size * sizeof(struct ecore_dma_mem));
|
||||
size * sizeof(struct phys_mem_desc));
|
||||
|
||||
if (!p_mngr->ilt_shadow) {
|
||||
DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
|
||||
@ -1007,7 +1068,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
|
||||
"Allocated 0x%x bytes for ilt shadow\n",
|
||||
(u32)(size * sizeof(struct ecore_dma_mem)));
|
||||
(u32)(size * sizeof(struct phys_mem_desc)));
|
||||
|
||||
for_each_ilt_valid_client(i, clients) {
|
||||
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
|
||||
@ -1058,7 +1119,7 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
|
||||
}
|
||||
|
||||
static enum _ecore_status_t
|
||||
ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
|
||||
__ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
|
||||
u32 cid_start, u32 cid_count,
|
||||
struct ecore_cid_acquired_map *p_map)
|
||||
{
|
||||
@ -1082,49 +1143,67 @@ ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
|
||||
static enum _ecore_status_t
|
||||
ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type, u32 start_cid,
|
||||
u32 vf_start_cid)
|
||||
{
|
||||
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
|
||||
u32 max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
|
||||
u32 start_cid = 0, vf_start_cid = 0;
|
||||
u32 type, vf;
|
||||
u32 vf, max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
|
||||
struct ecore_cid_acquired_map *p_map;
|
||||
struct ecore_conn_type_cfg *p_cfg;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
for (type = 0; type < MAX_CONN_TYPES; type++) {
|
||||
struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
|
||||
struct ecore_cid_acquired_map *p_map;
|
||||
p_cfg = &p_mngr->conn_cfg[type];
|
||||
|
||||
/* Handle PF maps */
|
||||
p_map = &p_mngr->acquired[type];
|
||||
if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
|
||||
p_cfg->cid_count, p_map))
|
||||
rc = __ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
|
||||
p_cfg->cid_count, p_map);
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
/* Handle VF maps */
|
||||
for (vf = 0; vf < max_num_vfs; vf++) {
|
||||
p_map = &p_mngr->acquired_vf[type][vf];
|
||||
rc = __ecore_cid_map_alloc_single(p_hwfn, type, vf_start_cid,
|
||||
p_cfg->cids_per_vf, p_map);
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
|
||||
u32 start_cid = 0, vf_start_cid = 0;
|
||||
u32 type;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
for (type = 0; type < MAX_CONN_TYPES; type++) {
|
||||
rc = ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
|
||||
vf_start_cid);
|
||||
if (rc != ECORE_SUCCESS)
|
||||
goto cid_map_fail;
|
||||
|
||||
/* Handle VF maps */
|
||||
for (vf = 0; vf < max_num_vfs; vf++) {
|
||||
p_map = &p_mngr->acquired_vf[type][vf];
|
||||
if (ecore_cid_map_alloc_single(p_hwfn, type,
|
||||
vf_start_cid,
|
||||
p_cfg->cids_per_vf,
|
||||
p_map))
|
||||
goto cid_map_fail;
|
||||
}
|
||||
|
||||
start_cid += p_cfg->cid_count;
|
||||
vf_start_cid += p_cfg->cids_per_vf;
|
||||
start_cid += p_mngr->conn_cfg[type].cid_count;
|
||||
vf_start_cid += p_mngr->conn_cfg[type].cids_per_vf;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
|
||||
cid_map_fail:
|
||||
ecore_cid_map_free(p_hwfn);
|
||||
return ECORE_NOMEM;
|
||||
return rc;
|
||||
}
|
||||
|
||||
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
struct ecore_cid_acquired_map *acquired_vf;
|
||||
struct ecore_ilt_client_cfg *clients;
|
||||
struct ecore_cxt_mngr *p_mngr;
|
||||
u32 i;
|
||||
u32 i, max_num_vfs;
|
||||
|
||||
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
|
||||
if (!p_mngr) {
|
||||
@ -1132,9 +1211,6 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
|
||||
return ECORE_NOMEM;
|
||||
}
|
||||
|
||||
/* Set the cxt mangr pointer prior to further allocations */
|
||||
p_hwfn->p_cxt_mngr = p_mngr;
|
||||
|
||||
/* Initialize ILT client registers */
|
||||
clients = p_mngr->clients;
|
||||
clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
|
||||
@ -1183,6 +1259,22 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
|
||||
#endif
|
||||
OSAL_MUTEX_INIT(&p_mngr->mutex);
|
||||
|
||||
/* Set the cxt mangr pointer prior to further allocations */
|
||||
p_hwfn->p_cxt_mngr = p_mngr;
|
||||
|
||||
max_num_vfs = NUM_OF_VFS(p_hwfn->p_dev);
|
||||
for (i = 0; i < MAX_CONN_TYPES; i++) {
|
||||
acquired_vf = OSAL_CALLOC(p_hwfn->p_dev, GFP_KERNEL,
|
||||
max_num_vfs, sizeof(*acquired_vf));
|
||||
if (!acquired_vf) {
|
||||
DP_NOTICE(p_hwfn, false,
|
||||
"Failed to allocate an array of `struct ecore_cid_acquired_map'\n");
|
||||
return ECORE_NOMEM;
|
||||
}
|
||||
|
||||
p_mngr->acquired_vf[i] = acquired_vf;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
@ -1220,6 +1312,8 @@ tables_alloc_fail:
|
||||
|
||||
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
if (!p_hwfn->p_cxt_mngr)
|
||||
return;
|
||||
|
||||
@ -1229,7 +1323,11 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
|
||||
#ifdef CONFIG_ECORE_LOCK_ALLOC
|
||||
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
|
||||
#endif
|
||||
for (i = 0; i < MAX_CONN_TYPES; i++)
|
||||
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr->acquired_vf[i]);
|
||||
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
|
||||
|
||||
p_hwfn->p_cxt_mngr = OSAL_NULL;
|
||||
}
|
||||
|
||||
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
|
||||
@ -1435,14 +1533,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
|
||||
bool is_pf_loading)
|
||||
{
|
||||
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
|
||||
struct ecore_mcp_link_state *p_link;
|
||||
struct ecore_qm_iids iids;
|
||||
|
||||
OSAL_MEM_ZERO(&iids, sizeof(iids));
|
||||
ecore_cxt_qm_iids(p_hwfn, &iids);
|
||||
|
||||
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
|
||||
|
||||
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
|
||||
qm_info->max_phys_tcs_per_port,
|
||||
is_pf_loading,
|
||||
@ -1452,7 +1546,7 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
|
||||
qm_info->num_vf_pqs,
|
||||
qm_info->start_vport,
|
||||
qm_info->num_vports, qm_info->pf_wfq,
|
||||
qm_info->pf_rl, p_link->speed,
|
||||
qm_info->pf_rl,
|
||||
p_hwfn->qm_info.qm_pq_params,
|
||||
p_hwfn->qm_info.qm_vport_params);
|
||||
}
|
||||
@ -1601,7 +1695,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
struct ecore_ilt_client_cfg *clients;
|
||||
struct ecore_cxt_mngr *p_mngr;
|
||||
struct ecore_dma_mem *p_shdw;
|
||||
struct phys_mem_desc *p_shdw;
|
||||
u32 line, rt_offst, i;
|
||||
|
||||
ecore_ilt_bounds_init(p_hwfn);
|
||||
@ -1626,10 +1720,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
/** p_virt could be OSAL_NULL incase of dynamic
|
||||
* allocation
|
||||
*/
|
||||
if (p_shdw[line].p_virt != OSAL_NULL) {
|
||||
if (p_shdw[line].virt_addr != OSAL_NULL) {
|
||||
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
|
||||
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
|
||||
(p_shdw[line].p_phys >> 12));
|
||||
(p_shdw[line].phys_addr >> 12));
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
|
||||
"Setting RT[0x%08x] from"
|
||||
@ -1637,7 +1731,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
" Physical addr: 0x%lx\n",
|
||||
rt_offst, line, i,
|
||||
(unsigned long)(p_shdw[line].
|
||||
p_phys >> 12));
|
||||
phys_addr >> 12));
|
||||
}
|
||||
|
||||
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
|
||||
@ -1666,9 +1760,9 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
OSAL_LOG2(rounded_conn_num));
|
||||
|
||||
STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
|
||||
p_hwfn->p_cxt_mngr->first_free);
|
||||
p_hwfn->p_cxt_mngr->src_t2.first_free);
|
||||
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
|
||||
p_hwfn->p_cxt_mngr->last_free);
|
||||
p_hwfn->p_cxt_mngr->src_t2.last_free);
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
|
||||
"Configured SEARCHER for 0x%08x connections\n",
|
||||
conn_num);
|
||||
@ -1699,18 +1793,18 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
u8 i;
|
||||
|
||||
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
|
||||
ecore_cxt_tm_iids(p_mngr, &tm_iids);
|
||||
ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
|
||||
|
||||
/* @@@TBD No pre-scan for now */
|
||||
|
||||
/* Note: We assume consecutive VFs for a PF */
|
||||
for (i = 0; i < p_mngr->vf_count; i++) {
|
||||
cfg_word = 0;
|
||||
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
|
||||
SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
|
||||
SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
|
||||
SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
|
||||
SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
|
||||
|
||||
/* Note: We assume consecutive VFs for a PF */
|
||||
for (i = 0; i < p_mngr->vf_count; i++) {
|
||||
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
|
||||
(sizeof(cfg_word) / sizeof(u32)) *
|
||||
(p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
|
||||
@ -1728,7 +1822,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
|
||||
(NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
|
||||
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
|
||||
|
||||
/* enale scan */
|
||||
/* enable scan */
|
||||
STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
|
||||
tm_iids.pf_cids ? 0x1 : 0x0);
|
||||
|
||||
@ -1972,10 +2066,10 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
|
||||
line = p_info->iid / cxts_per_p;
|
||||
|
||||
/* Make sure context is allocated (dynamic allocation) */
|
||||
if (!p_mngr->ilt_shadow[line].p_virt)
|
||||
if (!p_mngr->ilt_shadow[line].virt_addr)
|
||||
return ECORE_INVAL;
|
||||
|
||||
p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
|
||||
p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].virt_addr +
|
||||
p_info->iid % cxts_per_p * conn_cxt_size;
|
||||
|
||||
DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
|
||||
@ -2074,7 +2168,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
|
||||
|
||||
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
|
||||
if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
|
||||
goto out0;
|
||||
|
||||
p_ptt = ecore_ptt_acquire(p_hwfn);
|
||||
@ -2094,8 +2188,8 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
|
||||
}
|
||||
OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
|
||||
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
|
||||
p_blk->real_size_in_page;
|
||||
|
||||
@ -2107,7 +2201,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
|
||||
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
|
||||
SET_FIELD(ilt_hw_entry,
|
||||
ILT_ENTRY_PHY_ADDR,
|
||||
(p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
|
||||
(p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr >> 12));
|
||||
|
||||
/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
|
||||
|
||||
@ -2115,21 +2209,6 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
|
||||
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
|
||||
OSAL_NULL /* default parameters */);
|
||||
|
||||
if (elem_type == ECORE_ELEM_CXT) {
|
||||
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
|
||||
elems_per_p;
|
||||
|
||||
/* Update the relevant register in the parser */
|
||||
ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
|
||||
last_cid_allocated - 1);
|
||||
|
||||
if (!p_hwfn->b_rdma_enabled_in_prs) {
|
||||
/* Enable RoCE search */
|
||||
ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
|
||||
p_hwfn->b_rdma_enabled_in_prs = true;
|
||||
}
|
||||
}
|
||||
|
||||
out1:
|
||||
ecore_ptt_release(p_hwfn, p_ptt);
|
||||
out0:
|
||||
@ -2196,16 +2275,16 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
for (i = shadow_start_line; i < shadow_end_line; i++) {
|
||||
if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
|
||||
if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
|
||||
continue;
|
||||
|
||||
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr,
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
|
||||
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = OSAL_NULL;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
|
||||
p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
|
||||
|
||||
/* compute absolute offset */
|
||||
|
@ -22,6 +22,18 @@ enum ecore_cxt_elem_type {
|
||||
ECORE_ELEM_TASK
|
||||
};
|
||||
|
||||
enum ilt_clients {
|
||||
ILT_CLI_CDUC,
|
||||
ILT_CLI_CDUT,
|
||||
ILT_CLI_QM,
|
||||
ILT_CLI_TM,
|
||||
ILT_CLI_SRC,
|
||||
ILT_CLI_TSDM,
|
||||
ILT_CLI_RGFS,
|
||||
ILT_CLI_TGFS,
|
||||
ILT_CLI_MAX
|
||||
};
|
||||
|
||||
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
|
||||
enum protocol_type type,
|
||||
u32 *vf_cid);
|
||||
|
@ -310,8 +310,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
|
||||
continue;
|
||||
|
||||
/* if no app tlv was present, don't override in FW */
|
||||
ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
|
||||
priority, tc, type);
|
||||
ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
|
||||
p_data->arr[DCBX_PROTOCOL_ETH].enable,
|
||||
priority, tc, type);
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
|
File diff suppressed because it is too large
File diff suppressed because it is too large
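The init_fw_funcs.h hunk below introduces struct phys_mem_desc, which replaces struct ecore_dma_mem throughout the ILT shadow and SRC T2 code. A brief illustrative sketch of the field renaming (the helper function is hypothetical; the mapping itself is taken from this patch):

    /* Field mapping applied by this patch:
     *   struct ecore_dma_mem .p_virt  ->  struct phys_mem_desc .virt_addr
     *   struct ecore_dma_mem .p_phys  ->  struct phys_mem_desc .phys_addr
     *   .size stays .size
     */
    static void example_reset_desc(struct phys_mem_desc *desc)
    {
    	desc->virt_addr = OSAL_NULL;   /* was p_virt */
    	desc->phys_addr = 0;           /* was p_phys */
    	desc->size = 0;
    }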
@ -6,7 +6,20 @@
|
||||
|
||||
#ifndef _INIT_FW_FUNCS_H
|
||||
#define _INIT_FW_FUNCS_H
|
||||
/* Forward declarations */
|
||||
#include "ecore_hsi_common.h"
|
||||
#include "ecore_hsi_eth.h"
|
||||
|
||||
/* Physical memory descriptor */
|
||||
struct phys_mem_desc {
|
||||
dma_addr_t phys_addr;
|
||||
void *virt_addr;
|
||||
u32 size; /* In bytes */
|
||||
};
|
||||
|
||||
/* Returns the VOQ based on port and TC */
|
||||
#define VOQ(port, tc, max_phys_tcs_per_port) \
|
||||
((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + (port) : \
|
||||
(port) * (max_phys_tcs_per_port) + (tc))
|
||||
|
||||
struct init_qm_pq_params;
|
||||
|
||||
@ -16,6 +29,7 @@ struct init_qm_pq_params;
|
||||
* Returns the required host memory size in 4KB units.
|
||||
* Must be called before all QM init HSI functions.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param num_pf_cids - number of connections used by this PF
|
||||
* @param num_vf_cids - number of connections used by VFs of this PF
|
||||
* @param num_tids - number of tasks used by this PF
|
||||
@ -24,7 +38,8 @@ struct init_qm_pq_params;
|
||||
*
|
||||
* @return The required host memory size in 4KB units.
|
||||
*/
|
||||
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
|
||||
u32 ecore_qm_pf_mem_size(struct ecore_hwfn *p_hwfn,
|
||||
u32 num_pf_cids,
|
||||
u32 num_vf_cids,
|
||||
u32 num_tids,
|
||||
u16 num_pf_pqs,
|
||||
@ -39,20 +54,24 @@ u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
|
||||
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
|
||||
* @param pf_rl_en - enable per-PF rate limiters
|
||||
* @param pf_wfq_en - enable per-PF WFQ
|
||||
* @param vport_rl_en - enable per-VPORT rate limiters
|
||||
* @param global_rl_en - enable global rate limiters
|
||||
* @param vport_wfq_en - enable per-VPORT WFQ
|
||||
* @param port_params - array of size MAX_NUM_PORTS with params for each port
|
||||
* @param port_params - array with parameters for each port.
|
||||
* @param global_rl_params - array with parameters for each global RL.
|
||||
* If OSAL_NULL, global RLs are not configured.
|
||||
*
|
||||
* @return 0 on success, -1 on error.
|
||||
*/
|
||||
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
|
||||
u8 max_ports_per_engine,
|
||||
u8 max_phys_tcs_per_port,
|
||||
bool pf_rl_en,
|
||||
bool pf_wfq_en,
|
||||
bool vport_rl_en,
|
||||
bool vport_wfq_en,
|
||||
struct init_qm_port_params port_params[MAX_NUM_PORTS]);
|
||||
u8 max_ports_per_engine,
|
||||
u8 max_phys_tcs_per_port,
|
||||
bool pf_rl_en,
|
||||
bool pf_wfq_en,
|
||||
bool global_rl_en,
|
||||
bool vport_wfq_en,
|
||||
struct init_qm_port_params port_params[MAX_NUM_PORTS],
|
||||
struct init_qm_global_rl_params
|
||||
global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]);
|
||||
|
||||
/**
|
||||
* @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
|
||||
@ -76,7 +95,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
|
||||
* be 0. otherwise, the weight must be non-zero.
|
||||
* @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
|
||||
* configure. ignored if PF RL is globally disabled.
|
||||
* @param link_speed - link speed in Mbps.
|
||||
* @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
|
||||
* each Tx PQ associated with the specified PF.
|
||||
* @param vport_params - array of size num_vports with parameters for each
|
||||
@ -95,11 +113,10 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
|
||||
u16 start_pq,
|
||||
u16 num_pf_pqs,
|
||||
u16 num_vf_pqs,
|
||||
u8 start_vport,
|
||||
u8 num_vports,
|
||||
u16 start_vport,
|
||||
u16 num_vports,
|
||||
u16 pf_wfq,
|
||||
u32 pf_rl,
|
||||
u32 link_speed,
|
||||
struct init_qm_pq_params *pq_params,
|
||||
struct init_qm_vport_params *vport_params);
|
||||
|
||||
@ -141,14 +158,30 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
|
||||
* @param first_tx_pq_id- An array containing the first Tx PQ ID associated
|
||||
* with the VPORT for each TC. This array is filled by
|
||||
* ecore_qm_pf_rt_init
|
||||
* @param vport_wfq - WFQ weight. Must be non-zero.
|
||||
* @param wfq - WFQ weight. Must be non-zero.
|
||||
*
|
||||
* @return 0 on success, -1 on error.
|
||||
*/
|
||||
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
u16 first_tx_pq_id[NUM_OF_TCS],
|
||||
u16 vport_wfq);
|
||||
u16 wfq);
|
||||
|
||||
/**
|
||||
* @brief ecore_init_global_rl - Initializes the rate limit of the specified
|
||||
* rate limiter.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers
|
||||
* @param rl_id - RL ID
|
||||
* @param rate_limit - rate limit in Mb/sec units
|
||||
*
|
||||
* @return 0 on success, -1 on error.
|
||||
*/
|
||||
int ecore_init_global_rl(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
u16 rl_id,
|
||||
u32 rate_limit);
|
||||
|
||||
/**
|
||||
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
|
||||
@ -283,8 +316,9 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
/**
|
||||
* @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
|
||||
* port
|
||||
* port.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param dest_port - vxlan destination udp port.
|
||||
*/
|
||||
@ -295,6 +329,7 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
|
||||
/**
|
||||
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param vxlan_enable - vxlan enable flag.
|
||||
*/
|
||||
@ -305,6 +340,7 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
|
||||
/**
|
||||
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param eth_gre_enable - eth GRE enable enable flag.
|
||||
* @param ip_gre_enable - IP GRE enable enable flag.
|
||||
@ -318,6 +354,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
|
||||
* @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
|
||||
* udp port
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param dest_port - geneve destination udp port.
|
||||
*/
|
||||
@ -326,8 +363,9 @@ void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
|
||||
u16 dest_port);
|
||||
|
||||
/**
|
||||
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
|
||||
* @brief ecore_set_geneve_enable - enable or disable GRE tunnel in HW
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param eth_geneve_enable - eth GENEVE enable enable flag.
|
||||
* @param ip_geneve_enable - IP GENEVE enable enable flag.
|
||||
@ -347,7 +385,7 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt);
|
||||
|
||||
/**
|
||||
* @brief ecore_gft_disable - Disable and GFT
|
||||
* @brief ecore_gft_disable - Disable GFT
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
@ -360,6 +398,7 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
|
||||
/**
|
||||
* @brief ecore_gft_config - Enable and configure HW for GFT
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
* @param pf_id - pf on which to enable GFT.
|
||||
* @param tcp - set profile tcp packets.
|
||||
@ -382,12 +421,13 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
|
||||
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
|
||||
* used before first ETH queue started.
|
||||
*
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers. Don't care
|
||||
* if runtime_init used
|
||||
* if runtime_init used.
|
||||
* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
|
||||
* @param runtime_init - Set 1 to init runtime registers in engine phase. Set 0
|
||||
* if VF zone size mode configured after engine phase.
|
||||
* @param runtime_init - Set 1 to init runtime registers in engine phase.
|
||||
* Set 0 if VF zone size mode configured after engine
|
||||
* phase.
|
||||
*/
|
||||
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
|
||||
*p_ptt, u16 mode, bool runtime_init);
|
||||
@ -396,6 +436,7 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
|
||||
* @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
|
||||
* VF zone size mode.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param stat_cnt_id - statistic counter id
|
||||
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
|
||||
*/
|
||||
@ -406,6 +447,7 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
|
||||
* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
|
||||
* size mode.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param vf_id - vf id.
|
||||
* @param vf_queue_id - per VF rx queue id.
|
||||
* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
|
||||
@ -416,6 +458,7 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
|
||||
* @brief ecore_enable_context_validation - Enable and configure context
|
||||
* validation.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ptt - ptt window used for writing the registers.
|
||||
*/
|
||||
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
|
||||
@ -424,12 +467,14 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
|
||||
* @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
|
||||
* session context.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ctx_mem - pointer to context memory.
|
||||
* @param ctx_size - context size.
|
||||
* @param ctx_type - context type.
|
||||
* @param cid - context cid.
|
||||
*/
|
||||
void ecore_calc_session_ctx_validation(void *p_ctx_mem,
|
||||
void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
|
||||
void *p_ctx_mem,
|
||||
u16 ctx_size,
|
||||
u8 ctx_type,
|
||||
u32 cid);
|
||||
@ -438,12 +483,14 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem,
|
||||
* @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
|
||||
* context.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ctx_mem - pointer to context memory.
|
||||
* @param ctx_size - context size.
|
||||
* @param ctx_type - context type.
|
||||
* @param tid - context tid.
|
||||
*/
|
||||
void ecore_calc_task_ctx_validation(void *p_ctx_mem,
|
||||
void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
|
||||
void *p_ctx_mem,
|
||||
u16 ctx_size,
|
||||
u8 ctx_type,
|
||||
u32 tid);
|
||||
@ -457,18 +504,22 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem,
|
||||
* @param ctx_size - size to initialzie.
|
||||
* @param ctx_type - context type.
|
||||
*/
|
||||
void ecore_memset_session_ctx(void *p_ctx_mem,
|
||||
void ecore_memset_session_ctx(struct ecore_hwfn *p_hwfn,
|
||||
void *p_ctx_mem,
|
||||
u32 ctx_size,
|
||||
u8 ctx_type);
|
||||
|
||||
/**
|
||||
* @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
|
||||
* validation bytes.
|
||||
*
|
||||
* @param p_hwfn - HW device data
|
||||
* @param p_ctx_mem - pointer to context memory.
|
||||
* @param ctx_size - size to initialzie.
|
||||
* @param ctx_type - context type.
|
||||
*/
|
||||
void ecore_memset_task_ctx(void *p_ctx_mem,
|
||||
void ecore_memset_task_ctx(struct ecore_hwfn *p_hwfn,
|
||||
void *p_ctx_mem,
|
||||
u32 ctx_size,
|
||||
u8 ctx_type);
|
||||
|
||||
|
@ -15,7 +15,6 @@

#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"

@ -24,7 +23,7 @@

void ecore_init_iro_array(struct ecore_dev *p_dev)
{
p_dev->iro_arr = iro_arr;
p_dev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}

/* Runtime configuration helpers */
@ -473,9 +472,9 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
bool b_dmae = (phase != PHASE_ENGINE);
u32 cmd_num, num_init_ops;
union init_op *init;
bool b_dmae = false;
enum _ecore_status_t rc = ECORE_SUCCESS;

num_init_ops = p_dev->fw_data->init_ops_size;
@ -511,7 +510,6 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* ecore_init_run is always invoked from
@ -522,6 +520,9 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,

case INIT_OP_CALLBACK:
rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
if (phase == PHASE_ENGINE &&
cmd->callback.callback_id == DMAE_READY_CB)
b_dmae = true;
break;
}

@ -567,11 +568,17 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
fw->fw_overlays = (u32 *)(fw_data + offset);
len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
fw->fw_overlays_len = len;
#else
fw->init_ops = (union init_op *)init_ops;
fw->arr_data = (u32 *)init_val;
fw->modes_tree_buf = (u8 *)modes_tree_buf;
fw->init_ops_size = init_ops_size;
fw->fw_overlays = fw_overlays;
fw->fw_overlays_len = sizeof(fw_overlays);
#endif

return ECORE_SUCCESS;

@ -95,6 +95,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
osal_size_t size);

#define STORE_RT_REG_AGG(hwfn, offset, val) \
ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
ecore_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))

#endif /* __ECORE_INIT_OPS__ */

@ -28,8 +28,10 @@ struct ecore_pi_info {
|
||||
|
||||
struct ecore_sb_sp_info {
|
||||
struct ecore_sb_info sb_info;
|
||||
/* per protocol index data */
|
||||
|
||||
/* Per protocol index data */
|
||||
struct ecore_pi_info pi_info_arr[MAX_PIS_PER_SB];
|
||||
osal_size_t pi_info_arr_size;
|
||||
};
|
||||
|
||||
enum ecore_attention_type {
|
||||
@ -58,10 +60,10 @@ struct aeu_invert_reg_bit {
|
||||
#define ATTENTION_OFFSET_MASK (0x000ff000)
|
||||
#define ATTENTION_OFFSET_SHIFT (12)
|
||||
|
||||
#define ATTENTION_BB_MASK (0x00700000)
|
||||
#define ATTENTION_BB_MASK (0xf)
|
||||
#define ATTENTION_BB_SHIFT (20)
|
||||
#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
|
||||
#define ATTENTION_BB_DIFFERENT (1 << 23)
|
||||
#define ATTENTION_BB_DIFFERENT (1 << 24)
|
||||
|
||||
#define ATTENTION_CLEAR_ENABLE (1 << 28)
|
||||
unsigned int flags;
|
||||
@ -606,6 +608,8 @@ enum aeu_invert_reg_special_type {
|
||||
AEU_INVERT_REG_SPECIAL_CNIG_1,
|
||||
AEU_INVERT_REG_SPECIAL_CNIG_2,
|
||||
AEU_INVERT_REG_SPECIAL_CNIG_3,
|
||||
AEU_INVERT_REG_SPECIAL_MCP_UMP_TX,
|
||||
AEU_INVERT_REG_SPECIAL_MCP_SCPAD,
|
||||
AEU_INVERT_REG_SPECIAL_MAX,
|
||||
};
|
||||
|
||||
@ -615,6 +619,8 @@ aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
|
||||
{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
|
||||
{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
|
||||
{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
|
||||
{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
|
||||
};
|
||||
|
||||
/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
|
||||
@ -678,10 +684,15 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
|
||||
{"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
|
||||
MAX_BLOCK_ID},
|
||||
{"OPTE", ATTENTION_PAR, OSAL_NULL, BLOCK_OPTE},
|
||||
{"MCP", ATTENTION_PAR, OSAL_NULL, BLOCK_MCP},
|
||||
{"MS", ATTENTION_SINGLE, OSAL_NULL, BLOCK_MS},
|
||||
{"UMAC", ATTENTION_SINGLE, OSAL_NULL, BLOCK_UMAC},
|
||||
{"LED", ATTENTION_SINGLE, OSAL_NULL, BLOCK_LED},
|
||||
{"BMBN", ATTENTION_SINGLE, OSAL_NULL, BLOCK_BMBN},
|
||||
{"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
|
||||
{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
|
||||
{"BMB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
|
||||
{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
|
||||
{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
|
||||
{"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
|
||||
@ -784,10 +795,17 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
|
||||
{"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
|
||||
MAX_BLOCK_ID},
|
||||
{"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
|
||||
{"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
|
||||
MAX_BLOCK_ID},
|
||||
{"AVS", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
|
||||
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_UMP_TX), OSAL_NULL,
|
||||
BLOCK_AVS_WRAP},
|
||||
{"AVS", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
|
||||
ATTENTION_BB(AEU_INVERT_REG_SPECIAL_MCP_SCPAD), OSAL_NULL,
|
||||
BLOCK_AVS_WRAP},
|
||||
{"PCIe core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
|
||||
{"PCIe link up", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
|
||||
{"PCIe hot reset", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
|
||||
{"Reserved %d", (9 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
|
||||
MAX_BLOCK_ID},
|
||||
}
|
||||
},
|
||||
|
||||
@ -955,14 +973,22 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
|
||||
/* @DPDK */
|
||||
/* Reach assertion if attention is fatal */
|
||||
if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
|
||||
#ifndef ASIC_ONLY
|
||||
DP_NOTICE(p_hwfn, !CHIP_REV_IS_EMUL(p_hwfn->p_dev),
|
||||
"`%s': Fatal attention\n", p_bit_name);
|
||||
#else
|
||||
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
|
||||
p_bit_name);
|
||||
#endif
|
||||
|
||||
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
|
||||
}
|
||||
|
||||
/* Prevent this Attention from being asserted in the future */
|
||||
if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
|
||||
#ifndef ASIC_ONLY
|
||||
CHIP_REV_IS_EMUL(p_hwfn->p_dev) ||
|
||||
#endif
|
||||
p_hwfn->p_dev->attn_clr_en) {
|
||||
u32 val;
|
||||
u32 mask = ~bitmask;
|
||||
@ -1013,6 +1039,13 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
|
||||
p_aeu->bit_name);
|
||||
}
|
||||
|
||||
#define MISC_REG_AEU_AFTER_INVERT_IGU(n) \
|
||||
(MISC_REG_AEU_AFTER_INVERT_1_IGU + (n) * 0x4)
|
||||
|
||||
#define MISC_REG_AEU_ENABLE_IGU_OUT(n, group) \
|
||||
(MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (n) * 0x4 + \
|
||||
(group) * 0x4 * NUM_ATTN_REGS)
|
||||
|
||||
/**
|
||||
* @brief - handles deassertion of previously asserted attentions.
|
||||
*
|
||||
@ -1032,8 +1065,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
|
||||
/* Read the attention registers in the AEU */
|
||||
for (i = 0; i < NUM_ATTN_REGS; i++) {
|
||||
aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
|
||||
MISC_REG_AEU_AFTER_INVERT_1_IGU +
|
||||
i * 0x4);
|
||||
MISC_REG_AEU_AFTER_INVERT_IGU(i));
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
|
||||
"Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
|
||||
}
|
||||
@ -1043,7 +1075,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
|
||||
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
|
||||
u32 parities;
|
||||
|
||||
aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
|
||||
aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, 0);
|
||||
en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
|
||||
parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
|
||||
|
||||
@ -1074,9 +1106,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
|
||||
for (i = 0; i < NUM_ATTN_REGS; i++) {
|
||||
u32 bits;
|
||||
|
||||
aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
|
||||
i * sizeof(u32) +
|
||||
k * sizeof(u32) * NUM_ATTN_REGS;
|
||||
aeu_en = MISC_REG_AEU_ENABLE_IGU_OUT(i, k);
|
||||
en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
|
||||
bits = aeu_inv_arr[i] & en;
|
||||
|
||||
@ -1249,7 +1279,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
|
||||
struct ecore_pi_info *pi_info = OSAL_NULL;
|
||||
struct ecore_sb_attn_info *sb_attn;
|
||||
struct ecore_sb_info *sb_info;
|
||||
int arr_size;
|
||||
u16 rc = 0;
|
||||
|
||||
if (!p_hwfn)
|
||||
@ -1261,7 +1290,6 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
|
||||
}
|
||||
|
||||
sb_info = &p_hwfn->p_sp_sb->sb_info;
|
||||
arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
|
||||
if (!sb_info) {
|
||||
DP_ERR(p_hwfn->p_dev,
|
||||
"Status block is NULL - cannot ack interrupts\n");
|
||||
@ -1326,14 +1354,14 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
|
||||
ecore_int_attentions(p_hwfn);
|
||||
|
||||
if (rc & ECORE_SB_IDX) {
|
||||
int pi;
|
||||
osal_size_t pi;
|
||||
|
||||
/* Since we only looked at the SB index, it's possible more
|
||||
* than a single protocol-index on the SB incremented.
|
||||
* Iterate over all configured protocol indices and check
|
||||
* whether something happened for each.
|
||||
*/
|
||||
for (pi = 0; pi < arr_size; pi++) {
|
||||
for (pi = 0; pi < p_hwfn->p_sp_sb->pi_info_arr_size; pi++) {
|
||||
pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
|
||||
if (pi_info->comp_cb != OSAL_NULL)
|
||||
pi_info->comp_cb(p_hwfn, pi_info->cookie);
|
||||
@ -1514,7 +1542,7 @@ static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
|
||||
if (IS_VF(p_hwfn->p_dev))
|
||||
return;/* @@@TBD MichalK- VF CAU... */
|
||||
|
||||
sb_offset = igu_sb_id * MAX_PIS_PER_SB;
|
||||
sb_offset = igu_sb_id * PIS_PER_SB;
|
||||
OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
|
||||
|
||||
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
|
||||
@ -1623,7 +1651,7 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
|
||||
{
|
||||
/* zero status block and ack counter */
|
||||
sb_info->sb_ack = 0;
|
||||
OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
|
||||
OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
|
||||
|
||||
if (IS_PF(p_hwfn->p_dev))
|
||||
ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
|
||||
@ -1706,6 +1734,14 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
|
||||
dma_addr_t sb_phy_addr, u16 sb_id)
|
||||
{
|
||||
sb_info->sb_virt = sb_virt_addr;
|
||||
struct status_block *sb_virt;
|
||||
|
||||
sb_virt = (struct status_block *)sb_info->sb_virt;
|
||||
|
||||
sb_info->sb_size = sizeof(*sb_virt);
|
||||
sb_info->sb_pi_array = sb_virt->pi_array;
|
||||
sb_info->sb_prod_index = &sb_virt->prod_index;
|
||||
|
||||
sb_info->sb_phys = sb_phy_addr;
|
||||
|
||||
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
|
||||
@ -1737,16 +1773,16 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
|
||||
/* The igu address will hold the absolute address that needs to be
|
||||
* written to for a specific status block
|
||||
*/
|
||||
if (IS_PF(p_hwfn->p_dev)) {
|
||||
if (IS_PF(p_hwfn->p_dev))
|
||||
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
|
||||
GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
|
||||
GTT_BAR0_MAP_REG_IGU_CMD +
|
||||
(sb_info->igu_sb_id << 3);
|
||||
|
||||
} else {
|
||||
sb_info->igu_addr =
|
||||
(u8 OSAL_IOMEM *)p_hwfn->regview +
|
||||
else
|
||||
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
|
||||
PXP_VF_BAR0_START_IGU +
|
||||
((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
|
||||
}
|
||||
((IGU_CMD_INT_ACK_BASE +
|
||||
sb_info->igu_sb_id) << 3);
|
||||
|
||||
sb_info->flags |= ECORE_SB_INFO_INIT;
|
||||
|
||||
@ -1767,7 +1803,7 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
/* zero status block and ack counter */
|
||||
sb_info->sb_ack = 0;
|
||||
OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
|
||||
OSAL_MEMSET(sb_info->sb_virt, 0, sb_info->sb_size);
|
||||
|
||||
if (IS_VF(p_hwfn->p_dev)) {
|
||||
ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
|
||||
@ -1816,11 +1852,10 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
|
||||
void *p_virt;
|
||||
|
||||
/* SB struct */
|
||||
p_sb =
|
||||
OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
|
||||
sizeof(*p_sb));
|
||||
p_sb = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb));
|
||||
if (!p_sb) {
|
||||
DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
|
||||
DP_NOTICE(p_hwfn, false,
|
||||
"Failed to allocate `struct ecore_sb_info'\n");
|
||||
return ECORE_NOMEM;
|
||||
}
|
||||
|
||||
@ -1838,7 +1873,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
|
||||
ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
|
||||
p_virt, p_phys, ECORE_SP_SB_ID);
|
||||
|
||||
OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
|
||||
p_sb->pi_info_arr_size = PIS_PER_SB;
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
@ -1853,14 +1888,14 @@ enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
|
||||
u8 pi;
|
||||
|
||||
/* Look for a free index */
|
||||
for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
|
||||
for (pi = 0; pi < p_sp_sb->pi_info_arr_size; pi++) {
|
||||
if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
|
||||
continue;
|
||||
|
||||
p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
|
||||
p_sp_sb->pi_info_arr[pi].cookie = cookie;
|
||||
*sb_idx = pi;
|
||||
*p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
|
||||
*p_fw_cons = &p_sp_sb->sb_info.sb_pi_array[pi];
|
||||
rc = ECORE_SUCCESS;
|
||||
break;
|
||||
}
|
||||
@ -1988,10 +2023,9 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
|
||||
bool cleanup_set,
|
||||
u16 opaque_fid)
|
||||
{
|
||||
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
|
||||
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
|
||||
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
|
||||
u8 type = 0; /* FIXME MichalS type??? */
|
||||
u32 data = 0, cmd_ctrl = 0, sb_bit, sb_bit_addr, pxp_addr;
|
||||
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH, val;
|
||||
u8 type = 0;
|
||||
|
||||
OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
|
||||
IGU_REG_CLEANUP_STATUS_0) != 0x200);
|
||||
@ -2006,6 +2040,7 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
|
||||
SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
|
||||
|
||||
/* Set the control register */
|
||||
pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
|
||||
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
|
||||
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
|
||||
SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
|
||||
@ -2077,9 +2112,11 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
|
||||
igu_sb_id);
|
||||
|
||||
/* Clear the CAU for the SB */
|
||||
for (pi = 0; pi < 12; pi++)
|
||||
for (pi = 0; pi < PIS_PER_SB; pi++)
|
||||
ecore_wr(p_hwfn, p_ptt,
|
||||
CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
|
||||
CAU_REG_PI_MEMORY +
|
||||
(igu_sb_id * PIS_PER_SB + pi) * 4,
|
||||
0);
|
||||
}
|
||||
|
||||
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
|
||||
@ -2679,12 +2716,12 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_sb_info_dbg *p_info)
|
||||
{
|
||||
u16 sbid = p_sb->igu_sb_id;
|
||||
int i;
|
||||
u32 i;
|
||||
|
||||
if (IS_VF(p_hwfn->p_dev))
|
||||
return ECORE_INVAL;
|
||||
|
||||
if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
|
||||
if (sbid >= NUM_OF_SBS(p_hwfn->p_dev))
|
||||
return ECORE_INVAL;
|
||||
|
||||
p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
|
||||
@ -2692,10 +2729,10 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
|
||||
p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
|
||||
IGU_REG_CONSUMER_MEM + sbid * 4);
|
||||
|
||||
for (i = 0; i < MAX_PIS_PER_SB; i++)
|
||||
for (i = 0; i < PIS_PER_SB; i++)
|
||||
p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
|
||||
CAU_REG_PI_MEMORY +
|
||||
sbid * 4 * MAX_PIS_PER_SB +
|
||||
sbid * 4 * PIS_PER_SB +
|
||||
i * 4);
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
|
@ -24,7 +24,12 @@ enum ecore_int_mode {
#endif

struct ecore_sb_info {
struct status_block *sb_virt;
void *sb_virt; /* ptr to "struct status_block_e{4,5}" */
u32 sb_size; /* size of "struct status_block_e{4,5}" */
__le16 *sb_pi_array; /* ptr to "sb_virt->pi_array" */
__le32 *sb_prod_index; /* ptr to "sb_virt->prod_index" */
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF

dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@ -42,7 +47,7 @@ struct ecore_sb_info {
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
u16 pi[MAX_PIS_PER_SB];
u16 pi[PIS_PER_SB];
};

struct ecore_sb_cnt_info {
@ -64,7 +69,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)

/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
prod = OSAL_LE32_TO_CPU(*sb_info->sb_prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;

@ -2323,18 +2323,17 @@ ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
struct ecore_queue_cid *p_cid, u32 rate)
|
||||
{
|
||||
struct ecore_mcp_link_state *p_link;
|
||||
u16 rl_id;
|
||||
u8 vport;
|
||||
|
||||
vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
|
||||
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
|
||||
"About to rate limit qm vport %d for queue %d with rate %d\n",
|
||||
vport, p_cid->rel.queue_id, rate);
|
||||
|
||||
return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
|
||||
p_link->speed);
|
||||
rl_id = vport; /* The "rl_id" is set as the "vport_id" */
|
||||
return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, rate);
|
||||
}
|
||||
|
||||
#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100
|
||||
@ -2358,8 +2357,7 @@ ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
|
||||
GTT_BAR0_MAP_REG_TSDM_RAM +
|
||||
addr = (u8 *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
|
||||
TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);
|
||||
|
||||
*(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
|
||||
|
@ -302,6 +302,8 @@ struct ecore_sp_vport_start_params {
|
||||
bool b_err_big_pkt;
|
||||
bool b_err_anti_spoof;
|
||||
bool b_err_ctrl_frame;
|
||||
bool b_en_rgfs;
|
||||
bool b_en_tgfs;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -22,13 +22,23 @@
|
||||
#include "ecore_sp_commands.h"
|
||||
#include "ecore_cxt.h"
|
||||
|
||||
#define CHIP_MCP_RESP_ITER_US 10
|
||||
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
|
||||
#define GRCBASE_MCP 0xe00000
|
||||
|
||||
#define ECORE_MCP_RESP_ITER_US 10
|
||||
#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
|
||||
#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
/* Non-ASIC:
|
||||
* The waiting interval is multiplied by 100 to reduce the impact of the
|
||||
* built-in delay of 100usec in each ecore_rd().
|
||||
* In addition, a factor of 4 comparing to ASIC is applied.
|
||||
*/
|
||||
#define ECORE_EMUL_MCP_RESP_ITER_US (ECORE_MCP_RESP_ITER_US * 100)
|
||||
#define ECORE_EMUL_DRV_MB_MAX_RETRIES ((ECORE_DRV_MB_MAX_RETRIES / 100) * 4)
|
||||
#define ECORE_EMUL_MCP_RESET_RETRIES ((ECORE_MCP_RESET_RETRIES / 100) * 4)
|
||||
#endif
|
||||
|
||||
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
|
||||
ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
|
||||
_val)
|
||||
@ -186,22 +196,23 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
|
||||
u32 drv_mb_offsize, mfw_mb_offsize, val;
|
||||
u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
|
||||
u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
|
||||
u32 drv_mb_offsize, mfw_mb_offsize;
|
||||
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
|
||||
|
||||
val = ecore_rd(p_hwfn, p_ptt, MCP_REG_CACHE_PAGING_ENABLE);
|
||||
p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
|
||||
if (!p_info->public_base) {
|
||||
DP_NOTICE(p_hwfn, false,
|
||||
"The address of the MCP scratch-pad is not configured\n");
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
|
||||
p_info->public_base = 0;
|
||||
/* Zeroed "public_base" implies no MFW */
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
DP_INFO(p_hwfn, "Emulation: Assume no MFW\n");
|
||||
#endif
|
||||
return ECORE_INVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
|
||||
if (!p_info->public_base)
|
||||
return ECORE_INVAL;
|
||||
|
||||
p_info->public_base |= GRCBASE_MCP;
|
||||
|
||||
@ -293,7 +304,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
|
||||
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
|
||||
/* Do not free mcp_info here, since public_base indicate that
|
||||
/* Do not free mcp_info here, since "public_base" indicates that
|
||||
* the MCP is not initialized
|
||||
*/
|
||||
return ECORE_SUCCESS;
|
||||
@ -334,14 +345,16 @@ static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
|
||||
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
|
||||
u32 prev_generic_por_0, seq, delay = ECORE_MCP_RESP_ITER_US, cnt = 0;
|
||||
u32 retries = ECORE_MCP_RESET_RETRIES;
|
||||
enum _ecore_status_t rc = ECORE_SUCCESS;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
delay = EMUL_MCP_RESP_ITER_US;
|
||||
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
|
||||
delay = ECORE_EMUL_MCP_RESP_ITER_US;
|
||||
retries = ECORE_EMUL_MCP_RESET_RETRIES;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (p_hwfn->mcp_info->b_block_cmd) {
|
||||
DP_NOTICE(p_hwfn, false,
|
||||
"The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
|
||||
@ -351,23 +364,24 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
|
||||
/* Ensure that only a single thread is accessing the mailbox */
|
||||
OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
|
||||
|
||||
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
|
||||
prev_generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
|
||||
|
||||
/* Set drv command along with the updated sequence */
|
||||
ecore_mcp_reread_offsets(p_hwfn, p_ptt);
|
||||
seq = ++p_hwfn->mcp_info->drv_mb_seq;
|
||||
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
|
||||
|
||||
/* Give the MFW up to 500 second (50*1000*10usec) to resume */
|
||||
do {
|
||||
/* Wait for MFW response */
|
||||
OSAL_UDELAY(delay);
|
||||
/* Give the FW up to 500 second (50*1000*10usec) */
|
||||
} while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
|
||||
MISCS_REG_GENERIC_POR_0)) &&
|
||||
(cnt++ < ECORE_MCP_RESET_RETRIES));
|
||||
|
||||
if (org_mcp_reset_seq !=
|
||||
ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
|
||||
if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
|
||||
prev_generic_por_0)
|
||||
break;
|
||||
} while (cnt++ < retries);
|
||||
|
||||
if (ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0) !=
|
||||
prev_generic_por_0) {
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
|
||||
"MCP was reset after %d usec\n", cnt * delay);
|
||||
} else {
|
||||
@ -380,6 +394,71 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
|
||||
return rc;
|
||||
}
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
static void ecore_emul_mcp_load_req(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_mcp_mb_params *p_mb_params)
|
||||
{
|
||||
if (GET_MFW_FIELD(p_mb_params->param, DRV_ID_MCP_HSI_VER) !=
|
||||
1 /* ECORE_LOAD_REQ_HSI_VER_1 */) {
|
||||
p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!loaded)
|
||||
p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
|
||||
else if (!loaded_port[p_hwfn->port_id])
|
||||
p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_PORT;
|
||||
else
|
||||
p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_FUNCTION;
|
||||
|
||||
/* On CMT, always tell that it's engine */
|
||||
if (ECORE_IS_CMT(p_hwfn->p_dev))
|
||||
p_mb_params->mcp_resp = FW_MSG_CODE_DRV_LOAD_ENGINE;
|
||||
|
||||
loaded++;
|
||||
loaded_port[p_hwfn->port_id]++;
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
|
||||
"Load phase: 0x%08x load cnt: 0x%x port id=%d port_load=%d\n",
|
||||
p_mb_params->mcp_resp, loaded, p_hwfn->port_id,
|
||||
loaded_port[p_hwfn->port_id]);
|
||||
}
|
||||
|
||||
static void ecore_emul_mcp_unload_req(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
loaded--;
|
||||
loaded_port[p_hwfn->port_id]--;
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", loaded);
|
||||
}
|
||||
|
||||
static enum _ecore_status_t
|
||||
ecore_emul_mcp_cmd(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_mcp_mb_params *p_mb_params)
|
||||
{
|
||||
if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
return ECORE_INVAL;
|
||||
|
||||
switch (p_mb_params->cmd) {
|
||||
case DRV_MSG_CODE_LOAD_REQ:
|
||||
ecore_emul_mcp_load_req(p_hwfn, p_mb_params);
|
||||
break;
|
||||
case DRV_MSG_CODE_UNLOAD_REQ:
|
||||
ecore_emul_mcp_unload_req(p_hwfn);
|
||||
break;
|
||||
case DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT:
|
||||
case DRV_MSG_CODE_RESOURCE_CMD:
|
||||
case DRV_MSG_CODE_MDUMP_CMD:
|
||||
case DRV_MSG_CODE_GET_ENGINE_CONFIG:
|
||||
case DRV_MSG_CODE_GET_PPFID_BITMAP:
|
||||
return ECORE_NOTIMPL;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Must be called while cmd_lock is acquired */
|
||||
static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
|
||||
{
|
||||
@ -488,13 +567,18 @@ void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
|
||||
u32 delay = ECORE_MCP_RESP_ITER_US;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
delay = ECORE_EMUL_MCP_RESP_ITER_US;
|
||||
#endif
|
||||
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
|
||||
cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
|
||||
cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
|
||||
OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
|
||||
OSAL_UDELAY(delay);
|
||||
cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
|
||||
OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
|
||||
OSAL_UDELAY(delay);
|
||||
cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
|
||||
|
||||
DP_NOTICE(p_hwfn, false,
|
||||
@ -617,15 +701,21 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
|
||||
{
|
||||
osal_size_t union_data_size = sizeof(union drv_union_data);
|
||||
u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
|
||||
u32 delay = CHIP_MCP_RESP_ITER_US;
|
||||
u32 usecs = ECORE_MCP_RESP_ITER_US;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
delay = EMUL_MCP_RESP_ITER_US;
|
||||
/* There is a built-in delay of 100usec in each MFW response read */
|
||||
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
|
||||
max_retries /= 10;
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn))
|
||||
return ecore_emul_mcp_cmd(p_hwfn, p_mb_params);
|
||||
|
||||
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
|
||||
max_retries = ECORE_EMUL_DRV_MB_MAX_RETRIES;
|
||||
usecs = ECORE_EMUL_MCP_RESP_ITER_US;
|
||||
}
|
||||
#endif
|
||||
if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
|
||||
max_retries = DIV_ROUND_UP(max_retries, 1000);
|
||||
usecs *= 1000;
|
||||
}
|
||||
|
||||
/* MCP not initialized */
|
||||
if (!ecore_mcp_is_init(p_hwfn)) {
|
||||
@ -650,7 +740,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
|
||||
}
|
||||
|
||||
return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
|
||||
delay);
|
||||
usecs);
|
||||
}
|
||||
|
||||
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
|
||||
@ -660,18 +750,6 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_mcp_mb_params mb_params;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
|
||||
loaded--;
|
||||
loaded_port[p_hwfn->port_id]--;
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
|
||||
loaded);
|
||||
}
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
|
||||
mb_params.cmd = cmd;
|
||||
mb_params.param = param;
|
||||
@ -745,34 +823,6 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
|
||||
u32 *p_load_code)
|
||||
{
|
||||
static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
|
||||
|
||||
if (!loaded)
|
||||
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
|
||||
else if (!loaded_port[p_hwfn->port_id])
|
||||
load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
|
||||
else
|
||||
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
|
||||
|
||||
/* On CMT, always tell that it's engine */
|
||||
if (ECORE_IS_CMT(p_hwfn->p_dev))
|
||||
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
|
||||
|
||||
*p_load_code = load_phase;
|
||||
loaded++;
|
||||
loaded_port[p_hwfn->port_id]++;
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
|
||||
"Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
|
||||
*p_load_code, loaded, p_hwfn->port_id,
|
||||
loaded_port[p_hwfn->port_id]);
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool
|
||||
ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
|
||||
enum ecore_override_force_load override_force_load)
|
||||
@ -1004,13 +1054,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
|
||||
u8 mfw_drv_role = 0, mfw_force_cmd;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
OSAL_MEM_ZERO(&in_params, sizeof(in_params));
|
||||
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
|
||||
in_params.drv_ver_0 = ECORE_VERSION;
|
||||
@ -1166,15 +1209,17 @@ static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
|
||||
u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
|
||||
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
|
||||
ECORE_PATH_ID(p_hwfn));
|
||||
u32 disabled_vfs[VF_MAX_STATIC / 32];
|
||||
u32 disabled_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
|
||||
int i;
|
||||
|
||||
OSAL_MEM_ZERO(disabled_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
|
||||
"Reading Disabled VF information from [offset %08x],"
|
||||
" path_addr %08x\n",
|
||||
mfw_path_offsize, path_addr);
|
||||
|
||||
for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
|
||||
for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++) {
|
||||
disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
|
||||
path_addr +
|
||||
OFFSETOF(struct public_path,
|
||||
@ -1193,16 +1238,11 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
u32 *vfs_to_ack)
|
||||
{
|
||||
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
|
||||
PUBLIC_FUNC);
|
||||
u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
|
||||
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
|
||||
MCP_PF_ID(p_hwfn));
|
||||
struct ecore_mcp_mb_params mb_params;
|
||||
enum _ecore_status_t rc;
|
||||
int i;
|
||||
u16 i;
|
||||
|
||||
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
|
||||
for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
|
||||
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
|
||||
"Acking VFs [%08x,...,%08x] - %08x\n",
|
||||
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
|
||||
@ -1210,7 +1250,7 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
|
||||
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
|
||||
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
|
||||
mb_params.p_data_src = vfs_to_ack;
|
||||
mb_params.data_src_size = VF_MAX_STATIC / 8;
|
||||
mb_params.data_src_size = (u8)VF_BITMAP_SIZE_IN_BYTES;
|
||||
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
|
||||
&mb_params);
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
@ -1219,13 +1259,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
|
||||
return ECORE_TIMEOUT;
|
||||
}
|
||||
|
||||
/* TMP - clear the ACK bits; should be done by MFW */
|
||||
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
|
||||
ecore_wr(p_hwfn, p_ptt,
|
||||
func_addr +
|
||||
OFFSETOF(struct public_func, drv_ack_vf_disabled) +
|
||||
i * sizeof(u32), 0);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -1471,8 +1504,11 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
|
||||
u32 cmd;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
if (b_up)
|
||||
OSAL_LINK_UPDATE(p_hwfn);
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Set the shmem configuration according to params */
|
||||
@ -1853,6 +1889,13 @@ ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
|
||||
struct mdump_config_stc mdump_config;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
|
||||
DP_INFO(p_hwfn, "Emulation: Can't get mdump info\n");
|
||||
return ECORE_NOTIMPL;
|
||||
}
|
||||
#endif
|
||||
|
||||
OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
|
||||
|
||||
addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
|
||||
@ -2042,6 +2085,9 @@ ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
|
||||
/* update storm FW with negotiation results */
|
||||
ecore_sp_pf_update_ufp(p_hwfn);
|
||||
|
||||
/* update stag pcp value */
|
||||
ecore_sp_pf_update_stag(p_hwfn);
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
@ -2159,9 +2205,9 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
|
||||
u32 global_offsize;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
|
||||
return ECORE_SUCCESS;
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
|
||||
DP_INFO(p_hwfn, "Emulation: Can't get MFW version\n");
|
||||
return ECORE_NOTIMPL;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2203,26 +2249,29 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
u32 *p_media_type)
|
||||
{
|
||||
enum _ecore_status_t rc = ECORE_SUCCESS;
|
||||
*p_media_type = MEDIA_UNSPECIFIED;
|
||||
|
||||
/* TODO - Add support for VFs */
|
||||
if (IS_VF(p_hwfn->p_dev))
|
||||
return ECORE_INVAL;
|
||||
|
||||
if (!ecore_mcp_is_init(p_hwfn)) {
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
DP_INFO(p_hwfn, "Emulation: Can't get media type\n");
|
||||
return ECORE_NOTIMPL;
|
||||
}
|
||||
#endif
|
||||
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
|
||||
return ECORE_BUSY;
|
||||
}
|
||||
|
||||
if (!p_ptt) {
|
||||
*p_media_type = MEDIA_UNSPECIFIED;
|
||||
rc = ECORE_INVAL;
|
||||
} else {
|
||||
*p_media_type = ecore_rd(p_hwfn, p_ptt,
|
||||
p_hwfn->mcp_info->port_addr +
|
||||
OFFSETOF(struct public_port,
|
||||
media_type));
|
||||
}
|
||||
if (!p_ptt)
|
||||
return ECORE_INVAL;
|
||||
|
||||
*p_media_type = ecore_rd(p_hwfn, p_ptt,
|
||||
p_hwfn->mcp_info->port_addr +
|
||||
OFFSETOF(struct public_port, media_type));
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
@ -2626,9 +2675,9 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
|
||||
u32 flash_size;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
|
||||
DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
|
||||
return ECORE_INVAL;
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
|
||||
DP_INFO(p_hwfn, "Emulation: Can't get flash size\n");
|
||||
return ECORE_NOTIMPL;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2725,6 +2774,16 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
u8 vf_id, u8 num)
|
||||
{
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && !ecore_mcp_is_init(p_hwfn)) {
|
||||
DP_INFO(p_hwfn,
|
||||
"Emulation: Avoid sending the %s mailbox command\n",
|
||||
ECORE_IS_BB(p_hwfn->p_dev) ? "CFG_VF_MSIX" :
|
||||
"CFG_PF_VFS_MSIX");
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (ECORE_IS_BB(p_hwfn->p_dev))
|
||||
return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
|
||||
else
|
||||
|
@ -75,11 +75,16 @@ struct ecore_mcp_mb_params {
|
||||
u32 cmd;
|
||||
u32 param;
|
||||
void *p_data_src;
|
||||
u8 data_src_size;
|
||||
void *p_data_dst;
|
||||
u8 data_dst_size;
|
||||
u32 mcp_resp;
|
||||
u32 mcp_param;
|
||||
u8 data_src_size;
|
||||
u8 data_dst_size;
|
||||
u32 flags;
|
||||
#define ECORE_MB_FLAG_CAN_SLEEP (0x1 << 0)
|
||||
#define ECORE_MB_FLAG_AVOID_BLOCK (0x1 << 1)
|
||||
#define ECORE_MB_FLAGS_IS_SET(params, flag) \
|
||||
((params) != OSAL_NULL && ((params)->flags & ECORE_MB_FLAG_##flag))
|
||||
};
|
||||
|
||||
struct ecore_drv_tlv_hdr {
|
||||
|
@ -62,6 +62,7 @@ struct ecore_iscsi_pf_params {
|
||||
u8 num_uhq_pages_in_ring;
|
||||
u8 num_queues;
|
||||
u8 log_page_size;
|
||||
u8 log_page_size_conn;
|
||||
u8 rqe_log_size;
|
||||
u8 max_fin_rt;
|
||||
u8 gl_rq_pi;
|
||||
|
@ -355,14 +355,16 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
|
||||
p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
|
||||
|
||||
/* enable_stag_pri_change should be set if port is in BD mode or,
|
||||
* UFP with Host Control mode or, UFP with DCB over base interface.
|
||||
* UFP with Host Control mode.
|
||||
*/
|
||||
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
|
||||
if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
|
||||
(p_hwfn->p_dcbx_info->results.dcbx_enabled))
|
||||
if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
|
||||
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
|
||||
else
|
||||
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
|
||||
|
||||
p_ramrod->outer_tag_config.outer_tag.tci |=
|
||||
OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));
|
||||
}
|
||||
|
||||
/* Place EQ address in RAMROD */
|
||||
@ -459,8 +461,7 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
|
||||
return rc;
|
||||
|
||||
p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
|
||||
if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
|
||||
(p_hwfn->p_dcbx_info->results.dcbx_enabled))
|
||||
if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
|
||||
p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
|
||||
else
|
||||
p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
|
||||
@ -637,6 +638,10 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
|
||||
p_ent->ramrod.pf_update.mf_vlan |=
|
||||
OSAL_CPU_TO_LE16(((u16)p_hwfn->ufp_info.tc << 13));
|
||||
|
||||
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
|
||||
}
|
||||
|
||||
|
@ -185,11 +185,26 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
|
||||
/***************************************************************************
|
||||
* HSI access
|
||||
***************************************************************************/
|
||||
|
||||
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
|
||||
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
|
||||
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
|
||||
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
|
||||
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
|
||||
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
|
||||
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
|
||||
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
|
||||
|
||||
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_spq *p_spq)
|
||||
{
|
||||
__le32 *p_spq_base_lo, *p_spq_base_hi;
|
||||
struct regpair *p_consolid_base_addr;
|
||||
u8 *p_flags1, *p_flags9, *p_flags10;
|
||||
struct core_conn_context *p_cxt;
|
||||
struct ecore_cxt_info cxt_info;
|
||||
u32 core_conn_context_size;
|
||||
__le16 *p_physical_q0;
|
||||
u16 physical_q;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
@ -197,41 +212,39 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
|
||||
|
||||
if (rc < 0) {
|
||||
if (rc != ECORE_SUCCESS) {
|
||||
DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
|
||||
p_spq->cid);
|
||||
return;
|
||||
}
|
||||
|
||||
p_cxt = cxt_info.p_cxt;
|
||||
core_conn_context_size = sizeof(*p_cxt);
|
||||
p_flags1 = &p_cxt->xstorm_ag_context.flags1;
|
||||
p_flags9 = &p_cxt->xstorm_ag_context.flags9;
|
||||
p_flags10 = &p_cxt->xstorm_ag_context.flags10;
|
||||
p_physical_q0 = &p_cxt->xstorm_ag_context.physical_q0;
|
||||
p_spq_base_lo = &p_cxt->xstorm_st_context.spq_base_lo;
|
||||
p_spq_base_hi = &p_cxt->xstorm_st_context.spq_base_hi;
|
||||
p_consolid_base_addr = &p_cxt->xstorm_st_context.consolid_base_addr;
|
||||
|
||||
/* @@@TBD we zero the context until we have ilt_reset implemented. */
|
||||
OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
|
||||
OSAL_MEM_ZERO(p_cxt, core_conn_context_size);
|
||||
|
||||
if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
|
||||
XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
|
||||
XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
|
||||
/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
|
||||
* E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
|
||||
*/
|
||||
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
|
||||
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
|
||||
}
|
||||
SET_FIELD(*p_flags10, XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
|
||||
SET_FIELD(*p_flags1, XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
|
||||
SET_FIELD(*p_flags9, XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
|
||||
|
||||
/* CDU validation - FIXME currently disabled */
|
||||
|
||||
/* QM physical queue */
|
||||
physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
|
||||
p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
|
||||
*p_physical_q0 = OSAL_CPU_TO_LE16(physical_q);
|
||||
|
||||
p_cxt->xstorm_st_context.spq_base_lo =
|
||||
DMA_LO_LE(p_spq->chain.p_phys_addr);
|
||||
p_cxt->xstorm_st_context.spq_base_hi =
|
||||
DMA_HI_LE(p_spq->chain.p_phys_addr);
|
||||
*p_spq_base_lo = DMA_LO_LE(p_spq->chain.p_phys_addr);
|
||||
*p_spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr);
|
||||
|
||||
DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
|
||||
DMA_REGPAIR_LE(*p_consolid_base_addr,
|
||||
p_hwfn->p_consq->chain.p_phys_addr);
|
||||
}
|
||||
|
||||
|
@ -906,7 +906,7 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
|
||||
*
|
||||
* @brief ecore_iov_config_perm_table - configure the permission
|
||||
* zone table.
|
||||
* In E4, queue zone permission table size is 320x9. There
|
||||
* The queue zone permission table size is 320x9. There
|
||||
* are 320 VF queues for single engine device (256 for dual
|
||||
* engine device), and each entry has the following format:
|
||||
* {Valid, VF[7:0]}
|
||||
@ -967,6 +967,9 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
for (qid = 0; qid < num_rx_queues; qid++) {
|
||||
p_block = ecore_get_igu_free_sb(p_hwfn, false);
|
||||
if (!p_block)
|
||||
continue;
|
||||
|
||||
vf->igu_sbs[qid] = p_block->igu_sb_id;
|
||||
p_block->status &= ~ECORE_IGU_STATUS_FREE;
|
||||
SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
|
||||
@ -1064,6 +1067,15 @@ void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
|
||||
p_bulletin->capability_speed = p_caps->speed_capabilities;
|
||||
}
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
/* Increase the maximum number of DORQ FIFO entries used by child VFs */
|
||||
ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
|
||||
}
|
||||
#endif
|
||||
|
||||
enum _ecore_status_t
|
||||
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
@ -1188,18 +1200,39 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
&link_params, &link_state, &link_caps);
|
||||
|
||||
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
if (rc == ECORE_SUCCESS) {
|
||||
vf->b_init = true;
|
||||
p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
|
||||
vf->b_init = true;
|
||||
#ifndef REMOVE_DBG
|
||||
p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
|
||||
(1ULL << (vf->relative_vf_id % 64));
|
||||
#endif
|
||||
|
||||
if (IS_LEAD_HWFN(p_hwfn))
|
||||
p_hwfn->p_dev->p_iov_info->num_vfs++;
|
||||
if (IS_LEAD_HWFN(p_hwfn))
|
||||
p_hwfn->p_dev->p_iov_info->num_vfs++;
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
|
||||
#endif
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
return rc;
|
||||
#ifndef ASIC_ONLY
|
||||
static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
if (!ecore_mcp_is_init(p_hwfn)) {
|
||||
u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
|
||||
PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);
|
||||
|
||||
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
|
||||
sriov_dis);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
@ -1257,6 +1290,11 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
|
||||
p_hwfn->p_dev->p_iov_info->num_vfs--;
|
||||
}
|
||||
|
||||
#ifndef ASIC_ONLY
|
||||
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
|
||||
ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
|
||||
#endif
|
||||
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
@ -1391,7 +1429,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
|
||||
|
||||
eng_vf_id = p_vf->abs_vf_id;
|
||||
|
||||
OSAL_MEMSET(¶ms, 0, sizeof(struct dmae_params));
|
||||
OSAL_MEMSET(¶ms, 0, sizeof(params));
|
||||
SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
|
||||
params.dst_vf_id = eng_vf_id;
|
||||
|
||||
@ -1787,7 +1825,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
|
||||
/* fill in pfdev info */
|
||||
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
|
||||
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
|
||||
pfdev_info->indices_per_sb = MAX_PIS_PER_SB;
|
||||
pfdev_info->indices_per_sb = PIS_PER_SB;
|
||||
|
||||
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
|
||||
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
|
||||
@ -2247,10 +2285,14 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
|
||||
ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
|
||||
sizeof(struct channel_list_end_tlv));
|
||||
|
||||
/* Update the TLV with the response */
|
||||
/* Update the TLV with the response.
|
||||
* The VF Rx producers are located in the vf zone.
|
||||
*/
|
||||
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
|
||||
req = &mbx->req_virt->start_rxq;
|
||||
p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
|
||||
|
||||
p_tlv->offset =
|
||||
PXP_VF_BAR0_START_MSDM_ZONE_B +
|
||||
OFFSETOF(struct mstorm_vf_zone,
|
||||
non_trigger.eth_rx_queue_producers) +
|
||||
sizeof(struct eth_rx_prod_data) * req->rx_qid;
|
||||
@ -2350,13 +2392,15 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
|
||||
if (p_cid == OSAL_NULL)
|
||||
goto out;
|
||||
|
||||
/* Legacy VFs have their Producers in a different location, which they
|
||||
* calculate on their own and clean the producer prior to this.
|
||||
/* The VF Rx producers are located in the vf zone.
|
||||
* Legacy VFs have their producers in the queue zone, but they
|
||||
* calculate the location on their own and clean them prior to this.
|
||||
*/
|
||||
if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
|
||||
REG_WR(p_hwfn,
|
||||
GTT_BAR0_MAP_REG_MSDM_RAM +
|
||||
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
|
||||
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
|
||||
req->rx_qid),
|
||||
0);
|
||||
|
||||
rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
|
||||
@ -3855,48 +3899,70 @@ ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
|
||||
return ECORE_SUCCESS;
|
||||
}
|
||||
|
||||
#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
|
||||
|
||||
static enum _ecore_status_t
|
||||
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
|
||||
{
|
||||
u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
|
||||
int i, cnt;
|
||||
u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
|
||||
u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
|
||||
u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
|
||||
u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
|
||||
u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
|
||||
u8 port_id, tc, tc_id = 0, voq = 0;
|
||||
int cnt;
|
||||
|
||||
/* Read initial consumers & producers */
|
||||
for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
|
||||
u32 prod;
|
||||
|
||||
cons[i] = ecore_rd(p_hwfn, p_ptt,
|
||||
PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
|
||||
i * 0x40);
|
||||
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
|
||||
/* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
|
||||
for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
|
||||
tc_id = (tc < max_phys_tcs_per_port) ?
|
||||
tc :
|
||||
PURE_LB_TC;
|
||||
voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
|
||||
cons[voq] = ecore_rd(p_hwfn, p_ptt,
|
||||
cons_voq0_addr + voq * 0x40);
|
||||
prod = ecore_rd(p_hwfn, p_ptt,
|
||||
PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
|
||||
i * 0x40);
|
||||
distance[i] = prod - cons[i];
|
||||
prod_voq0_addr + voq * 0x40);
|
||||
distance[voq] = prod - cons[voq];
|
||||
}
|
||||
}
|
||||
|
||||
/* Wait for consumers to pass the producers */
|
||||
i = 0;
|
||||
port_id = 0;
|
||||
tc = 0;
|
||||
for (cnt = 0; cnt < 50; cnt++) {
|
||||
for (; i < MAX_NUM_VOQS_E4; i++) {
|
||||
u32 tmp;
|
||||
|
||||
for (; port_id < max_ports_per_engine; port_id++) {
|
||||
/* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
|
||||
for (; tc < max_phys_tcs_per_port + 1; tc++) {
|
||||
tc_id = (tc < max_phys_tcs_per_port) ?
|
||||
tc :
|
||||
PURE_LB_TC;
|
||||
voq = VOQ(port_id, tc_id,
|
||||
max_phys_tcs_per_port);
|
||||
tmp = ecore_rd(p_hwfn, p_ptt,
|
||||
PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
|
||||
i * 0x40);
|
||||
if (distance[i] > tmp - cons[i])
|
||||
cons_voq0_addr + voq * 0x40);
|
||||
if (distance[voq] > tmp - cons[voq])
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == MAX_NUM_VOQS_E4)
|
||||
if (tc == max_phys_tcs_per_port + 1)
|
||||
tc = 0;
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (port_id == max_ports_per_engine)
|
||||
break;
|
||||
|
||||
OSAL_MSLEEP(20);
|
||||
}
|
||||
|
||||
if (cnt == 50) {
|
||||
DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
|
||||
p_vf->abs_vf_id, i);
|
||||
DP_ERR(p_hwfn,
|
||||
"VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
|
||||
p_vf->abs_vf_id, voq, port_id, tc_id);
|
||||
return ECORE_TIMEOUT;
|
||||
}
|
||||
|
||||
@ -3996,11 +4062,11 @@ cleanup:
|
||||
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt)
|
||||
{
|
||||
u32 ack_vfs[VF_MAX_STATIC / 32];
|
||||
u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
|
||||
enum _ecore_status_t rc = ECORE_SUCCESS;
|
||||
u16 i;
|
||||
|
||||
OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
|
||||
OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
|
||||
|
||||
/* Since BRB <-> PRS interface can't be tested as part of the flr
|
||||
* polling due to HW limitations, simply sleep a bit. And since
|
||||
@ -4019,10 +4085,10 @@ enum _ecore_status_t
|
||||
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt, u16 rel_vf_id)
|
||||
{
|
||||
u32 ack_vfs[VF_MAX_STATIC / 32];
|
||||
u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
|
||||
enum _ecore_status_t rc = ECORE_SUCCESS;
|
||||
|
||||
OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
|
||||
OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
|
||||
|
||||
/* Wait instead of polling the BRB <-> PRS interface */
|
||||
OSAL_MSLEEP(100);
|
||||
@ -4039,7 +4105,8 @@ bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
|
||||
u16 i;
|
||||
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
|
||||
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
|
||||
|
||||
for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
|
||||
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
|
||||
"[%08x,...,%08x]: %08x\n",
|
||||
i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
|
||||
@ -4396,7 +4463,7 @@ enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
|
||||
if (!vf_info)
|
||||
return ECORE_INVAL;
|
||||
|
||||
OSAL_MEMSET(¶ms, 0, sizeof(struct dmae_params));
|
||||
OSAL_MEMSET(¶ms, 0, sizeof(params));
|
||||
SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
|
||||
SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
|
||||
params.src_vf_id = vf_info->abs_vf_id;
|
||||
@ -4785,9 +4852,9 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
|
||||
struct ecore_ptt *p_ptt,
|
||||
int vfid, int val)
|
||||
{
|
||||
struct ecore_mcp_link_state *p_link;
|
||||
struct ecore_vf_info *vf;
|
||||
u8 abs_vp_id = 0;
|
||||
u16 rl_id;
|
||||
enum _ecore_status_t rc;
|
||||
|
||||
vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
|
||||
@ -4799,10 +4866,8 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
|
||||
if (rc != ECORE_SUCCESS)
|
||||
return rc;
|
||||
|
||||
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
|
||||
|
||||
return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
|
||||
p_link->speed);
|
||||
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
|
||||
return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
|
||||
}
|
||||
|
||||
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
|
||||
|
@ -257,6 +257,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
|
||||
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
|
||||
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
|
||||
struct ecore_vf_acquire_sw_info vf_sw_info;
|
||||
struct ecore_dev *p_dev = p_hwfn->p_dev;
|
||||
struct vf_pf_resc_request *p_resc;
|
||||
bool resources_acquired = false;
|
||||
struct vfpf_acquire_tlv *req;
|
||||
@ -427,20 +428,20 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
|
||||
p_iov->bulletin.size = resp->bulletin_size;
|
||||
|
||||
/* get HW info */
|
||||
p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
|
||||
p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
|
||||
p_dev->type = resp->pfdev_info.dev_type;
|
||||
p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
|
||||
|
||||
DP_INFO(p_hwfn, "Chip details - %s%d\n",
|
||||
ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
|
||||
ECORE_IS_BB(p_dev) ? "BB" : "AH",
|
||||
CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
|
||||
|
||||
p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
|
||||
p_dev->chip_num = pfdev_info->chip_num & 0xffff;
|
||||
|
||||
/* Learn of the possibility of CMT */
|
||||
if (IS_LEAD_HWFN(p_hwfn)) {
|
||||
if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
|
||||
DP_INFO(p_hwfn, "100g VF\n");
|
||||
p_hwfn->p_dev->num_hwfns = 2;
|
||||
p_dev->num_hwfns = 2;
|
||||
}
|
||||
}
|
||||
|
||||
@ -636,10 +637,6 @@ free_p_iov:
|
||||
return ECORE_NOMEM;
|
||||
}
|
||||
|
||||
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
|
||||
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
|
||||
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
|
||||
|
||||
/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
|
||||
static void
|
||||
__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
|
||||
@ -828,8 +825,7 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
|
||||
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
|
||||
u32 init_prod_val = 0;
|
||||
|
||||
*pp_prod = (u8 OSAL_IOMEM *)
|
||||
p_hwfn->regview +
|
||||
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
|
||||
MSTORM_QZONE_START(p_hwfn->p_dev) +
|
||||
(hw_qid) * MSTORM_QZONE_SIZE;

@ -44,7 +44,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
#define QEDE_PMD_VERSION_MINOR 10
#define QEDE_PMD_VERSION_MINOR 11
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1

@ -18,7 +18,7 @@
char qede_fw_file[PATH_MAX];

static const char * const QEDE_DEFAULT_FIRMWARE =
"/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
"/lib/firmware/qed/qed_init_values-8.40.25.0.bin";

static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)

@ -805,7 +805,7 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
fp->rxq->handle = ret_params.p_handle;

fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
fp->rxq->hw_cons_ptr = &fp->sb_info->sb_pi_array[RX_PI];
qede_update_rx_prod(qdev, fp->rxq);
eth_dev->data->rx_queue_state[rx_queue_id] =
RTE_ETH_QUEUE_STATE_STARTED;
@ -863,7 +863,7 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
txq->doorbell_addr = ret_params.p_doorbell;
txq->handle = ret_params.p_handle;

txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
txq->hw_cons_ptr = &fp->sb_info->sb_pi_array[TX_PI(0)];
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
DB_DEST_XCM);
SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,