qlnx: clean up empty lines in .c and .h files

This commit is contained in:
Mateusz Guzik 2020-09-01 21:57:33 +00:00
parent 29c3bcd9d0
commit 3720573374
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=365169
81 changed files with 65 additions and 1299 deletions

View File

@ -92,7 +92,6 @@ extern void qlnx_get_protocol_stats(void *cdev, int proto_type,
extern void qlnx_sp_isr(void *arg);
extern void qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req,
void *p_sw_info);
extern void qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id);
@ -126,7 +125,7 @@ is_power_of_2(unsigned long n)
{
return (n == roundup_pow_of_two(n));
}
static __inline unsigned long
rounddown_pow_of_two(unsigned long x)
{
@ -345,7 +344,6 @@ do { \
} \
} while (0)
#define OSAL_LIST_IS_EMPTY(list) \
((list)->cnt == 0)
@ -364,7 +362,6 @@ do { \
entry = (type *)tmp_entry, \
tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
#define OSAL_BAR_SIZE(dev, bar_id) qlnx_pci_bus_get_bar_size(dev, bar_id)
#define OSAL_PCI_READ_CONFIG_BYTE(dev, reg, value) \
@ -440,7 +437,6 @@ qlnx_log2(uint32_t x)
#define OSAL_UNLIKELY
#define OSAL_NULL NULL
#define OSAL_MAX_T(type, __max1, __max2) max_t(type, __max1, __max2)
#define OSAL_MIN_T(type, __max1, __max2) min_t(type, __max1, __max2)
@ -513,7 +509,6 @@ bitmap_weight(unsigned long *bitmap, unsigned nbits)
#endif
#define OSAL_TEST_AND_FLIP_BIT qlnx_test_and_change_bit
#define OSAL_TEST_AND_CLEAR_BIT test_and_clear_bit
#define OSAL_MEMCMP memcmp

View File

@ -72,7 +72,6 @@
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 /*Maximum number of RX queues that can be allocated to VF with doubled VF zone size. Up to 96 VF supported in this mode*/
#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 /*Maximum number of RX queues that can be allocated to VF with 4 VF zone size. Up to 48 VF supported in this mode*/
/********************************/
/* CORE (LIGHT L2) FW CONSTANTS */
/********************************/
@ -97,12 +96,10 @@
#define MAX_NUM_LL2_RX_QUEUES 48
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
///////////////////////////////////////////////////////////////////////////////////////////////////
// Include firmware version number only - do not add constants here to avoid redundant compilations
///////////////////////////////////////////////////////////////////////////////////////////////////
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 33
#define FW_REVISION_VERSION 7
@ -196,7 +193,6 @@
#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
/*****************/
/* DQ CONSTANTS */
/*****************/
@ -412,12 +408,10 @@
#define PIS_PER_SB_E5 8
#define MAX_PIS_PER_SB OSAL_MAX_T(PIS_PER_SB_E4,PIS_PER_SB_E5)
#define CAU_HC_STOPPED_STATE 3 /* fsm is stopped or not valid for this sb */
#define CAU_HC_DISABLE_STATE 4 /* fsm is working without interrupt coalescing for this sb*/
#define CAU_HC_ENABLE_STATE 0 /* fsm is working with interrupt coalescing for this sb*/
/*****************/
/* IGU CONSTANTS */
/*****************/
@ -594,12 +588,10 @@
#define PXP_NUM_ILT_RECORDS_E5 13664
// Host Interface
#define PXP_QUEUES_ZONE_MAX_NUM_E4 320
#define PXP_QUEUES_ZONE_MAX_NUM_E5 512
/*****************/
/* PRM CONSTANTS */
/*****************/
@ -608,7 +600,6 @@
/* SDMs CONSTANTS */
/*****************/
#define SDM_OP_GEN_TRIG_NONE 0
#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
#define SDM_OP_GEN_TRIG_AGG_INT 2
@ -660,14 +651,12 @@ struct coalescing_timeset
#define COALESCING_TIMESET_VALID_SHIFT 7
};
struct common_queue_zone
{
__le16 ring_drv_data_consumer;
__le16 reserved;
};
/*
* ETH Rx producers data
*/
@ -677,7 +666,6 @@ struct eth_rx_prod_data
__le16 cqe_prod /* CQE producer. */;
};
struct tcp_ulp_connect_done_params
{
__le16 mss;
@ -696,7 +684,6 @@ struct iscsi_connect_done_results
struct tcp_ulp_connect_done_params params /* decided tcp params after connect done */;
};
struct iscsi_eqe_data
{
__le16 icid /* Context ID of the connection */;
@ -712,7 +699,6 @@ struct iscsi_eqe_data
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
/*
* Multi function mode
*/
@ -724,7 +710,6 @@ enum mf_mode
MAX_MF_MODE
};
/*
* Per-protocol connection types
*/
@ -743,7 +728,6 @@ enum protocol_type
MAX_PROTOCOL_TYPE
};
struct regpair
{
__le32 lo /* low word for reg-pair */;
@ -768,9 +752,6 @@ union rdma_eqe_data
struct rdma_eqe_destroy_qp rdma_destroy_qp_data /* RoCE Destroy Event Data */;
};
/*
* Ustorm Queue Zone
*/
@ -780,14 +761,12 @@ struct ustorm_eth_queue_zone
u8 reserved[3];
};
struct ustorm_queue_zone
{
struct ustorm_eth_queue_zone eth;
struct common_queue_zone common;
};
/*
* status block structure
*/
@ -804,7 +783,6 @@ struct cau_pi_entry
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
/*
* status block structure
*/
@ -836,7 +814,6 @@ struct cau_sb_entry
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
/*
* Igu cleanup bit values to distinguish between clean or producer consumer update.
*/
@ -847,7 +824,6 @@ enum command_type_bit
MAX_COMMAND_TYPE_BIT
};
/*
* core doorbell data
*/
@ -868,7 +844,6 @@ struct core_db_data
__le16 spq_prod;
};
/*
* Enum of doorbell aggregative command selection
*/
@ -881,7 +856,6 @@ enum db_agg_cmd_sel
MAX_DB_AGG_CMD_SEL
};
/*
* Enum of doorbell destination
*/
@ -894,7 +868,6 @@ enum db_dest
MAX_DB_DEST
};
/*
* Enum of doorbell DPM types
*/
@ -907,7 +880,6 @@ enum db_dpm_type
MAX_DB_DPM_TYPE
};
/*
* Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM burst
*/
@ -932,7 +904,6 @@ struct db_l2_dpm_data
#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
};
/*
* Structure for SGE in a DPM doorbell of type DPM_L2_BD
*/
@ -952,7 +923,6 @@ struct db_l2_dpm_sge
__le32 reserved2;
};
/*
* Structure for doorbell address, in legacy mode
*/
@ -967,7 +937,6 @@ struct db_legacy_addr
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
/*
* Structure for doorbell address, in PWM mode
*/
@ -986,7 +955,6 @@ struct db_pwm_addr
#define DB_PWM_ADDR_RESERVED1_SHIFT 28
};
/*
* Parameters to RDMA firmware, passed in EDPM doorbell
*/
@ -1023,8 +991,6 @@ struct db_rdma_dpm_data
struct db_rdma_dpm_params params /* parameters passed to RDMA firmware */;
};
/*
* Igu interrupt command
*/
@ -1037,7 +1003,6 @@ enum igu_int_cmd
MAX_IGU_INT_CMD
};
/*
* IGU producer or consumer update command
*/
@ -1061,7 +1026,6 @@ struct igu_prod_cons_update
__le32 reserved1;
};
/*
* Igu segments access for default status block only
*/
@ -1072,7 +1036,6 @@ enum igu_seg_access
MAX_IGU_SEG_ACCESS
};
/*
* Enumeration for L3 type field of parsing_and_err_flags. L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according to the last-ethertype)
*/
@ -1084,7 +1047,6 @@ enum l3_type
MAX_L3_TYPE
};
/*
 * Enumeration for l4Protocol field of parsing_and_err_flags. L4-protocol 0 - none, 1 - TCP, 2 - UDP. If the packet is an IPv4 fragment, and it is not the first fragment, the protocol type should be set to none.
*/
@ -1096,7 +1058,6 @@ enum l4_protocol
MAX_L4_PROTOCOL
};
/*
* Parsing and error flags field.
*/
@ -1133,7 +1094,6 @@ struct parsing_and_err_flags
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
/*
* Parsing error flags bitmap.
*/
@ -1174,7 +1134,6 @@ struct parsing_err_flags
#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
};
/*
* Pb context
*/
@ -1183,7 +1142,6 @@ struct pb_context
__le32 crc[4];
};
/*
* Concrete Function ID.
*/
@ -1202,7 +1160,6 @@ struct pxp_concrete_fid
#define PXP_CONCRETE_FID_VFID_SHIFT 8
};
/*
* Concrete Function ID.
*/
@ -1255,9 +1212,6 @@ struct pxp_pretend_cmd
#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15
};
/*
* PTT Record in PXP Admin Window.
*/
@ -1271,7 +1225,6 @@ struct pxp_ptt_entry
struct pxp_pretend_cmd pretend;
};
/*
* VF Zone A Permission Register.
*/
@ -1288,7 +1241,6 @@ struct pxp_vf_zone_a_permission
#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
};
/*
* Rdif context
*/
@ -1359,8 +1311,6 @@ struct rdif_task_context
__le32 reserved2;
};
/*
* status block structure
*/
@ -1381,7 +1331,6 @@ struct status_block_e4
#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
/*
* status block structure
*/
@ -1402,7 +1351,6 @@ struct status_block_e5
#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT 24
};
/*
* Tdif context
*/
@ -1489,7 +1437,6 @@ struct tdif_task_context
u8 partial_dif_data_b[8];
};
/*
* Timers context
*/
@ -1539,7 +1486,6 @@ struct timers_context
#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
/*
* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc
*/

View File

@ -1062,7 +1062,6 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
continue; \
} else \
/* Total number of ILT lines used by this PF */
static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
{
@ -1590,7 +1589,6 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i],
cdu_seg_params);
}
}

View File

@ -194,7 +194,6 @@ static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
#endif /* __PREVENT_COND_ARR__ */
/******************************* Data Types **********************************/
enum platform_ids {
@ -540,7 +539,6 @@ struct phy_defs {
#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
/***************************** Constant Arrays *******************************/
struct dbg_array {
@ -553,7 +551,6 @@ struct dbg_array {
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { { OSAL_NULL } };
#else
static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = {
/* BIN_BUF_DBG_MODE_TREE */
{ (const u32 *)dbg_modes_tree_buf, OSAL_ARRAY_SIZE(dbg_modes_tree_buf)},
@ -651,7 +648,6 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
/* Storm constant definitions array */
static struct storm_defs s_storm_defs[] = {
/* Tstorm */
{ 'T', BLOCK_TSEM,
{ DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT }, true,
@ -1405,7 +1401,6 @@ static struct block_defs block_bar0_map_defs = {
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0 };
static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
&block_grc_defs,
&block_miscs_defs,
@ -1498,10 +1493,8 @@ static struct block_defs* s_block_defs[MAX_BLOCK_ID] = {
};
/* Constraint operation types */
static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
/* DBG_BUS_CONSTRAINT_OP_EQ */
{ 0, false },
@ -1534,7 +1527,6 @@ static struct dbg_bus_constraint_op_defs s_constraint_op_defs[] = {
};
static const char* s_dbg_target_names[] = {
/* DBG_BUS_TARGET_ID_INT_BUF */
"int-buf",
@ -1546,7 +1538,6 @@ static const char* s_dbg_target_names[] = {
};
static struct storm_mode_defs s_storm_mode_defs[] = {
/* DBG_BUS_STORM_MODE_PRINTF */
{ "printf", true, 0 },
@ -1576,7 +1567,6 @@ static struct storm_mode_defs s_storm_mode_defs[] = {
};
static struct platform_defs s_platform_defs[] = {
/* PLATFORM_ASIC */
{ "asic", 1, 256, 32768 },
@ -1591,7 +1581,6 @@ static struct platform_defs s_platform_defs[] = {
};
static struct grc_param_defs s_grc_param_defs[] = {
/* DBG_GRC_PARAM_DUMP_TSTORM */
{ { 1, 1, 1 }, 0, 1, false, 1, 1 },
@ -1755,7 +1744,6 @@ static struct big_ram_defs s_big_ram_defs[] = {
};
static struct reset_reg_defs s_reset_regs_defs[] = {
/* DBG_RESET_REG_MISCS_PL_UA */
{ MISCS_REG_RESET_PL_UA, { true, true, true }, { 0x0, 0x0, 0x0 } },
@ -2308,7 +2296,6 @@ static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
/* Config SEM */
if (storm_mode->is_fast_dbg) {
/* Enable fast debug */
ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST);
ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DEBUG_MODE, storm_mode->id_in_hw);
@ -2323,7 +2310,6 @@ static void ecore_bus_enable_storm(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, base_addr + SEM_FAST_REG_DBG_MODE6_SRC_DISABLE, SEM_FAST_MODE6_SRC_ENABLE_VAL);
}
else {
/* Enable slow debug */
ecore_wr(p_hwfn, p_ptt, storm->sem_frame_mode_addr, DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST);
ecore_wr(p_hwfn, p_ptt, storm->sem_slow_enable_addr, 1);
@ -2521,7 +2507,6 @@ static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
last_written_line = ecore_rd(p_hwfn, p_ptt, DBG_REG_INTR_BUFFER_WR_PTR);
if (ecore_rd(p_hwfn, p_ptt, DBG_REG_WRAP_ON_INT_BUFFER)) {
/* Internal buffer was wrapped: first dump from write pointer
* to buffer end, then dump from buffer start to write pointer.
*/
@ -2530,7 +2515,6 @@ static u32 ecore_bus_dump_int_buf(struct ecore_hwfn *p_hwfn,
offset += ecore_bus_dump_int_buf_range(p_hwfn, p_ptt, dump_buf + offset, dump, 0, last_written_line);
}
else if (last_written_line) {
/* Internal buffer wasn't wrapped: dump from buffer start until
* write pointer.
*/
@ -3392,7 +3376,6 @@ static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
offset += ecore_dump_section_hdr(dump_buf + offset, dump, "grc_mem", num_params);
if (name) {
/* Dump name */
if (is_storm) {
OSAL_STRCPY(buf, "?STORM_");
@ -3406,7 +3389,6 @@ static u32 ecore_grc_dump_mem_hdr(struct ecore_hwfn *p_hwfn,
offset += ecore_dump_str_param(dump_buf + offset, dump, "name", buf);
}
else {
/* Dump address */
u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
@ -3712,7 +3694,6 @@ static u32 ecore_grc_dump_vfc_cam(struct ecore_hwfn *p_hwfn,
SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
for (row = 0; row < VFC_CAM_NUM_ROWS; row++, offset += VFC_CAM_RESP_DWORDS) {
/* Write VFC CAM command */
SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, cam_cmd, VFC_CAM_CMD_DWORDS);
@ -3750,7 +3731,6 @@ static u32 ecore_grc_dump_vfc_ram(struct ecore_hwfn *p_hwfn,
return offset + total_size;
for (row = ram_defs->base_row; row < ram_defs->base_row + ram_defs->num_rows; row++, offset += VFC_RAM_RESP_DWORDS) {
/* Write VFC RAM command */
ARR_REG_WR(p_hwfn, p_ptt, storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR, ram_cmd, VFC_RAM_CMD_DWORDS);
@ -4036,7 +4016,6 @@ static u32 ecore_grc_dump_static_debug(struct ecore_hwfn *p_hwfn,
/* Enable block's client */
ecore_bus_enable_clients(p_hwfn, p_ptt, 1 << block->dbg_client_id[dev_data->chip_id]);
for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc); line_id++) {
/* Configure debug line ID */
ecore_config_dbg_line(p_hwfn, p_ptt, (enum block_id)block_id, (u8)line_id, 0xf, 0, 0, 0);
@ -4076,7 +4055,6 @@ static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
*num_dumped_dwords = 0;
if (dump) {
/* Find port mode */
switch (ecore_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
case 0: port_mode = 1; break;
@ -4177,7 +4155,6 @@ static enum dbg_status ecore_grc_dump(struct ecore_hwfn *p_hwfn,
offset += ecore_dump_last_section(dump_buf, offset, dump);
if (dump) {
/* Unstall storms */
if (ecore_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
ecore_grc_stall_storms(p_hwfn, p_ptt, false);
@ -4745,7 +4722,6 @@ static enum dbg_status ecore_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
}
else {
/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
* test how much data is available, except for reading it.
*/
@ -4799,7 +4775,6 @@ static enum dbg_status ecore_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
ecore_dump_num_param(dump_buf + size_param_offset, dump, "size", dwords_read);
}
else {
/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
* test how much data is available, except for reading it.
*/
@ -5084,7 +5059,6 @@ enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, DBG_REG_OTHER_ENGINE_MODE_BB_K2, DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX);
}
else {
/* Configure ethernet header of 14 bytes */
ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_WIDTH, 0);
ecore_wr(p_hwfn, p_ptt, DBG_REG_ETHERNET_HDR_7, dest_addr_lo32);
@ -5671,7 +5645,6 @@ static enum dbg_status ecore_config_storm_inputs(struct ecore_hwfn *p_hwfn,
/* Configure calendar */
for (i = 0; i < NUM_CALENDAR_SLOTS; i++, next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS) {
/* Find next enabled Storm */
for (; !dev_data->bus.storms[next_storm_id].enabled; next_storm_id = (next_storm_id + 1) % MAX_DBG_STORMS);
@ -5718,7 +5691,6 @@ static void ecore_assign_hw_ids(struct ecore_hwfn *p_hwfn,
}
if (hw_id_per_dword) {
/* Assign a different HW ID for each dword */
for (val_id = 0; val_id < VALUES_PER_CYCLE; val_id++)
hw_ids[val_id] = val_id;
@ -6047,7 +6019,6 @@ enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
return DBG_STATUS_INVALID_ARGS;
if (s_grc_param_defs[grc_param].is_preset) {
/* Preset param */
/* Disabling a preset is not allowed. Call
@ -6071,7 +6042,6 @@ enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
}
}
else {
/* Regular param - set its value */
ecore_grc_set_param(p_hwfn, grc_param, val);
}
@ -6506,4 +6476,3 @@ bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
return s_reset_regs_defs[reset_reg].exists[dev_data->chip_id] ?
!(ecore_rd(p_hwfn, p_ptt, s_reset_regs_defs[reset_reg].addr) & (1 << block->reset_bit_offset)) : true;
}

View File

@ -893,5 +893,4 @@ bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block);
#endif

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __DBG_VALUES_H__
#define __DBG_VALUES_H__

View File

@ -232,7 +232,6 @@ enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
&p_hwfn->db_recovery_info.list,
list_entry,
struct ecore_db_recovery_entry) {
/* search according to db_data addr since db_addr is not unique (roce) */
if (db_entry->db_data == db_data) {
ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
@ -1838,7 +1837,6 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
return flags;
}
/* Getters for resource amounts necessary for qm initialization */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
{

View File

@ -53,9 +53,7 @@ static inline enum _ecore_status_t ecore_fcoe_alloc(struct ecore_hwfn OSAL_UNUSE
{
return ECORE_INVAL;
}
static inline void ecore_fcoe_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static inline void ecore_fcoe_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
#endif
@ -74,4 +72,3 @@ ecore_sp_fcoe_conn_destroy(struct ecore_hwfn *p_hwfn,
#endif
#endif /*__ECORE_FCOE_H__*/

View File

@ -35,7 +35,6 @@
/********************************/
#include "common_hsi.h"
/*
* opcodes for the event ring
*/
@ -54,7 +53,6 @@ enum common_event_opcode
MAX_COMMON_EVENT_OPCODE
};
/*
* Common Ramrod Command IDs
*/
@ -71,7 +69,6 @@ enum common_ramrod_cmd_id
MAX_COMMON_RAMROD_CMD_ID
};
/*
* How ll2 should deal with packet upon errors
*/
@ -83,7 +80,6 @@ enum core_error_handle
MAX_CORE_ERROR_HANDLE
};
/*
* opcodes for the event ring
*/
@ -98,7 +94,6 @@ enum core_event_opcode
MAX_CORE_EVENT_OPCODE
};
/*
* The L4 pseudo checksum mode for Core
*/
@ -109,7 +104,6 @@ enum core_l4_pseudo_checksum_mode
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
/*
* Light-L2 RX Producers in Tstorm RAM
*/
@ -121,7 +115,6 @@ struct core_ll2_port_stats
struct regpair gsi_crcchksm_error;
};
/*
* Ethernet TX Per Queue Stats
*/
@ -135,7 +128,6 @@ struct core_ll2_pstorm_per_queue_stat
struct regpair sent_bcast_pkts /* number of total packets sent without errors */;
};
/*
* Light-L2 RX Producers in Tstorm RAM
*/
@ -146,14 +138,12 @@ struct core_ll2_rx_prod
__le32 reserved;
};
struct core_ll2_tstorm_per_queue_stat
{
struct regpair packet_too_big_discard /* Number of packets discarded because they are bigger than MTU */;
struct regpair no_buff_discard /* Number of packets discarded due to lack of host buffers */;
};
struct core_ll2_ustorm_per_queue_stat
{
struct regpair rcv_ucast_bytes;
@ -164,7 +154,6 @@ struct core_ll2_ustorm_per_queue_stat
struct regpair rcv_bcast_pkts;
};
/*
* Core Ramrod Command IDs (light L2)
*/
@ -180,7 +169,6 @@ enum core_ramrod_cmd_id
MAX_CORE_RAMROD_CMD_ID
};
/*
* Core RX CQE Type for Light L2
*/
@ -191,7 +179,6 @@ enum core_roce_flavor_type
MAX_CORE_ROCE_FLAVOR_TYPE
};
/*
* Specifies how ll2 should deal with packets errors: packet_too_big and no_buff
*/
@ -206,7 +193,6 @@ struct core_rx_action_on_error
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
/*
* Core RX BD for Light L2
*/
@ -216,7 +202,6 @@ struct core_rx_bd
__le16 reserved[4];
};
/*
* Core RX CM offload BD for Light L2
*/
@ -236,8 +221,6 @@ union core_rx_bd_union
struct core_rx_bd_with_buff_len rx_bd_with_len /* Core Rx Bd with dynamic buffer length */;
};
/*
* Opaque Data for Light L2 RX CQE .
*/
@ -246,7 +229,6 @@ struct core_rx_cqe_opaque_data
__le32 data[2] /* Opaque CQE Data */;
};
/*
* Core RX CQE Type for Light L2
*/
@ -259,7 +241,6 @@ enum core_rx_cqe_type
MAX_CORE_RX_CQE_TYPE
};
/*
* Core RX CQE for Light L2 .
*/
@ -315,10 +296,6 @@ union core_rx_cqe_union
struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
/*
* Ramrod data for rx queue start ramrod
*/
@ -344,7 +321,6 @@ struct core_rx_start_ramrod_data
u8 reserved[6];
};
/*
* Ramrod data for rx queue stop ramrod
*/
@ -357,7 +333,6 @@ struct core_rx_stop_ramrod_data
__le16 reserved2[2];
};
/*
* Flags for Core TX BD
*/
@ -408,8 +383,6 @@ struct core_tx_bd
#define CORE_TX_BD_TX_DST_SHIFT 14
};
/*
* Light L2 TX Destination
*/
@ -422,7 +395,6 @@ enum core_tx_dest
MAX_CORE_TX_DEST
};
/*
* Ramrod data for tx queue start ramrod
*/
@ -441,7 +413,6 @@ struct core_tx_start_ramrod_data
u8 resrved[3];
};
/*
* Ramrod data for tx queue stop ramrod
*/
@ -450,7 +421,6 @@ struct core_tx_stop_ramrod_data
__le32 reserved0[2];
};
/*
* Ramrod data for tx queue update ramrod
*/
@ -462,7 +432,6 @@ struct core_tx_update_ramrod_data
__le32 reserved1[1];
};
/*
* Enum flag for what type of DCB data to update
*/
@ -475,7 +444,6 @@ enum dcb_dscp_update_mode
MAX_DCB_DSCP_UPDATE_MODE
};
/*
* The core storm context for the Ystorm
*/
@ -978,7 +946,6 @@ struct e4_core_conn_context
struct regpair ustorm_st_padding[2] /* padding */;
};
struct e5_xstorm_core_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1479,7 +1446,6 @@ struct e5_core_conn_context
struct regpair ustorm_st_padding[2] /* padding */;
};
struct eth_mstorm_per_pf_stat
{
struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
@ -1488,7 +1454,6 @@ struct eth_mstorm_per_pf_stat
struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
};
struct eth_mstorm_per_queue_stat
{
struct regpair ttl0_discard /* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (in IPv6) */;
@ -1501,7 +1466,6 @@ struct eth_mstorm_per_queue_stat
struct regpair tpa_coalesced_bytes /* total TCP payload length in all TPA aggregations */;
};
/*
* Ethernet TX Per PF
*/
@ -1524,7 +1488,6 @@ struct eth_pstorm_per_pf_stat
struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
};
/*
* Ethernet TX Per Queue Stats
*/
@ -1539,7 +1502,6 @@ struct eth_pstorm_per_queue_stat
struct regpair error_drop_pkts /* number of total packets dropped due to errors */;
};
/*
* ETH Rx producers data
*/
@ -1552,7 +1514,6 @@ struct eth_rx_rate_limit
__le16 reserved1;
};
struct eth_ustorm_per_pf_stat
{
struct regpair rcv_lb_ucast_bytes /* number of total ucast bytes received on loopback port without errors */;
@ -1569,7 +1530,6 @@ struct eth_ustorm_per_pf_stat
struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
};
struct eth_ustorm_per_queue_stat
{
struct regpair rcv_ucast_bytes;
@ -1580,7 +1540,6 @@ struct eth_ustorm_per_queue_stat
struct regpair rcv_bcast_pkts;
};
/*
* Event Ring VF-PF Channel data
*/
@ -1622,7 +1581,6 @@ union event_ring_data
struct initial_cleanup_eqe_data vf_init_cleanup /* VF Initial Cleanup data */;
};
/*
* Event Ring Entry
*/
@ -1659,9 +1617,6 @@ union event_ring_element
struct event_ring_next_addr next_addr /* Event Ring Next Page Address */;
};
/*
* Ports mode
*/
@ -1672,7 +1627,6 @@ enum fw_flow_ctrl_mode
MAX_FW_FLOW_CTRL_MODE
};
/*
* GFT profile type.
*/
@ -1686,7 +1640,6 @@ enum gft_profile_type
MAX_GFT_PROFILE_TYPE
};
/*
* Major and Minor hsi Versions
*/
@ -1696,8 +1649,6 @@ struct hsi_fp_ver_struct
u8 major_ver_arr[2] /* Major Version of driver loading pf */;
};
/*
* Integration Phase
*/
@ -1709,7 +1660,6 @@ enum integ_phase
MAX_INTEG_PHASE
};
/*
* Ports mode
*/
@ -1722,8 +1672,6 @@ enum iwarp_ll2_tx_queues
MAX_IWARP_LL2_TX_QUEUES
};
/*
* Malicious VF error ID
*/
@ -1754,8 +1702,6 @@ enum malicious_vf_error_id
MAX_MALICIOUS_VF_ERROR_ID
};
/*
* Mstorm non-triggering VF zone
*/
@ -1765,7 +1711,6 @@ struct mstorm_non_trigger_vf_zone
struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD] /* VF RX queues producers */;
};
/*
* Mstorm VF zone
*/
@ -1774,7 +1719,6 @@ struct mstorm_vf_zone
struct mstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
};
/*
* vlan header including TPID and TCI fields
*/
@ -1796,7 +1740,6 @@ struct outer_tag_config_struct
u8 inner_to_outer_pri_map[8] /* Map from inner to outer priority. Set pri_map_valid when init map */;
};
/*
* personality per PF
*/
@ -1813,7 +1756,6 @@ enum personality_type
MAX_PERSONALITY_TYPE
};
/*
* tunnel configuration
*/
@ -1857,8 +1799,6 @@ struct pf_start_ramrod_data
struct outer_tag_config_struct outer_tag_config /* Outer tag configurations */;
};
/*
* Per protocol DCB data
*/
@ -1917,8 +1857,6 @@ struct pf_update_ramrod_data
struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */;
};
/*
* Ports mode
*/
@ -1932,8 +1870,6 @@ enum ports_mode
MAX_PORTS_MODE
};
/*
* use to index in hsi_fp_[major|minor]_ver_arr per protocol
*/
@ -1944,8 +1880,6 @@ enum protocol_version_array_key
MAX_PROTOCOL_VERSION_ARRAY_KEY
};
/*
* RDMA TX Stats
*/
@ -1964,7 +1898,6 @@ struct pstorm_non_trigger_vf_zone
struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
};
/*
* Pstorm VF zone
*/
@ -1974,7 +1907,6 @@ struct pstorm_vf_zone
struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
};
/*
* Ramrod Header of SPQE
*/
@ -1986,7 +1918,6 @@ struct ramrod_header
__le16 echo /* Ramrod echo */;
};
/*
* RDMA RX Stats
*/
@ -1996,8 +1927,6 @@ struct rdma_rcv_stats
struct regpair rcv_pkts /* number of total RDMA packets received */;
};
/*
* Data for update QCN/DCQCN RL ramrod
*/
@ -2022,7 +1951,6 @@ struct rl_update_ramrod_data
__le32 reserved[2];
};
/*
* Slowpath Element (SPQE)
*/
@ -2032,7 +1960,6 @@ struct slow_path_element
struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
};
/*
* Tstorm non-triggering VF zone
*/
@ -2041,7 +1968,6 @@ struct tstorm_non_trigger_vf_zone
struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
};
struct tstorm_per_port_stat
{
struct regpair trunc_error_discard /* packet is dropped because it was truncated in NIG */;
@ -2063,7 +1989,6 @@ struct tstorm_per_port_stat
struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
};
/*
* Tstorm VF zone
*/
@ -2072,7 +1997,6 @@ struct tstorm_vf_zone
struct tstorm_non_trigger_vf_zone non_trigger /* non-interrupt-triggering zone */;
};
/*
* Tunnel classification scheme
*/
@ -2086,8 +2010,6 @@ enum tunnel_clss
MAX_TUNNEL_CLSS
};
/*
* Ustorm non-triggering VF zone
*/
@ -2097,7 +2019,6 @@ struct ustorm_non_trigger_vf_zone
struct regpair vf_pf_msg_addr /* VF-PF message address */;
};
/*
* Ustorm triggering VF zone
*/
@ -2107,7 +2028,6 @@ struct ustorm_trigger_vf_zone
u8 reserved[7];
};
/*
* Ustorm VF zone
*/
@ -2117,7 +2037,6 @@ struct ustorm_vf_zone
struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
};
/*
* VF-PF channel data
*/
@ -2129,8 +2048,6 @@ struct vf_pf_channel_data
__le16 reserved1;
};
/*
* Ramrod data for VF start ramrod
*/
@ -2144,7 +2061,6 @@ struct vf_start_ramrod_data
struct hsi_fp_ver_struct hsi_fp_ver /* FP HSI version to be used by FW */;
};
/*
* Ramrod data for VF start ramrod
*/
@ -2156,7 +2072,6 @@ struct vf_stop_ramrod_data
__le32 reserved2;
};
/*
* VF zone size mode.
*/
@ -2168,10 +2083,6 @@ enum vf_zone_size_mode
MAX_VF_ZONE_SIZE_MODE
};
/*
* Attentions status block
*/
@ -2184,7 +2095,6 @@ struct atten_status_block
__le32 reserved1;
};
/*
* DMAE command
*/
@ -2252,7 +2162,6 @@ struct dmae_cmd
__le16 xsum8 /* checksum8 result */;
};
enum dmae_cmd_comp_crc_en_enum
{
dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
@ -2260,7 +2169,6 @@ enum dmae_cmd_comp_crc_en_enum
MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
enum dmae_cmd_comp_func_enum
{
dmae_cmd_comp_func_to_src /* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */,
@ -2268,7 +2176,6 @@ enum dmae_cmd_comp_func_enum
MAX_DMAE_CMD_COMP_FUNC_ENUM
};
enum dmae_cmd_comp_word_en_enum
{
dmae_cmd_comp_word_disabled /* Do not write a completion word */,
@ -2276,7 +2183,6 @@ enum dmae_cmd_comp_word_en_enum
MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
enum dmae_cmd_c_dst_enum
{
dmae_cmd_c_dst_pcie,
@ -2284,7 +2190,6 @@ enum dmae_cmd_c_dst_enum
MAX_DMAE_CMD_C_DST_ENUM
};
enum dmae_cmd_dst_enum
{
dmae_cmd_dst_none_0,
@ -2294,7 +2199,6 @@ enum dmae_cmd_dst_enum
MAX_DMAE_CMD_DST_ENUM
};
enum dmae_cmd_error_handling_enum
{
dmae_cmd_error_handling_send_regular_comp /* Send a regular completion (with no error indication) */,
@ -2303,7 +2207,6 @@ enum dmae_cmd_error_handling_enum
MAX_DMAE_CMD_ERROR_HANDLING_ENUM
};
enum dmae_cmd_src_enum
{
dmae_cmd_src_pcie /* The source is the PCIe */,
@ -2311,7 +2214,6 @@ enum dmae_cmd_src_enum
MAX_DMAE_CMD_SRC_ENUM
};
struct e4_mstorm_core_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2350,10 +2252,6 @@ struct e4_mstorm_core_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_ystorm_core_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2399,7 +2297,6 @@ struct e4_ystorm_core_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_mstorm_core_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2438,10 +2335,6 @@ struct e5_mstorm_core_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_ystorm_core_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2487,7 +2380,6 @@ struct e5_ystorm_core_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct fw_asserts_ram_section
{
__le16 section_ram_line_offset /* The offset of the section in the RAM in RAM lines (64-bit units) */;
@ -2498,7 +2390,6 @@ struct fw_asserts_ram_section
u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
};
struct fw_ver_num
{
u8 major /* Firmware major version number */;
@ -2523,16 +2414,12 @@ struct fw_info
struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
};
struct fw_info_location
{
__le32 grc_addr /* GRC address where the fw_info struct is located. */;
__le32 size /* Size of the fw_info structure (that's located at the grc_addr). */;
};
/*
* IGU cleanup command
*/
@ -2550,7 +2437,6 @@ struct igu_cleanup
__le32 reserved1;
};
/*
* IGU firmware driver command
*/
@ -2560,7 +2446,6 @@ union igu_command
struct igu_cleanup cleanup;
};
/*
* IGU firmware driver command
*/
@ -2576,7 +2461,6 @@ struct igu_command_reg_ctrl
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
/*
* IGU mapping line structure
*/
@ -2597,7 +2481,6 @@ struct igu_mapping_line
#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
};
/*
* IGU MSIX line structure
*/
@ -2616,7 +2499,6 @@ struct igu_msix_vector
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
/*
* per encapsulation type enabling flags
*/
@ -2639,7 +2521,6 @@ struct prs_reg_encapsulation_type_en
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
};
enum pxp_tph_st_hint
{
TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
@ -2649,7 +2530,6 @@ enum pxp_tph_st_hint
MAX_PXP_TPH_ST_HINT
};
/*
* QM hardware structure of enable bypass credit mask
*/
@ -2674,7 +2554,6 @@ struct qm_rf_bypass_mask
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
/*
* QM hardware structure of opportunistic credit mask
*/
@ -2703,7 +2582,6 @@ struct qm_rf_opportunistic_mask
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
};
/*
* E4 QM hardware structure of QM map memory
*/
@ -2726,7 +2604,6 @@ struct qm_rf_pq_map_e4
#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
/*
* E5 QM hardware structure of QM map memory
*/
@ -2749,7 +2626,6 @@ struct qm_rf_pq_map_e5
#define QM_RF_PQ_MAP_E5_RESERVED_SHIFT 27
};
/*
* Completion params for aggregated interrupt completion
*/
@ -2764,7 +2640,6 @@ struct sdm_agg_int_comp_params
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
};
/*
* SDM operation gen command (generate aggregative interrupt)
*/

View File

@ -34,7 +34,6 @@
/* Debug Tools HSI constants and macros */
/****************************************/
enum block_addr
{
GRCBASE_GRC = 0x50000,
@ -128,7 +127,6 @@ enum block_addr
MAX_BLOCK_ADDR
};
enum block_id
{
BLOCK_GRC,
@ -222,7 +220,6 @@ enum block_id
MAX_BLOCK_ID
};
/*
* binary debug buffer types
*/
@ -247,7 +244,6 @@ enum bin_dbg_buffer_type
MAX_BIN_DBG_BUFFER_TYPE
};
/*
* Attention bit mapping
*/
@ -260,7 +256,6 @@ struct dbg_attn_bit_mapping
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
};
/*
* Attention block per-type data
*/
@ -281,7 +276,6 @@ struct dbg_attn_block
struct dbg_attn_block_type_data per_type_data[2] /* attention block per-type data. Count must match the number of elements in dbg_attn_type. */;
};
/*
* Attention register result
*/
@ -313,8 +307,6 @@ struct dbg_attn_block_result
struct dbg_attn_reg_result reg_results[15] /* result data for each register in the block in which at least one attention bit is set */;
};
/*
* mode header
*/
@ -343,8 +335,6 @@ struct dbg_attn_reg
u32 mask_address /* MASK attention register GRC address (in dwords) */;
};
/*
* attention types
*/
@ -355,7 +345,6 @@ enum dbg_attn_type
MAX_DBG_ATTN_TYPE
};
/*
* Debug Bus block data
*/
@ -366,7 +355,6 @@ struct dbg_bus_block
u16 lines_offset /* Offset of this blocks lines in the Debug Bus lines array. */;
};
/*
* Debug Bus block user data
*/
@ -377,7 +365,6 @@ struct dbg_bus_block_user_data
u16 names_offset /* Offset of this blocks lines in the debug bus line name offsets array. */;
};
/*
* Block Debug line data
*/
@ -393,7 +380,6 @@ struct dbg_bus_line
u8 group_sizes /* Four 2-bit values, indicating the size of each group minus 1 (i.e. value=0 means size=1, value=1 means size=2, etc), starting from lsb. The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1). */;
};
/*
* condition header for registers dump
*/
@ -404,7 +390,6 @@ struct dbg_dump_cond_hdr
u8 data_size /* size in dwords of the data following this header */;
};
/*
* memory data for registers dump
*/
@ -424,7 +409,6 @@ struct dbg_dump_mem
#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
/*
* register data for registers dump
*/
@ -439,7 +423,6 @@ struct dbg_dump_reg
#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
/*
* split header for registers dump
*/
@ -452,7 +435,6 @@ struct dbg_dump_split_hdr
#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
};
/*
* condition header for idle check
*/
@ -462,7 +444,6 @@ struct dbg_idle_chk_cond_hdr
u16 data_size /* size in dwords of the data following this header */;
};
/*
* Idle Check condition register
*/
@ -480,7 +461,6 @@ struct dbg_idle_chk_cond_reg
u8 start_entry /* index of the first entry to check */;
};
/*
* Idle Check info register
*/
@ -497,7 +477,6 @@ struct dbg_idle_chk_info_reg
struct dbg_mode_hdr mode /* Mode header */;
};
/*
* Idle Check register
*/
@ -507,7 +486,6 @@ union dbg_idle_chk_reg
struct dbg_idle_chk_info_reg info_reg /* info register */;
};
/*
* Idle Check result header
*/
@ -521,7 +499,6 @@ struct dbg_idle_chk_result_hdr
u8 reserved;
};
/*
* Idle Check result register header
*/
@ -536,7 +513,6 @@ struct dbg_idle_chk_result_reg_hdr
u16 size /* register size in dwords */;
};
/*
* Idle Check rule
*/
@ -553,7 +529,6 @@ struct dbg_idle_chk_rule
u16 imm_offset /* offset of this rules immediate values in the immediate values array (in dwords) */;
};
/*
* Idle Check rule parsing data
*/
@ -566,7 +541,6 @@ struct dbg_idle_chk_rule_parsing_data
#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
};
/*
* idle check severity types
*/
@ -578,8 +552,6 @@ enum dbg_idle_chk_severity_types
MAX_DBG_IDLE_CHK_SEVERITY_TYPES
};
/*
* Debug Bus block data
*/
@ -598,7 +570,6 @@ struct dbg_bus_block_data
u8 hw_id /* HW ID associated with the block */;
};
/*
* Debug Bus Clients
*/
@ -626,7 +597,6 @@ enum dbg_bus_clients
MAX_DBG_BUS_CLIENTS
};
/*
* Debug Bus constraint operation types
*/
@ -645,7 +615,6 @@ enum dbg_bus_constraint_ops
MAX_DBG_BUS_CONSTRAINT_OPS
};
/*
* Debug Bus trigger state data
*/
@ -750,7 +719,6 @@ struct dbg_bus_data
struct dbg_bus_storm_data storms[6] /* Debug Bus data for each block */;
};
/*
* Debug bus filter types
*/
@ -763,7 +731,6 @@ enum dbg_bus_filter_types
MAX_DBG_BUS_FILTER_TYPES
};
/*
* Debug bus frame modes
*/
@ -775,8 +742,6 @@ enum dbg_bus_frame_modes
MAX_DBG_BUS_FRAME_MODES
};
/*
* Debug bus other engine mode
*/
@ -790,8 +755,6 @@ enum dbg_bus_other_engine_modes
MAX_DBG_BUS_OTHER_ENGINE_MODES
};
/*
* Debug bus post-trigger recording types
*/
@ -802,7 +765,6 @@ enum dbg_bus_post_trigger_types
MAX_DBG_BUS_POST_TRIGGER_TYPES
};
/*
* Debug bus pre-trigger recording types
*/
@ -814,7 +776,6 @@ enum dbg_bus_pre_trigger_types
MAX_DBG_BUS_PRE_TRIGGER_TYPES
};
/*
* Debug bus SEMI frame modes
*/
@ -825,7 +786,6 @@ enum dbg_bus_semi_frame_modes
MAX_DBG_BUS_SEMI_FRAME_MODES
};
/*
* Debug bus states
*/
@ -838,11 +798,6 @@ enum dbg_bus_states
MAX_DBG_BUS_STATES
};
/*
* Debug Bus Storm modes
*/
@ -860,7 +815,6 @@ enum dbg_bus_storm_modes
MAX_DBG_BUS_STORM_MODES
};
/*
* Debug bus target IDs
*/
@ -872,8 +826,6 @@ enum dbg_bus_targets
MAX_DBG_BUS_TARGETS
};
/*
* GRC Dump data
*/
@ -885,7 +837,6 @@ struct dbg_grc_data
u32 param_val[48] /* Value of each GRC parameter. Array size must match the enum dbg_grc_params. */;
};
/*
* Debug GRC params
*/
@ -935,7 +886,6 @@ enum dbg_grc_params
MAX_DBG_GRC_PARAMS
};
/*
* Debug reset registers
*/
@ -952,7 +902,6 @@ enum dbg_reset_regs
MAX_DBG_RESET_REGS
};
/*
* Debug status codes
*/
@ -1015,7 +964,6 @@ enum dbg_status
MAX_DBG_STATUS
};
/*
* Debug Storms IDs
*/
@ -1030,7 +978,6 @@ enum dbg_storms
MAX_DBG_STORMS
};
/*
* Idle Check data
*/
@ -1059,5 +1006,4 @@ struct dbg_tools_data
u32 num_regs_read /* Numbers of registers that were read since last log */;
};
#endif /* __ECORE_HSI_DEBUG_TOOLS__ */

View File

@ -585,7 +585,6 @@ struct e4_eth_conn_context
struct mstorm_eth_conn_st_ctx mstorm_st_context /* mstorm storm context */;
};
struct e5_xstorm_eth_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1131,7 +1130,6 @@ struct e5_eth_conn_context
struct mstorm_eth_conn_st_ctx mstorm_st_context /* mstorm storm context */;
};
/*
* Ethernet filter types: mac/vlan/pair
*/
@ -1162,7 +1160,6 @@ enum eth_error_code
MAX_ETH_ERROR_CODE
};
/*
* opcodes for the event ring
*/
@ -1189,7 +1186,6 @@ enum eth_event_opcode
MAX_ETH_EVENT_OPCODE
};
/*
* Classify rule types in E2/E3
*/
@ -1202,7 +1198,6 @@ enum eth_filter_action
MAX_ETH_FILTER_ACTION
};
/*
* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
*/
@ -1219,7 +1214,6 @@ struct eth_filter_cmd
__le16 vlan_id;
};
/*
* $$KEEP_ENDIANNESS$$
*/
@ -1232,7 +1226,6 @@ struct eth_filter_cmd_header
u8 reserved1[4];
};
/*
* Ethernet filter types: mac/vlan/pair
*/
@ -1251,7 +1244,6 @@ enum eth_filter_type
MAX_ETH_FILTER_TYPE
};
/*
* eth IPv4 Fragment Type
*/
@ -1263,7 +1255,6 @@ enum eth_ipv4_frag_type
MAX_ETH_IPV4_FRAG_TYPE
};
/*
* eth IPv4 Fragment Type
*/
@ -1274,7 +1265,6 @@ enum eth_ip_type
MAX_ETH_IP_TYPE
};
/*
* Ethernet Ramrod Command IDs
*/
@ -1301,7 +1291,6 @@ enum eth_ramrod_cmd_id
MAX_ETH_RAMROD_CMD_ID
};
/*
* return code from eth sp ramrods
*/
@ -1316,7 +1305,6 @@ struct eth_return_code
#define ETH_RETURN_CODE_RX_TX_SHIFT 7
};
/*
* What to do in case an error occurs
*/
@ -1327,7 +1315,6 @@ enum eth_tx_err
MAX_ETH_TX_ERR
};
/*
* Array of the different error type behaviors
*/
@ -1352,7 +1339,6 @@ struct eth_tx_err_vals
#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
};
/*
* vport rss configuration data
*/
@ -1387,7 +1373,6 @@ struct eth_vport_rss_config
__le32 reserved3[2];
};
/*
* eth vport RSS mode
*/
@ -1398,7 +1383,6 @@ enum eth_vport_rss_mode
MAX_ETH_VPORT_RSS_MODE
};
/*
* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
*/
@ -1421,7 +1405,6 @@ struct eth_vport_rx_mode
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
};
/*
* Command for setting tpa parameters
*/
@ -1442,7 +1425,6 @@ struct eth_vport_tpa_param
u8 reserved;
};
/*
* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
*/
@ -1463,7 +1445,6 @@ struct eth_vport_tx_mode
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
};
/*
* GFT filter update action type.
*/
@ -1474,9 +1455,6 @@ enum gft_filter_update_action
MAX_GFT_FILTER_UPDATE_ACTION
};
/*
* Ramrod data for rx add openflow filter
*/
@ -1504,7 +1482,6 @@ struct rx_add_openflow_filter_data
__le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
};
/*
* Ramrod data for rx create gft action
*/
@ -1514,7 +1491,6 @@ struct rx_create_gft_action_data
u8 reserved[7];
};
/*
* Ramrod data for rx create openflow action
*/
@ -1524,7 +1500,6 @@ struct rx_create_openflow_action_data
u8 reserved[7];
};
/*
* Ramrod data for rx queue start ramrod
*/
@ -1557,7 +1532,6 @@ struct rx_queue_start_ramrod_data
struct regpair reserved2 /* FW reserved. */;
};
/*
* Ramrod data for rx queue stop ramrod
*/
@ -1570,7 +1544,6 @@ struct rx_queue_stop_ramrod_data
u8 reserved[3];
};
/*
* Ramrod data for rx queue update ramrod
*/
@ -1590,7 +1563,6 @@ struct rx_queue_update_ramrod_data
struct regpair reserved6 /* FW reserved. */;
};
/*
* Ramrod data for rx Add UDP Filter
*/
@ -1608,7 +1580,6 @@ struct rx_udp_filter_data
__le32 tenant_id /* Searcher String - Tenant ID */;
};
/*
* add or delete GFT filter - filter is packet header of type of packet wished to pass certain FW flow
*/
@ -1628,8 +1599,6 @@ struct rx_update_gft_filter_data
u8 reserved;
};
/*
* Ramrod data for tx queue start ramrod
*/
@ -1671,7 +1640,6 @@ struct tx_queue_start_ramrod_data
struct regpair bd_cons_address /* BD consumer address in host - for PMD queues */;
};
/*
* Ramrod data for tx queue stop ramrod
*/
@ -1680,7 +1648,6 @@ struct tx_queue_stop_ramrod_data
__le16 reserved[4];
};
/*
* Ramrod data for tx queue update ramrod
*/
@ -1692,8 +1659,6 @@ struct tx_queue_update_ramrod_data
struct regpair reserved1[5];
};
/*
* Ramrod data for vport update ramrod
*/
@ -1703,7 +1668,6 @@ struct vport_filter_update_ramrod_data
struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT] /* Filter Commands */;
};
/*
* Ramrod data for vport start ramrod
*/
@ -1731,7 +1695,6 @@ struct vport_start_ramrod_data
u8 reserved[1];
};
/*
* Ramrod data for vport stop ramrod
*/
@ -1741,7 +1704,6 @@ struct vport_stop_ramrod_data
u8 reserved[7];
};
/*
* Ramrod data for vport update ramrod
*/
@ -1800,11 +1762,6 @@ struct vport_update_ramrod_data
struct eth_vport_rss_config rss_config /* rss config data */;
};
struct E4XstormEthConnAgCtxDqExtLdPart
{
u8 reserved0 /* cdu_validation */;
@ -2035,7 +1992,6 @@ struct E4XstormEthConnAgCtxDqExtLdPart
__le32 reg4 /* reg4 */;
};
struct e4_mstorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2074,10 +2030,6 @@ struct e4_mstorm_eth_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_xstorm_eth_hw_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -2299,8 +2251,6 @@ struct e4_xstorm_eth_hw_conn_ag_ctx
__le16 conn_dpi /* conn_dpi */;
};
struct E5XstormEthConnAgCtxDqExtLdPart
{
u8 reserved0 /* cdu_validation */;
@ -2531,7 +2481,6 @@ struct E5XstormEthConnAgCtxDqExtLdPart
__le32 reg4 /* reg4 */;
};
struct e5_mstorm_eth_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2570,10 +2519,6 @@ struct e5_mstorm_eth_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_xstorm_eth_hw_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -2795,8 +2740,6 @@ struct e5_xstorm_eth_hw_conn_ag_ctx
__le16 conn_dpi /* conn_dpi */;
};
/*
* GFT CAM line struct
*/
@ -2813,7 +2756,6 @@ struct gft_cam_line
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
/*
* GFT CAM line struct with fields breakout
*/
@ -2846,14 +2788,12 @@ struct gft_cam_line_mapped
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
union gft_cam_line_union
{
struct gft_cam_line cam_line;
struct gft_cam_line_mapped cam_line_mapped;
};
/*
* Used in gft_profile_key: Indication for ip version
*/
@ -2864,7 +2804,6 @@ enum gft_profile_ip_version
MAX_GFT_PROFILE_IP_VERSION
};
/*
* Profile key stucr fot GFT logic in Prs
*/
@ -2885,7 +2824,6 @@ struct gft_profile_key
#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
};
/*
* Used in gft_profile_key: Indication for tunnel type
*/
@ -2900,7 +2838,6 @@ enum gft_profile_tunnel_type
MAX_GFT_PROFILE_TUNNEL_TYPE
};
/*
* Used in gft_profile_key: Indication for protocol type
*/
@ -2925,7 +2862,6 @@ enum gft_profile_upper_protocol_type
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
/*
* GFT RAM line struct
*/
@ -3019,7 +2955,6 @@ struct gft_ram_line
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
/*
* Used in the first 2 bits for gft_ram_line: Indication for vlan mask
*/

View File

@ -40,7 +40,6 @@
/************************************************************************/
#include "fcoe_common.h"
/*
* The fcoe storm context of Ystorm
*/
@ -785,7 +784,6 @@ struct e4_fcoe_conn_context
struct mstorm_fcoe_conn_st_ctx mstorm_st_context /* mstorm storm context */;
};
struct e5_xstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1285,7 +1283,6 @@ struct e5_fcoe_conn_context
struct mstorm_fcoe_conn_st_ctx mstorm_st_context /* mstorm storm context */;
};
/*
* FCoE connection offload params passed by driver to FW in FCoE offload ramrod
*/
@ -1294,7 +1291,6 @@ struct fcoe_conn_offload_ramrod_params
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
/*
* FCoE connection terminate params passed by driver to FW in FCoE terminate conn ramrod
*/
@ -1303,7 +1299,6 @@ struct fcoe_conn_terminate_ramrod_params
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
/*
* FCoE event type
*/
@ -1318,7 +1313,6 @@ enum fcoe_event_type
MAX_FCOE_EVENT_TYPE
};
/*
* FCoE init params passed by driver to FW in FCoE init ramrod
*/
@ -1327,9 +1321,6 @@ struct fcoe_init_ramrod_params
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
/*
* FCoE ramrod Command IDs
*/
@ -1343,7 +1334,6 @@ enum fcoe_ramrod_cmd_id
MAX_FCOE_RAMROD_CMD_ID
};
/*
* FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
*/
@ -1352,21 +1342,6 @@ struct fcoe_stat_ramrod_params
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
struct e4_ystorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1412,11 +1387,6 @@ struct e4_ystorm_fcoe_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_ystorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;

View File

@ -54,7 +54,6 @@ struct init_brb_ram_req
u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
};
/*
* ETS per-TC init requirements
*/
@ -74,8 +73,6 @@ struct init_ets_req
struct init_ets_tc_req tc_req[NUM_OF_TCS] /* ETS initialization requirements per TC. */;
};
/*
* NIG LB RL init requirements
*/
@ -87,7 +84,6 @@ struct init_nig_lb_rl_req
u16 tc_rate[NUM_OF_PHYS_TCS] /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */;
};
/*
* NIG TC mapping for each priority
*/
@ -97,7 +93,6 @@ struct init_nig_pri_tc_map_entry
u8 valid /* indicates if the mapping entry is valid */;
};
/*
* NIG priority to TC map init requirements
*/
@ -106,7 +101,6 @@ struct init_nig_pri_tc_map_req
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
/*
* QM per-port init parameters
*/
@ -119,7 +113,6 @@ struct init_qm_port_params
u16 reserved;
};
/*
* QM per-PQ init parameters
*/
@ -131,7 +124,6 @@ struct init_qm_pq_params
u8 rl_valid /* Indicates if a rate limiter should be allocated for the PQ (0/1) */;
};
/*
* QM per-vport init parameters
*/

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HSI_INIT_TOOL__
#define __ECORE_HSI_INIT_TOOL__
/**************************************/
@ -45,7 +44,6 @@
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
enum chip_ids
{
CHIP_BB,
@ -54,7 +52,6 @@ enum chip_ids
MAX_CHIP_IDS
};
enum init_modes
{
MODE_BB_A0_DEPRECATED,
@ -76,7 +73,6 @@ enum init_modes
MAX_INIT_MODES
};
enum init_phases
{
PHASE_ENGINE,
@ -87,7 +83,6 @@ enum init_phases
MAX_INIT_PHASES
};
enum init_split_types
{
SPLIT_TYPE_NONE,
@ -98,7 +93,6 @@ enum init_split_types
MAX_INIT_SPLIT_TYPES
};
/*
* Binary buffer header
*/
@ -108,7 +102,6 @@ struct bin_buffer_hdr
u32 length /* buffer length in bytes */;
};
/*
* binary init buffer types
*/
@ -122,7 +115,6 @@ enum bin_init_buffer_type
MAX_BIN_INIT_BUFFER_TYPE
};
/*
* init array header: raw
*/
@ -184,10 +176,6 @@ union init_array_hdr
struct init_array_pattern_hdr pattern /* pattern init array header */;
};
/*
* init array types
*/
@ -199,8 +187,6 @@ enum init_array_types
MAX_INIT_ARRAY_TYPES
};
/*
* init operation: callback
*/
@ -215,7 +201,6 @@ struct init_callback_op
u16 block_id /* Blocks ID */;
};
/*
* init operation: delay
*/
@ -229,7 +214,6 @@ struct init_delay_op
u32 delay /* delay in us */;
};
/*
* init operation: if_mode
*/
@ -246,7 +230,6 @@ struct init_if_mode_op
u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
};
/*
* init operation: if_phase
*/
@ -270,7 +253,6 @@ struct init_if_phase_op
#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/*
* init mode operators
*/
@ -282,7 +264,6 @@ enum init_mode_ops
MAX_INIT_MODE_OPS
};
/*
* init operation: raw
*/
@ -366,8 +347,6 @@ union init_op
struct init_delay_op delay /* delay init operation */;
};
/*
* Init command operation types
*/
@ -382,7 +361,6 @@ enum init_op_types
MAX_INIT_OP_TYPES
};
/*
* init polling types
*/
@ -395,9 +373,6 @@ enum init_poll_types
MAX_INIT_POLL_TYPES
};
/*
* init source types
*/
@ -410,9 +385,6 @@ enum init_source_types
MAX_INIT_SOURCE_TYPES
};
/*
* Internal RAM Offsets macro data
*/

View File

@ -40,7 +40,6 @@
/************************************************************************/
#include "iscsi_common.h"
/*
* The iscsi storm connection context of Ystorm
*/
@ -580,7 +579,6 @@ struct e4_iscsi_conn_context
struct ustorm_iscsi_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
struct e5_xstorm_iscsi_conn_ag_ctx
{
u8 cdu_validation /* cdu_validation */;
@ -1110,7 +1108,6 @@ struct e5_iscsi_conn_context
struct ustorm_iscsi_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
/*
* iSCSI init params passed by driver to FW in iSCSI init ramrod
*/
@ -1120,17 +1117,6 @@ struct iscsi_init_ramrod_params
struct tcp_init_params tcp_init /* TCP parameters initialized by the bus-driver */;
};
struct e4_ystorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1176,11 +1162,6 @@ struct e4_ystorm_iscsi_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_ystorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;

View File

@ -476,7 +476,6 @@ struct e4_iwarp_conn_context
struct ustorm_iwarp_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
struct e5_xstorm_iwarp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -889,7 +888,6 @@ struct e5_iwarp_conn_context
struct ustorm_iwarp_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
/*
* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod
*/
@ -927,7 +925,6 @@ struct iwarp_create_qp_ramrod_data
u8 reserved2[6];
};
/*
* iWARP completion queue types
*/
@ -943,14 +940,12 @@ enum iwarp_eqe_async_opcode
MAX_IWARP_EQE_ASYNC_OPCODE
};
struct iwarp_eqe_data_mpa_async_completion
{
__le16 ulp_data_len /* On active side, length of ULP Data, from peers MPA Connect Response */;
u8 reserved[6];
};
struct iwarp_eqe_data_tcp_async_completion
{
__le16 ulp_data_len /* On passive side, length of ULP Data, from peers active MPA Connect Request */;
@ -958,7 +953,6 @@ struct iwarp_eqe_data_tcp_async_completion
u8 reserved[5];
};
/*
* iWARP completion queue types
*/
@ -975,7 +969,6 @@ enum iwarp_eqe_sync_opcode
MAX_IWARP_EQE_SYNC_OPCODE
};
/*
* iWARP EQE completion status
*/
@ -1012,7 +1005,6 @@ enum iwarp_fw_return_code
MAX_IWARP_FW_RETURN_CODE
};
/*
* unaligned opaque data received from LL2
*/
@ -1022,7 +1014,6 @@ struct iwarp_init_func_params
u8 reserved1[7];
};
/*
* iwarp func init ramrod data
*/
@ -1033,7 +1024,6 @@ struct iwarp_init_func_ramrod_data
struct iwarp_init_func_params iwarp;
};
/*
* iWARP QP - possible states to transition to
*/
@ -1044,7 +1034,6 @@ enum iwarp_modify_qp_new_state_type
MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
};
/*
* iwarp modify qp responder ramrod data
*/
@ -1071,7 +1060,6 @@ struct iwarp_modify_qp_ramrod_data
__le32 reserved1[10];
};
/*
* MPA params for Enhanced mode
*/
@ -1127,7 +1115,6 @@ struct iwarp_mpa_offload_ramrod_data
u8 reserved3[13];
};
/*
* iWARP TCP connection offload params passed by driver to FW
*/
@ -1143,7 +1130,6 @@ struct iwarp_offload_params
u8 reserved[10];
};
/*
* iWARP query QP output params
*/
@ -1157,7 +1143,6 @@ struct iwarp_query_qp_output_params
u8 reserved1[4] /* 64 bit alignment */;
};
/*
* iWARP query QP ramrod data
*/
@ -1166,7 +1151,6 @@ struct iwarp_query_qp_ramrod_data
struct regpair output_params_addr;
};
/*
* iWARP Ramrod Command IDs
*/
@ -1183,7 +1167,6 @@ enum iwarp_ramrod_cmd_id
MAX_IWARP_RAMROD_CMD_ID
};
/*
* Per PF iWARP retransmit path statistics
*/
@ -1193,7 +1176,6 @@ struct iwarp_rxmit_stats_drv
struct regpair tx_fast_retransmit_event_cnt /* Number of times fast retransmit event occurred */;
};
/*
* iWARP and TCP connection offload params passed by driver to FW in iWARP offload ramrod
*/
@ -1203,7 +1185,6 @@ struct iwarp_tcp_offload_ramrod_data
struct tcp_offload_params_opt2 tcp /* tcp offload params */;
};
/*
* iWARP MPA negotiation types
*/
@ -1214,9 +1195,6 @@ enum mpa_negotiation_mode
MAX_MPA_NEGOTIATION_MODE
};
/*
* iWARP MPA Enhanced mode RTR types
*/
@ -1233,11 +1211,6 @@ enum mpa_rtr_type
MAX_MPA_RTR_TYPE
};
/*
* unaligned opaque data received from LL2
*/
@ -1255,10 +1228,6 @@ struct unaligned_opaque_data
__le32 cid;
};
struct e4_mstorm_iwarp_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1297,8 +1266,6 @@ struct e4_mstorm_iwarp_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_ustorm_iwarp_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1369,8 +1336,6 @@ struct e4_ustorm_iwarp_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e4_ystorm_iwarp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1416,7 +1381,6 @@ struct e4_ystorm_iwarp_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_mstorm_iwarp_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1455,8 +1419,6 @@ struct e5_mstorm_iwarp_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_ustorm_iwarp_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1539,8 +1501,6 @@ struct e5_ustorm_iwarp_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e5_ystorm_iwarp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;

View File

@ -248,7 +248,6 @@ struct e4_rdma_task_context
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
};
struct e5_ystorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -483,8 +482,6 @@ struct e5_rdma_task_context
struct e5_ustorm_rdma_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
};
/*
* rdma function init ramrod data
*/
@ -497,7 +494,6 @@ struct rdma_close_func_ramrod_data
u8 reserved[4];
};
/*
* rdma function init CNQ parameters
*/
@ -512,7 +508,6 @@ struct rdma_cnq_params
u8 reserved1[6];
};
/*
* rdma create cq ramrod data
*/
@ -531,7 +526,6 @@ struct rdma_create_cq_ramrod_data
__le16 reserved1;
};
/*
* rdma deregister tid ramrod data
*/
@ -541,7 +535,6 @@ struct rdma_deregister_tid_ramrod_data
__le32 reserved;
};
/*
* rdma destroy cq output params
*/
@ -552,7 +545,6 @@ struct rdma_destroy_cq_output_params
__le32 reserved1;
};
/*
* rdma destroy cq ramrod data
*/
@ -561,7 +553,6 @@ struct rdma_destroy_cq_ramrod_data
struct regpair output_params_addr;
};
/*
* RDMA slow path EQ cmd IDs
*/
@ -581,7 +572,6 @@ enum rdma_event_opcode
MAX_RDMA_EVENT_OPCODE
};
/*
* RDMA FW return code for slow path ramrods
*/
@ -595,7 +585,6 @@ enum rdma_fw_return_code
MAX_RDMA_FW_RETURN_CODE
};
/*
* rdma function init header
*/
@ -612,7 +601,6 @@ struct rdma_init_func_hdr
__le32 reserved;
};
/*
* rdma function init ramrod data
*/
@ -622,7 +610,6 @@ struct rdma_init_func_ramrod_data
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
/*
* RDMA ramrod command IDs
*/
@ -642,7 +629,6 @@ enum rdma_ramrod_cmd_id
MAX_RDMA_RAMROD_CMD_ID
};
/*
* rdma register tid ramrod data
*/
@ -699,7 +685,6 @@ struct rdma_register_tid_ramrod_data
__le32 reserved4[2];
};
/*
* rdma resize cq output params
*/
@ -709,7 +694,6 @@ struct rdma_resize_cq_output_params
__le32 old_cq_prod /* cq producer value on old PBL */;
};
/*
* rdma resize cq ramrod data
*/
@ -729,7 +713,6 @@ struct rdma_resize_cq_ramrod_data
struct regpair output_params_addr;
};
/*
* The rdma SRQ context
*/
@ -738,7 +721,6 @@ struct rdma_srq_context
struct regpair temp[8];
};
/*
* rdma create qp requester ramrod data
*/
@ -764,7 +746,6 @@ struct rdma_srq_create_ramrod_data
struct regpair producers_addr /* SRQ PBL base address */;
};
/*
* rdma create qp requester ramrod data
*/
@ -774,7 +755,6 @@ struct rdma_srq_destroy_ramrod_data
__le32 reserved;
};
/*
* rdma create qp requester ramrod data
*/
@ -784,7 +764,6 @@ struct rdma_srq_modify_ramrod_data
__le32 wqe_limit;
};
/*
* RDMA Tid type enumeration (for register_tid ramrod)
*/
@ -797,7 +776,6 @@ enum rdma_tid_type
MAX_RDMA_TID_TYPE
};
/*
* The rdma XRC SRQ context
*/
@ -806,9 +784,6 @@ struct rdma_xrc_srq_context
struct regpair temp[9];
};
struct E4XstormRoceConnAgCtxDqExtLdPart
{
u8 reserved0 /* cdu_validation */;
@ -1037,7 +1012,6 @@ struct E4XstormRoceConnAgCtxDqExtLdPart
__le32 reg4 /* reg4 */;
};
struct e4_mstorm_rdma_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1076,8 +1050,6 @@ struct e4_mstorm_rdma_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_tstorm_rdma_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1183,7 +1155,6 @@ struct e4_tstorm_rdma_conn_ag_ctx
__le32 reg10 /* reg10 */;
};
struct e4_tstorm_rdma_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1264,7 +1235,6 @@ struct e4_tstorm_rdma_task_ag_ctx
__le32 reg2 /* reg2 */;
};
struct e4_ustorm_rdma_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1335,8 +1305,6 @@ struct e4_ustorm_rdma_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e4_xstorm_rdma_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1567,7 +1535,6 @@ struct e4_xstorm_rdma_conn_ag_ctx
__le32 reg6 /* cf_array1 */;
};
struct e4_ystorm_rdma_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1613,8 +1580,6 @@ struct e4_ystorm_rdma_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_mstorm_rdma_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1653,8 +1618,6 @@ struct e5_mstorm_rdma_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_tstorm_rdma_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1776,7 +1739,6 @@ struct e5_tstorm_rdma_conn_ag_ctx
__le16 e4_reserved9 /* word4 */;
};
struct e5_tstorm_rdma_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1857,7 +1819,6 @@ struct e5_tstorm_rdma_task_ag_ctx
__le32 reg2 /* reg2 */;
};
struct e5_ustorm_rdma_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1940,8 +1901,6 @@ struct e5_ustorm_rdma_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e5_xstorm_rdma_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -2172,7 +2131,6 @@ struct e5_xstorm_rdma_conn_ag_ctx
__le32 reg6 /* cf_array1 */;
};
struct e5_ystorm_rdma_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2218,5 +2176,4 @@ struct e5_ystorm_rdma_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
#endif /* __ECORE_HSI_RDMA__ */

View File

@ -107,7 +107,6 @@ struct e4_roce_conn_context
struct regpair ustorm_st_padding[2] /* padding */;
};
/*
* roce connection context
*/
@ -128,9 +127,6 @@ struct e5_roce_conn_context
struct regpair ustorm_st_padding[2] /* padding */;
};
/*
* roce create qp requester ramrod data
*/
@ -181,7 +177,6 @@ struct roce_create_qp_req_ramrod_data
__le16 dpi;
};
/*
* roce create qp responder ramrod data
*/
@ -242,7 +237,6 @@ struct roce_create_qp_resp_ramrod_data
__le16 dpi;
};
/*
* roce DCQCN received statistics
*/
@ -252,7 +246,6 @@ struct roce_dcqcn_received_stats
struct regpair cnp_pkt_rcv /* The number of total RoCE packets with CNP opcode received */;
};
/*
* roce DCQCN sent statistics
*/
@ -261,7 +254,6 @@ struct roce_dcqcn_sent_stats
struct regpair cnp_pkt_sent /* The number of total RoCE packets with CNP opcode sent */;
};
/*
* RoCE destroy qp requester output params
*/
@ -271,7 +263,6 @@ struct roce_destroy_qp_req_output_params
__le32 cq_prod /* Completion producer value at destroy QP */;
};
/*
* RoCE destroy qp requester ramrod data
*/
@ -280,7 +271,6 @@ struct roce_destroy_qp_req_ramrod_data
struct regpair output_params_addr;
};
/*
* RoCE destroy qp responder output params
*/
@ -290,7 +280,6 @@ struct roce_destroy_qp_resp_output_params
__le32 cq_prod /* Completion producer value at destroy QP */;
};
/*
* RoCE destroy qp responder ramrod data
*/
@ -299,7 +288,6 @@ struct roce_destroy_qp_resp_ramrod_data
struct regpair output_params_addr;
};
/*
* roce special events statistics
*/
@ -312,7 +300,6 @@ struct roce_events_stats
__le32 reserved;
};
/*
* ROCE slow path EQ cmd IDs
*/
@ -327,7 +314,6 @@ enum roce_event_opcode
MAX_ROCE_EVENT_OPCODE
};
/*
* roce func init ramrod data
*/
@ -340,7 +326,6 @@ struct roce_init_func_params
__le32 cnp_send_timeout /* The minimal difference of send time between CNP packets for specific QP. Units are in microseconds */;
};
/*
* roce func init ramrod data
*/
@ -350,7 +335,6 @@ struct roce_init_func_ramrod_data
struct roce_init_func_params roce;
};
/*
* roce modify qp requester ramrod data
*/
@ -403,7 +387,6 @@ struct roce_modify_qp_req_ramrod_data
__le32 dst_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
};
/*
* roce modify qp responder ramrod data
*/
@ -452,7 +435,6 @@ struct roce_modify_qp_resp_ramrod_data
__le32 dst_gid[4] /* BE order. In case of IPv4 the higher register will hold the address. Low registers must be zero! */;
};
/*
* RoCE query qp requester output params
*/
@ -468,7 +450,6 @@ struct roce_query_qp_req_output_params
#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
};
/*
* RoCE query qp requester ramrod data
*/
@ -477,7 +458,6 @@ struct roce_query_qp_req_ramrod_data
struct regpair output_params_addr;
};
/*
* RoCE query qp responder output params
*/
@ -491,7 +471,6 @@ struct roce_query_qp_resp_output_params
#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
};
/*
* RoCE query qp responder ramrod data
*/
@ -500,7 +479,6 @@ struct roce_query_qp_resp_ramrod_data
struct regpair output_params_addr;
};
/*
* ROCE ramrod command IDs
*/
@ -515,11 +493,6 @@ enum roce_ramrod_cmd_id
MAX_ROCE_RAMROD_CMD_ID
};
struct e4_mstorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -558,7 +531,6 @@ struct e4_mstorm_roce_req_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_mstorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -597,7 +569,6 @@ struct e4_mstorm_roce_resp_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e4_tstorm_roce_req_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -703,7 +674,6 @@ struct e4_tstorm_roce_req_conn_ag_ctx
__le32 reg10 /* reg10 */;
};
struct e4_tstorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -809,7 +779,6 @@ struct e4_tstorm_roce_resp_conn_ag_ctx
__le32 reg10 /* reg10 */;
};
struct e4_ustorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -880,7 +849,6 @@ struct e4_ustorm_roce_req_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e4_ustorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -951,7 +919,6 @@ struct e4_ustorm_roce_resp_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e4_xstorm_roce_req_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1182,7 +1149,6 @@ struct e4_xstorm_roce_req_conn_ag_ctx
__le32 orq_cons /* cf_array1 */;
};
struct e4_xstorm_roce_resp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1415,7 +1381,6 @@ struct e4_xstorm_roce_resp_conn_ag_ctx
__le32 msn_and_syndrome /* cf_array1 */;
};
struct e4_ystorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1461,7 +1426,6 @@ struct e4_ystorm_roce_req_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e4_ystorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1507,7 +1471,6 @@ struct e4_ystorm_roce_resp_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct E5XstormRoceConnAgCtxDqExtLdPart
{
u8 reserved0 /* cdu_validation */;
@ -1774,7 +1737,6 @@ struct E5XstormRoceConnAgCtxDqExtLdPart
__le32 reg13 /* reg13 */;
};
struct e5_mstorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1813,7 +1775,6 @@ struct e5_mstorm_roce_req_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_mstorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -1852,7 +1813,6 @@ struct e5_mstorm_roce_resp_conn_ag_ctx
__le32 reg1 /* reg1 */;
};
struct e5_tstorm_roce_req_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1974,7 +1934,6 @@ struct e5_tstorm_roce_req_conn_ag_ctx
__le16 e4_reserved9 /* word4 */;
};
struct e5_tstorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2096,7 +2055,6 @@ struct e5_tstorm_roce_resp_conn_ag_ctx
__le16 e4_reserved9 /* word4 */;
};
struct e5_ustorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2179,7 +2137,6 @@ struct e5_ustorm_roce_req_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e5_ustorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2262,7 +2219,6 @@ struct e5_ustorm_roce_resp_conn_ag_ctx
__le16 word3 /* word3 */;
};
struct e5_xstorm_roce_req_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -2493,7 +2449,6 @@ struct e5_xstorm_roce_req_conn_ag_ctx
__le32 orq_cons /* cf_array1 */;
};
struct e5_xstorm_roce_resp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -2726,7 +2681,6 @@ struct e5_xstorm_roce_resp_conn_ag_ctx
__le32 msn_and_syndrome /* cf_array1 */;
};
struct e5_ystorm_roce_req_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2772,7 +2726,6 @@ struct e5_ystorm_roce_req_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
struct e5_ystorm_roce_resp_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2818,7 +2771,6 @@ struct e5_ystorm_roce_resp_conn_ag_ctx
__le32 reg3 /* reg3 */;
};
/*
* Roce doorbell data
*/

View File

@ -235,7 +235,6 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn,
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0);
if (vport_rl_en) {
/* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@ -573,7 +572,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn,
u32 pf_rl)
{
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
@ -692,7 +691,6 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
@ -814,7 +812,7 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
u16 pf_wfq)
{
u32 inc_val;
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
@ -832,7 +830,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
u32 pf_rl)
{
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration\n");
@ -912,7 +910,6 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
/* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
/* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
@ -959,7 +956,6 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
/* NIG: packet prioritry configuration constants */
#define NIG_PRIORITY_MAP_TC_BITS 4
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req* req,
@ -1036,7 +1032,6 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
/* Configure and enable global MAC+LB RL */
if (req->lb_mac_rate) {
/* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
@ -1054,7 +1049,6 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
/* Configure and enable global LB-only RL */
if (req->lb_rate) {
/* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
@ -1068,7 +1062,6 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
/* Per-TC RLs */
for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; tc++, reg_offset += 4) {
/* Disable TC RL */
ctrl = NIG_RL_BASE_TYPE << NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
@ -1123,7 +1116,6 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight,mtu) (2 * ((weight) > (mtu) ? (weight) : (mtu)))
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req* req)
@ -1275,7 +1267,6 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
#endif /* UNUSED_HSI_FUNC */
#define SET_TUNNEL_TYPE_ENABLE_BIT(var,offset,enable) var = ((var) & ~(1 << (offset))) | ( (enable) ? (1 << (offset)) : 0)
#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
#define PRS_ETH_OUTPUT_FORMAT -46832
@ -1452,7 +1443,6 @@ void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id)
@ -1470,7 +1460,6 @@ void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE*pf_id + REG_SIZE, 0);
}
void ecore_set_gft_event_id_cm_hdr (struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@ -1589,7 +1578,6 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
#endif /* UNUSED_HSI_FUNC */
/* Configure VF zone size mode*/
@ -1843,5 +1831,3 @@ void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn * p_hwfn,
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
}

View File

@ -102,7 +102,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
@ -241,7 +240,6 @@ static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, addr, fill);
}
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *cmd,

View File

@ -76,7 +76,6 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_init_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_clear_rt_data - Clears the runtime init array.
*
@ -121,7 +120,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
#define STORE_RT_REG_AGG(hwfn, offset, val) \
ecore_init_store_rt_agg(hwfn, offset, (u32*)&val, sizeof(val))
/**
* @brief
* Initialize GTT global windows and set admin window

View File

@ -497,7 +497,6 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
/* obtain data about db drop/overflow */
first_drop_reason = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_REASON) &
@ -1819,7 +1818,6 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
return ECORE_NOMEM;
}
/* Status Block setup */
p_hwfn->p_sp_sb = p_sb;
ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,

View File

@ -290,4 +290,3 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#endif /* __ECORE_INT_H__ */

View File

@ -790,7 +790,6 @@ static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn OSAL_UNUSED *p_hwfn, bool OSAL_UNUSED b_untagged_only, int OSAL_UNUSED vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED *opaque_fid) {}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn OSAL_UNUSED p_hwfn, u16 OSAL_UNUSED pvid, int OSAL_UNUSED vfid) {}
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, struct ecore_ptt OSAL_UNUSED *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
@ -806,7 +805,6 @@ static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn OSAL_
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, int OSAL_UNUSED val) { return ECORE_INVAL; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, struct ecore_eth_stats OSAL_UNUSED *p_stats) { return ECORE_INVAL; }
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return OSAL_NULL;}

View File

@ -185,4 +185,3 @@ ecore_sp_iscsi_conn_clear_sq(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_comp_addr);
#endif /*__ECORE_ISCSI_H__*/

View File

@ -276,7 +276,6 @@ enum _ecore_status_t
ecore_iscsi_terminate_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_update_connection - updates previously
* offloaded iSCSI connection

View File

@ -600,7 +600,6 @@ ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
fpdu->incomplete_bytes);
if (qp->iwarp_state != ECORE_IWARP_QP_STATE_ERROR) {
rc = ecore_iwarp_modify_qp(p_hwfn, qp,
ECORE_IWARP_QP_STATE_ERROR,
false);
@ -1724,7 +1723,6 @@ ecore_iwarp_resc_free(struct ecore_hwfn *p_hwfn)
ecore_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
}
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
struct ecore_iwarp_accept_in *iparams)
@ -1972,7 +1970,6 @@ ecore_iwarp_get_listener(struct ecore_hwfn *p_hwfn,
OSAL_LIST_FOR_EACH_ENTRY(listener,
&p_hwfn->p_rdma_info->iwarp.listen_list,
list_entry, struct ecore_iwarp_listener) {
if (listener->port == cm_info->local_port) {
/* Any IP (i.e. 0.0.0.0 ) will be treated as any vlan */
if (!OSAL_MEMCMP(listener->ip_addr,
@ -3026,7 +3023,6 @@ ecore_iwarp_ll2_stop(struct ecore_hwfn *p_hwfn)
int rc = 0;
if (iwarp_info->ll2_syn_handle != ECORE_IWARP_HANDLE_INVAL) {
rc = ecore_ll2_terminate_connection(p_hwfn,
iwarp_info->ll2_syn_handle);
if (rc)

View File

@ -808,7 +808,7 @@ enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
val = p_params->update_anti_spoofing_en_flg;
p_ramrod->common.update_anti_spoofing_en_flg = val;
rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc != ECORE_SUCCESS) {
/* Return spq entry which is taken in ecore_sp_init_request()*/

View File

@ -31,7 +31,6 @@
#ifndef __ECORE_L2_H__
#define __ECORE_L2_H__
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_spq.h"

View File

@ -780,7 +780,6 @@ ecore_ll2_lb_txq_completion(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
while (num_bds) {
if (OSAL_LIST_IS_EMPTY(&p_tx->active_descq))
return ECORE_INVAL;
@ -1754,7 +1753,6 @@ static void ecore_ll2_tx_packet_notify(struct ecore_hwfn *p_hwfn,
p_ll2_conn->tx_queue.cur_send_packet->bd_used)
return;
/* Push the current packet to the list and clean after it */
OSAL_LIST_PUSH_TAIL(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
&p_ll2_conn->tx_queue.sending_descq);

View File

@ -2545,7 +2545,6 @@ enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
return ECORE_INVAL;
} else {
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
MISC_REG_GEN_PURP_CR0);
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_MCP_H__
#define __ECORE_MCP_H__

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

View File

@ -99,7 +99,6 @@ static inline void
ecore_ooo_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
#endif
void ecore_ooo_save_history_entry(struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
@ -149,4 +148,3 @@ void ecore_ooo_dump_rx_event(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_buffer *p_buffer);
#endif /*__ECORE_OOO_H__*/

View File

@ -90,7 +90,6 @@ struct ecore_fcoe_pf_params {
/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
struct ecore_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[3];
u16 cq_num_entries;
@ -172,5 +171,3 @@ struct ecore_pf_params {
};
#endif

View File

@ -506,7 +506,6 @@ void ecore_rdma_bmap_free(struct ecore_hwfn *p_hwfn,
bmap->bitmap = OSAL_NULL;
}
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn)
{
if (IS_IWARP(p_hwfn))
@ -584,7 +583,6 @@ static void ecore_rdma_get_guid(struct ecore_hwfn *p_hwfn, u8 *guid)
guid[7] = mac_addr[5];
}
static void ecore_rdma_init_events(
struct ecore_hwfn *p_hwfn,
struct ecore_rdma_start_in_params *params)
@ -833,7 +831,6 @@ ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
&p_ent->ramrod.iwarp_init_func);
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
} else {
#ifdef CONFIG_DCQCN
rc = ecore_roce_dcqcn_cfg(p_hwfn, &params->roce.dcqcn_params,
&p_ent->ramrod.roce_init_func, p_ptt);
@ -849,7 +846,6 @@ ecore_rdma_start_fw(struct ecore_hwfn *p_hwfn,
ll2_queue_id = ecore_ll2_handle_to_queue_id(
p_hwfn, params->roce.ll2_handle);
p_ent->ramrod.roce_init_func.roce.ll2_queue_id = ll2_queue_id;
}
pheader = &p_ramrod->params_header;
@ -974,7 +970,6 @@ static enum _ecore_status_t ecore_rdma_setup(struct ecore_hwfn *p_hwfn,
return ecore_rdma_start_fw(p_hwfn, p_ptt, params);
}
enum _ecore_status_t ecore_rdma_stop(void *rdma_cxt)
{
struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)rdma_cxt;
@ -1454,7 +1449,6 @@ void ecore_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_ecore_mac)
p_fw_mac[2] = OSAL_CPU_TO_LE16((p_ecore_mac[4] << 8) + p_ecore_mac[5]);
}
enum _ecore_status_t ecore_rdma_query_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_query_qp_out_params *out_params)
@ -1496,7 +1490,6 @@ enum _ecore_status_t ecore_rdma_query_qp(void *rdma_cxt,
return rc;
}
enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_destroy_qp_out_params *out_params)
@ -1524,7 +1517,6 @@ enum _ecore_status_t ecore_rdma_destroy_qp(void *rdma_cxt,
return rc;
}
struct ecore_rdma_qp *ecore_rdma_create_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_in_params *in_params,
struct ecore_rdma_create_qp_out_params *out_params)

View File

@ -276,4 +276,3 @@ ecore_rdma_allocated_qps(struct ecore_hwfn *p_hwfn);
u16 ecore_rdma_get_fw_srq_id(struct ecore_hwfn *p_hwfn, u16 id, bool is_xrc);
#endif /*__ECORE_RDMA_H__*/

View File

@ -86,8 +86,6 @@ ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
#ifdef CONFIG_DCQCN
static enum _ecore_status_t ecore_roce_start_rl(
struct ecore_hwfn *p_hwfn,
@ -194,7 +192,6 @@ enum _ecore_status_t ecore_roce_dcqcn_cfg(
}
#endif
enum _ecore_status_t ecore_roce_stop(struct ecore_hwfn *p_hwfn)
{
struct ecore_bmap *cid_map = &p_hwfn->p_rdma_info->cid_map;
@ -220,7 +217,6 @@ enum _ecore_status_t ecore_roce_stop(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
static void ecore_rdma_copy_gids(struct ecore_rdma_qp *qp, __le32 *src_gid,
__le32 *dst_gid) {
u32 i;
@ -1333,7 +1329,6 @@ enum _ecore_status_t ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid)
return rc;
}
enum _ecore_status_t ecore_roce_create_ud_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_out_params *out_params)
{
@ -1387,7 +1382,6 @@ enum _ecore_status_t ecore_roce_create_ud_qp(void *rdma_cxt,
return rc;
}
enum _ecore_status_t
ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_qp *qp,
@ -1500,7 +1494,6 @@ ecore_roce_modify_qp(struct ecore_hwfn *p_hwfn,
&num_bound_mw,
&qp->cq_prod.req);
if (rc != ECORE_SUCCESS)
return rc;

View File

@ -35,7 +35,6 @@
#define ETH_ALEN 6
#endif
enum ecore_roce_ll2_tx_dest
{
ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
@ -326,7 +325,6 @@ struct ecore_rdma_create_cq_in_params {
u16 int_timeout;
};
struct ecore_rdma_resize_cq_in_params {
/* input variables (given by miniport) */
@ -340,7 +338,6 @@ struct ecore_rdma_resize_cq_in_params {
*/
};
enum roce_mode
{
ROCE_V1,
@ -711,7 +708,6 @@ void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
/* iWARP API */
enum ecore_iwarp_event_type {
ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
@ -834,7 +830,6 @@ struct ecore_iwarp_tcp_abort_in {
void *ep_context;
};
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
struct ecore_iwarp_connect_in *iparams,

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __RT_DEFS_H__
#define __RT_DEFS_H__

View File

@ -51,7 +51,6 @@ struct ecore_spq_comp_cb {
void *cookie;
};
/**
* @brief ecore_eth_cqe_completion - handles the completion of a
* ramrod on the cqe ring

View File

@ -512,7 +512,6 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_SP_COMMANDS_H__
#define __ECORE_SP_COMMANDS_H__

View File

@ -30,7 +30,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
@ -735,7 +734,6 @@ enum _ecore_status_t ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
OSAL_SPIN_LOCK(&p_spq->lock);
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
@ -795,7 +793,6 @@ static enum _ecore_status_t ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
if (p_ent->queue == &p_spq->unlimited_pending) {
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
OSAL_LIST_PUSH_TAIL(&p_ent->list,
&p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
@ -1043,7 +1040,6 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
&p_spq->completion_pending,
list,
struct ecore_spq_entry) {
if (p_ent->elem.hdr.echo == echo) {
OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
&p_spq->completion_pending);

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

View File

@ -52,4 +52,3 @@ enum _ecore_status_t {
};
#endif /* __ECORE_STATUS_H__ */

View File

@ -58,7 +58,6 @@
#define ntohs(val) OSAL_BE16_TO_CPU(val)
#endif
struct ecore_ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@ -581,7 +580,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
(u8 *)p_hwfn->p_dev->doorbells;
}
#endif
}
/* Allocate vf2pf msg */

View File

@ -344,7 +344,6 @@ u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id);
static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, u16 OSAL_UNUSED bd_max_bytes, dma_addr_t OSAL_UNUSED bd_chain_phys_addr, dma_addr_t OSAL_UNUSED cqe_pbl_addr, u16 OSAL_UNUSED cqe_pbl_size, void OSAL_IOMEM OSAL_UNUSED **pp_prod) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, dma_addr_t OSAL_UNUSED pbl_addr, u16 OSAL_UNUSED pbl_size, void OSAL_IOMEM OSAL_UNUSED **pp_doorbell) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, bool OSAL_UNUSED cqe_completion) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid) {return ECORE_INVAL;}
#ifndef LINUX_REMOVE
@ -355,7 +354,6 @@ static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn OSAL
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id, struct ecore_sb_info OSAL_UNUSED *p_sb) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vport_id, u16 OSAL_UNUSED mtu, u8 OSAL_UNUSED inner_vlan_removal, enum ecore_tpa_mode OSAL_UNUSED tpa_mode, u8 OSAL_UNUSED max_buffers_per_cqe, u8 OSAL_UNUSED only_untagged, u8 OSAL_UNUSED zero_placement_offset) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_filter_ucast OSAL_UNUSED *p_param) {return ECORE_INVAL;}

View File

@ -215,7 +215,6 @@ ecore_vf_get_num_cids(struct ecore_hwfn OSAL_UNUSED *p_hwfn,
u8 OSAL_UNUSED *num_cids)
{
}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *port_mac) {}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_vlan_filters) {}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_mac_filters) {}

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ETH_COMMON__
#define __ETH_COMMON__
/********************/
@ -104,8 +103,6 @@
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/*
* Destination port mode
*/
@ -118,7 +115,6 @@ enum dest_port_mode
MAX_DEST_PORT_MODE
};
/*
* Ethernet address type
*/
@ -131,7 +127,6 @@ enum eth_addr_type
MAX_ETH_ADDR_TYPE
};
struct eth_tx_1st_bd_flags
{
u8 bitfields;
@ -214,7 +209,6 @@ struct eth_edpm_fw_data
__le32 reserved;
};
/*
* tunneling parsing flags
*/
@ -276,7 +270,6 @@ struct eth_fast_path_rx_reg_cqe
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-continue ETH Rx FP CQE.
*/
@ -292,7 +285,6 @@ struct eth_fast_path_rx_tpa_cont_cqe
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-end ETH Rx FP CQE .
*/
@ -312,7 +304,6 @@ struct eth_fast_path_rx_tpa_end_cqe
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-start ETH Rx FP CQE.
*/
@ -341,7 +332,6 @@ struct eth_fast_path_rx_tpa_start_cqe
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* The L4 pseudo checksum mode for Ethernet
*/
@ -352,14 +342,11 @@ enum eth_l4_pseudo_checksum_mode
MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};
struct eth_rx_bd
{
struct regpair addr /* single continues buffer */;
};
/*
* regular ETH Rx SP CQE
*/
@ -386,7 +373,6 @@ union eth_rx_cqe
struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
};
/*
* ETH Rx CQE type
*/
@ -401,7 +387,6 @@ enum eth_rx_cqe_type
MAX_ETH_RX_CQE_TYPE
};
/*
* Wrapper for PD RX CQE - used in order to cover full cache line when writing CQE
*/
@ -411,7 +396,6 @@ struct eth_rx_pmd_cqe
u8 reserved[ETH_RX_CQE_GAP];
};
/*
* Eth RX Tunnel Type
*/
@ -424,8 +408,6 @@ enum eth_rx_tunn_type
MAX_ETH_RX_TUNN_TYPE
};
/*
* Aggregation end reason.
*/
@ -442,8 +424,6 @@ enum eth_tpa_end_reason
MAX_ETH_TPA_END_REASON
};
/*
* The first tx bd of a given packet
*/
@ -454,8 +434,6 @@ struct eth_tx_1st_bd
struct eth_tx_data_1st_bd data /* Parsing information data. */;
};
/*
* The second tx bd of a given packet
*/
@ -466,7 +444,6 @@ struct eth_tx_2nd_bd
struct eth_tx_data_2nd_bd data /* Parsing information data. */;
};
/*
* The parsing information data for the third tx bd of a given packet.
*/
@ -496,7 +473,6 @@ struct eth_tx_3rd_bd
struct eth_tx_data_3rd_bd data /* Parsing information data. */;
};
/*
* Complementary information for the regular tx bd of a given packet.
*/
@ -523,7 +499,6 @@ struct eth_tx_bd
struct eth_tx_data_bd data /* Complementary information. */;
};
union eth_tx_bd_types
{
struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
@ -532,11 +507,6 @@ union eth_tx_bd_types
struct eth_tx_bd reg_bd /* The common non-special bd */;
};
/*
* Eth Tx Tunnel Type
*/
@ -549,7 +519,6 @@ enum eth_tx_tunn_type
MAX_ETH_TX_TUNN_TYPE
};
/*
* Ystorm Queue Zone
*/
@ -559,7 +528,6 @@ struct xstorm_eth_queue_zone
u8 reserved[7];
};
/*
* ETH doorbell data
*/
@ -580,7 +548,6 @@ struct eth_db_data
__le16 bd_prod;
};
/*
* RSS hash type
*/

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__
/*********************/
@ -37,10 +36,6 @@
#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12
/*
* The fcoe storm task context protection-information of Ystorm
*/
@ -612,7 +607,6 @@ struct e4_fcoe_task_context
struct rdif_task_context rdif_context /* rdif context */;
};
struct e5_ystorm_fcoe_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -930,8 +924,6 @@ struct e5_fcoe_task_context
struct rdif_task_context rdif_context /* rdif context */;
};
/*
* FCoE additional WQE (Sq/ XferQ) information
*/
@ -943,8 +935,6 @@ union fcoe_additional_info_union
__le32 seq_rec_updated_offset /* The updated offset in SGL - Used in sequence recovery */;
};
/*
* FCoE Ramrod Command IDs
*/
@ -956,7 +946,6 @@ enum fcoe_completion_status
MAX_FCOE_COMPLETION_STATUS
};
/*
* FC address (SID/DID) network presentation
*/
@ -1022,7 +1011,6 @@ struct fcoe_conn_offload_ramrod_data
u8 reserved[5];
};
/*
* FCoE terminate connection request
*/
@ -1031,7 +1019,6 @@ struct fcoe_conn_terminate_ramrod_data
struct regpair terminate_params_addr /* Terminate params ptr */;
};
/*
* FCoE device type
*/
@ -1042,9 +1029,6 @@ enum fcoe_device_type
MAX_FCOE_DEVICE_TYPE
};
/*
* Data sgl
*/
@ -1056,10 +1040,6 @@ struct fcoe_fast_sgl_ctx
__le16 init_offset_in_first_sge /* offset from the beginning of the first page in the SGL, never changed by FW */;
};
/*
* FCoE firmware function init
*/
@ -1072,7 +1052,6 @@ struct fcoe_init_func_ramrod_data
__le32 reserved[3];
};
/*
* FCoE: Mode of the connection: Target or Initiator or both
*/
@ -1084,7 +1063,6 @@ enum fcoe_mode_type
MAX_FCOE_MODE_TYPE
};
/*
* Per PF FCoE receive path statistics - tStorm RAM structure
*/
@ -1102,8 +1080,6 @@ struct fcoe_rx_stat
__le32 rsrv;
};
/*
* FCoE SQE request type
*/
@ -1124,7 +1100,6 @@ enum fcoe_sqe_request_type
MAX_FCOE_SQE_REQUEST_TYPE
};
/*
* FCoe statistics request
*/
@ -1133,7 +1108,6 @@ struct fcoe_stat_ramrod_data
struct regpair stat_params_addr /* Statistics host address */;
};
/*
* FCoE task type
*/
@ -1155,13 +1129,6 @@ enum fcoe_task_type
MAX_FCOE_TASK_TYPE
};
/*
* Per PF FCoE transmit path statistics - pStorm RAM structure
*/
@ -1173,7 +1140,6 @@ struct fcoe_tx_stat
struct regpair fcoe_tx_other_pkt_cnt /* Transmitted FCoE packets which are not DATA/XFER_RDY count */;
};
/*
* FCoE SQ/XferQ element
*/
@ -1198,14 +1164,6 @@ struct fcoe_wqe
union fcoe_additional_info_union additional_info_union /* Additional wqe information (if needed) */;
};
/*
* FCoE XFRQ element
*/
@ -1222,16 +1180,6 @@ struct xfrqe_prot_flags
#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
/*
* FCoE doorbell data
*/

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ISCSI_COMMON__
#define __ISCSI_COMMON__
/**********************/
@ -73,7 +72,6 @@
#define ISCSI_INITIATOR_MODE 0
#define ISCSI_TARGET_MODE 1
/* iSCSI request op codes */
#define ISCSI_OPCODE_NOP_OUT (0)
#define ISCSI_OPCODE_SCSI_CMD (1)
@ -105,7 +103,6 @@
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)
/*
* Union of data bd_opaque/ tq_tid
*/
@ -115,7 +112,6 @@ union bd_opaque_tq_union
__le16 tq_tid /* Immediate Data with DIF TQe TID */;
};
/*
* ISCSI SGL entry
*/
@ -136,14 +132,12 @@ struct cqe_error_bitmap
#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7
};
union cqe_error_status
{
u8 error_status /* all error bits as uint8 */;
struct cqe_error_bitmap error_bits /* cqe errors bitmap */;
};
/*
* iSCSI Login Response PDU header
*/
@ -152,7 +146,6 @@ struct data_hdr
__le32 data[12] /* iscsi header data */;
};
struct lun_mapper_addr_reserved
{
struct regpair lun_mapper_addr /* Lun mapper address */;
@ -219,8 +212,6 @@ union dif_configuration_params
struct dif_on_immediate_params def_dif_conf /* default dif on immediate rdif configuration */;
};
/*
* Union of data/r2t sequence number
*/
@ -1101,7 +1092,6 @@ struct e4_iscsi_task_context
struct rdif_task_context rdif_context /* rdif context */;
};
struct e5_ystorm_iscsi_task_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1328,10 +1318,6 @@ struct e5_iscsi_task_context
struct rdif_task_context rdif_context /* rdif context */;
};
/*
* ISCSI connection offload params passed by driver to FW in ISCSI offload ramrod
*/
@ -1359,7 +1345,6 @@ struct iscsi_conn_offload_params
__le32 stat_sn /* StatSn for Target Mode only: the first Login Response StatSn value for Target mode */;
};
/*
* iSCSI connection statistics
*/
@ -1375,7 +1360,6 @@ struct iscsi_conn_stats_params
__le32 reserved;
};
/*
* spe message header
*/
@ -1425,7 +1409,6 @@ struct iscsi_conn_update_ramrod_params
union dif_configuration_params dif_on_imme_params /* dif on immmediate params - Target mode Only */;
};
/*
* iSCSI CQ element
*/
@ -1481,7 +1464,6 @@ union iscsi_cqe
struct iscsi_cqe_unsolicited cqe_unsolicited /* Unsolicited CQE. relevant only when cqe_opcode == ISCSI_CQE_TYPE_UNSOLICITED */;
};
/*
* iSCSI CQE type
*/
@ -1495,10 +1477,6 @@ enum iscsi_cqes_type
MAX_ISCSI_CQES_TYPE
};
/*
* iSCSI CQE type
*/
@ -1512,9 +1490,6 @@ enum iscsi_cqe_unsolicited_type
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
/*
* iscsi debug modes
*/
@ -1539,8 +1514,6 @@ struct iscsi_debug_modes
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
};
/*
* iSCSI kernel completion queue IDs
*/
@ -1569,7 +1542,6 @@ enum iscsi_eqe_opcode
MAX_ISCSI_EQE_OPCODE
};
/*
* iSCSI EQE and CQE completion status
*/
@ -1625,15 +1597,6 @@ enum iscsi_error_types
MAX_ISCSI_ERROR_TYPES
};
/*
* iSCSI Ramrod Command IDs
*/
@ -1651,12 +1614,6 @@ enum iscsi_ramrod_cmd_id
MAX_ISCSI_RAMROD_CMD_ID
};
/*
* ISCSI connection termination request
*/
@ -1671,7 +1628,6 @@ struct iscsi_spe_conn_mac_update
u8 reserved0[2];
};
/*
* ISCSI and TCP connection(Option 1) offload params passed by driver to FW in ISCSI offload ramrod
*/
@ -1684,7 +1640,6 @@ struct iscsi_spe_conn_offload
struct tcp_offload_params tcp /* iSCSI session offload params */;
};
/*
* ISCSI and TCP connection(Option 2) offload params passed by driver to FW in ISCSI offload ramrod
*/
@ -1697,7 +1652,6 @@ struct iscsi_spe_conn_offload_option2
struct tcp_offload_params_opt2 tcp /* iSCSI session offload params */;
};
/*
* ISCSI collect connection statistics request
*/
@ -1711,7 +1665,6 @@ struct iscsi_spe_conn_statistics
struct regpair stats_cnts_addr /* cmdq and unsolicited counters termination params */;
};
/*
* ISCSI connection termination request
*/
@ -1726,7 +1679,6 @@ struct iscsi_spe_conn_termination
struct regpair query_params_addr /* query_params_ptr */;
};
/*
* iSCSI firmware function destroy parameters
*/
@ -1737,7 +1689,6 @@ struct iscsi_spe_func_dstry
__le32 reserved1;
};
/*
* iSCSI firmware function init parameters
*/
@ -1761,8 +1712,6 @@ struct iscsi_spe_func_init
struct scsi_init_func_queues q_params /* SCSI RQ/CQ firmware function init parameters */;
};
/*
* iSCSI task type
*/
@ -1782,11 +1731,6 @@ enum iscsi_task_type
MAX_ISCSI_TASK_TYPE
};
/*
* iSCSI DesiredDataTransferLength/ttt union
*/
@ -1796,7 +1740,6 @@ union iscsi_ttt_txlen_union
__le32 ttt /* target transfer tag */;
};
/*
* iSCSI uHQ element
*/
@ -1822,7 +1765,6 @@ struct iscsi_uhqe
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
/*
* iSCSI WQ element
*/
@ -1844,7 +1786,6 @@ struct iscsi_wqe
#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
/*
* iSCSI wqe type
*/
@ -1860,7 +1801,6 @@ enum iscsi_wqe_type
MAX_ISCSI_WQE_TYPE
};
/*
* iSCSI xHQ element
*/
@ -1884,8 +1824,6 @@ struct iscsi_xhqe
__le16 reserved1;
};
/*
* Per PF iSCSI receive path statistics - mStorm RAM structure
*/
@ -1895,8 +1833,6 @@ struct mstorm_iscsi_stats_drv
struct regpair iscsi_rx_dup_ack_cnt /* Received Dup-ACKs - after 3 dup ack, the counter doesnt count the same dup ack */;
};
/*
* Per PF iSCSI transmit path statistics - pStorm RAM structure
*/
@ -1906,8 +1842,6 @@ struct pstorm_iscsi_stats_drv
struct regpair iscsi_tx_packet_cnt /* Counts the number of tx packets that were transmitted */;
};
/*
* Per PF iSCSI receive path statistics - tStorm RAM structure
*/
@ -1924,7 +1858,6 @@ struct tstorm_iscsi_stats_drv
__le32 iscsi_immq_threshold_cnt /* Counts the number of times elements in immQ reached threshold */;
};
/*
* Per PF iSCSI receive path statistics - uStorm RAM structure
*/
@ -1935,8 +1868,6 @@ struct ustorm_iscsi_stats_drv
struct regpair iscsi_rx_total_pdu_cnt /* Number of total PDUs that were received */;
};
/*
* Per PF iSCSI transmit path statistics - xStorm RAM structure
*/
@ -1948,7 +1879,6 @@ struct xstorm_iscsi_stats_drv
struct regpair iscsi_tx_delayed_ack_cnt /* Transmitted Delayed ACKs */;
};
/*
* Per PF iSCSI transmit path statistics - yStorm RAM structure
*/
@ -1961,11 +1891,6 @@ struct ystorm_iscsi_stats_drv
struct regpair iscsi_tx_tcp_pkt_cnt /* Transmitted In-Order TCP Packets */;
};
struct e4_tstorm_iscsi_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2046,10 +1971,6 @@ struct e4_tstorm_iscsi_task_ag_ctx
__le32 reg2 /* reg2 */;
};
struct e5_tstorm_iscsi_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
@ -2130,9 +2051,6 @@ struct e5_tstorm_iscsi_task_ag_ctx
__le32 reg2 /* reg2 */;
};
/*
* iSCSI doorbell data
*/

View File

@ -172,7 +172,6 @@ struct private_path {
u64 ecc_events;
};
/**************************************/
/* */
/* P R I V A T E P O R T */
@ -478,7 +477,6 @@ struct private_func {
struct dci_info_func dci_func;
};
/**************************************/
/* */
/* P R I V A T E D A T A */

View File

@ -108,7 +108,6 @@ struct eth_phy_cfg {
};
struct port_mf_cfg {
u32 dynamic_cfg; /* device control channel */
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
#define PORT_MF_CFG_OV_TAG_OFFSET 0
@ -351,7 +350,6 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_PROTOCOL_ID_OFFSET 16
};
/* FW structure in BE */
struct dcbx_app_priority_feature {
u32 flags;
@ -853,7 +851,6 @@ struct public_port {
/**************************************/
struct public_func {
u32 iscsi_boot_signature;
u32 iscsi_boot_block_offset;
@ -887,7 +884,6 @@ struct public_func {
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001
#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
#define FUNC_MF_CFG_PROTOCOL_OFFSET 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
@ -1240,7 +1236,6 @@ union drv_union_data {
};
struct public_drv_mb {
u32 drv_mb_header;
#define DRV_MSG_CODE_MASK 0xffff0000
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
@ -1735,7 +1730,6 @@ struct public_drv_mb {
#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000
#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000

View File

@ -62,5 +62,3 @@ struct mcp_trace {
};
#endif /* MFW_HSI_H */

View File

@ -28,7 +28,6 @@
*
*/
/****************************************************************************
*
* Name: nvm_cfg.h
@ -43,7 +42,6 @@
#ifndef NVM_CFG_H
#define NVM_CFG_H
#define NVM_CFG_version 0x83306
#define NVM_CFG_new_option_seq 26

View File

@ -28,7 +28,6 @@
*
*/
/****************************************************************************
* Name: nvm_map.h
*
@ -293,7 +292,6 @@ struct nvm_image {
#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image*)0)->f))))
struct hw_set_info {
u32 reg_type;
#define GRC_REG_TYPE 1

View File

@ -28,7 +28,6 @@
*
*/
#ifndef _PCICS_REG_DRIVER_H
#define _PCICS_REG_DRIVER_H

View File

@ -28,8 +28,6 @@
*
*/
/*
* File: qlnx_def.h
* Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131.
@ -70,7 +68,6 @@ enum QLNX_STATE {
#define BITS_PER_BYTE 8
#endif /* #ifndef BITS_PER_BYTE */
/*
* RX ring buffer contains pointer to kmalloc() data only,
*/
@ -128,13 +125,11 @@ struct qlnx_rx_queue {
uint16_t num_rx_buffers;
uint16_t rxq_id;
#ifdef QLNX_SOFT_LRO
struct lro_ctrl lro;
#endif
};
union db_prod {
struct eth_db_data data;
uint32_t raw;
@ -152,7 +147,6 @@ struct sw_tx_bd {
#define QLNX_MAX_SEGMENTS 255
struct qlnx_tx_queue {
int index; /* Queue index */
volatile __le16 *hw_cons_ptr;
struct sw_tx_bd sw_tx_ring[TX_RING_SIZE];
@ -179,7 +173,6 @@ struct qlnx_tx_queue {
(bd)->nbytes = htole16(len); \
} while (0);
#define QLNX_FP_MAX_SEGS 24
struct qlnx_fastpath {
@ -299,7 +292,6 @@ typedef struct qlnx_link_output qlnx_link_output_t;
#define QLNX_LINK_CAP_50000baseKR2_Full 0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full 0x0400
/* Functions definition */
#define XMIT_PLAIN 0
@ -327,7 +319,6 @@ typedef struct qlnx_link_output qlnx_link_output_t;
#define QLNX_TX_ELEM_MIN_THRESH 32
#define QLNX_TX_COMPL_THRESH 32
#define QLNX_TPA_MAX_AGG_BUFFERS (20)
#define QLNX_MAX_NUM_MULTICAST_ADDRS ECORE_MAX_MC_ADDRS
@ -342,7 +333,6 @@ typedef struct _qlnx_vf_attr {
} qlnx_vf_attr_t;
typedef struct _qlnx_sriov_task {
struct task pf_task;
struct taskqueue *pf_taskqueue;
@ -353,13 +343,11 @@ typedef struct _qlnx_sriov_task {
} qlnx_sriov_task_t;
/*
* Adapter structure contains the hardware independent information of the
* pci function.
*/
struct qlnx_host {
/* interface to ecore */
struct ecore_dev cdev;
@ -426,7 +414,6 @@ struct qlnx_host {
bus_dma_tag_t tx_tag;
bus_dma_tag_t rx_tag;
struct ecore_sb_info sb_array[QLNX_MAX_RSS];
struct qlnx_rx_queue rxq_array[QLNX_MAX_RSS];
struct qlnx_tx_queue txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
@ -456,7 +443,6 @@ struct qlnx_host {
#define QLNX_MAX_TSS_CNT(ha) ((ha->num_rss) * (ha->num_tc))
qlnx_ivec_t irq_vec[QLNX_MAX_RSS];
uint8_t filter;
uint32_t nmcast;
@ -468,7 +454,7 @@ struct qlnx_host {
struct ecore_rss_params rss_params;
uint32_t rx_buf_size;
bool rx_csum_offload;
uint32_t rx_coalesce_usecs;
uint32_t tx_coalesce_usecs;
@ -480,7 +466,7 @@ struct qlnx_host {
uint64_t err_illegal_intr;
uint64_t err_fp_null;
uint64_t err_get_proto_invalid_type;
/* error recovery related */
uint32_t error_recovery;
struct task err_task;
@ -535,7 +521,6 @@ typedef struct qlnx_host qlnx_host_t;
#define QLNX_MAX_TSO_FRAME_SIZE 65536
#define QLNX_MAX_TX_MBUF_SIZE 65536 /* bytes - bd_len = 16bits */
#define QL_MAC_CMP(mac1, mac2) \
((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \
(*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1)
@ -667,7 +652,6 @@ typedef struct qlnx_host qlnx_host_t;
} \
} while (0)
#else
#define QL_DPRINT1(ha, x, ...)
@ -692,7 +676,6 @@ typedef struct qlnx_host qlnx_host_t;
#define QL_ERR_INJCT_TX_INT_DIFF 0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL 0x0002
/*
* exported functions
*/
@ -711,7 +694,6 @@ extern void qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info);
extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
u16 sb_id);
/*
* Some OS specific stuff
*/
@ -733,7 +715,6 @@ extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif /* #if (defined IFM_25G_SR) */
#if __FreeBSD_version < 1100000
#define QLNX_INC_IERRORS(ifp) ifp->if_ierrors++
@ -787,5 +768,4 @@ void prefetch(void *x)
#endif
#endif
#endif /* #ifndef _QLNX_DEF_H_ */

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qlnx_ioctl.c
* Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
@ -66,7 +65,6 @@ __FBSDID("$FreeBSD$");
#include "qlnx_ver.h"
#include <sys/smp.h>
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
struct thread *td);
@ -164,7 +162,6 @@ qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
grcdump->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
return (EINVAL);
@ -253,7 +250,6 @@ qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
idle_chk->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if ((ha->idle_chk[i] == NULL) ||
(idle_chk->idle_chk[i] == NULL) ||
(idle_chk->idle_chk_size[i] <
@ -300,7 +296,6 @@ qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
}
switch (cmd) {
case QLNX_MCP_TRACE:
rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
@ -378,7 +373,6 @@ qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
}
switch (trace->cmd) {
case QLNX_MCP_TRACE:
rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
@ -440,7 +434,6 @@ qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
switch (reg_rd_wr->cmd) {
case QLNX_REG_READ_CMD:
if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
@ -469,7 +462,6 @@ qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
int rval = 0;
switch (pci_cfg_rd_wr->cmd) {
case QLNX_PCICFG_READ:
pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
pci_cfg_rd_wr->reg,
@ -513,7 +505,6 @@ qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
outb = regs->reg_buf;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
rval = qlnx_grc_dump(ha, &dwords, i);
if (rval)
@ -656,7 +647,6 @@ qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
buf = qlnx_zalloc(nvram->data_len);
ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
QL_DPRINT9(ha, "data = %p data_len = 0x%x \
@ -678,7 +668,6 @@ qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
int ret = 0;
switch (nvram->cmd) {
case QLNX_NVRAM_CMD_WRITE_NVRAM:
ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
break;
@ -743,11 +732,9 @@ qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
s_stats->num_samples = ha->storm_stats_index;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
if (s_stats->buffer[i]) {
ret = copyout(&ha->storm_stats[index],
s_stats->buffer[i],
QLNX_STORM_STATS_BYTES_PER_HWFN);
@ -757,7 +744,6 @@ qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
if (s_stats->num_samples ==
QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
bzero((void *)&ha->storm_stats[i],
QLNX_STORM_STATS_BYTES_PER_HWFN);
@ -812,7 +798,6 @@ qlnx_lldp_configure(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn,
(lldp_mac[3] << 16) | (lldp_mac[4] << 24);
lldp_params.chassis_id_tlv[2] = lldp_mac[5];
lldp_params.port_id_tlv[0] = 0;
lldp_params.port_id_tlv[0] |= (QLNX_LLDP_TYPE_PORT_ID << 1);
lldp_params.port_id_tlv[0] |=
@ -987,7 +972,7 @@ qlnx_set_lldp_tlvx(qlnx_host_t *ha, qlnx_lldp_sys_tlvs_t *lldp_tlvs)
memcpy(tlv_params.buf, lldp_tlvs->buf, lldp_tlvs->buf_size);
ret = ecore_lldp_set_system_tlvs(p_hwfn, p_ptt, &tlv_params);
if (ret) {
device_printf(ha->pci_dev,
"%s: ecore_lldp_set_system_tlvs failed\n",
@ -1018,7 +1003,6 @@ qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
ifp = ha->ifp;
switch (cmd) {
case QLNX_GRC_DUMP_SIZE:
qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
break;
@ -1075,7 +1059,6 @@ qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
trace = (qlnx_trace_t *)data;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if (trace->size[i] && trace->cmd && trace->buffer[i])
rval = qlnx_get_trace(ha, i, trace);
@ -1097,4 +1080,3 @@ qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
return (rval);
}

View File

@ -77,7 +77,6 @@ struct qlnx_trace {
};
typedef struct qlnx_trace qlnx_trace_t;
/*
* Read driver info
*/
@ -181,12 +180,10 @@ struct qlnx_perm_mac_addr {
};
typedef struct qlnx_perm_mac_addr qlnx_perm_mac_addr_t;
/*
* Read STORM statistics registers
*/
struct qlnx_storm_stats {
/* xstorm */
uint32_t xstorm_active_cycles;
uint32_t xstorm_stall_cycles;
@ -268,7 +265,6 @@ struct qlnx_lldp_sys_tlvs {
};
typedef struct qlnx_lldp_sys_tlvs qlnx_lldp_sys_tlvs_t;
/*
* Read grcdump size
*/

File diff suppressed because it is too large Load Diff

View File

@ -138,7 +138,7 @@ MALLOC_DECLARE(M_QLNXBUF);
else \
pause(fn, qlnx_ms_to_hz(msecs)); \
}
/*
* Locks
*/
@ -158,5 +158,4 @@ struct qlnx_dma {
};
typedef struct qlnx_dma qlnx_dma_t;
#endif /* #ifndef _QLNX_OS_H_ */

View File

@ -32,7 +32,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
@ -154,7 +153,7 @@ qlnx_rdma_dev_add(struct qlnx_host *ha)
mtx_unlock(&qlnx_rdma_dev_lock);
_qlnx_rdma_dev_add(ha);
QL_DPRINT12(ha, "exit (%p)\n", ha);
return;
@ -235,7 +234,6 @@ qlnx_rdma_register_if(qlnx_rdma_if_t *rdma_if)
qlnx_host_t *ha;
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_lock(&qlnx_rdma_dev_lock);
qlnx_rdma_if = rdma_if;
@ -263,13 +261,11 @@ qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if)
printf("%s: enter rdma_if = %p\n", __func__, rdma_if);
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_lock(&qlnx_rdma_dev_lock);
ha = qlnx_host_list;
while (ha != NULL) {
mtx_unlock(&qlnx_rdma_dev_lock);
if (ha->dbg_level & 0xF000)
@ -293,14 +289,12 @@ qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if)
qlnx_rdma_if = NULL;
mtx_unlock(&qlnx_rdma_dev_lock);
}
printf("%s: exit rdma_if = %p\n", __func__, rdma_if);
return (ret);
}
void
qlnx_rdma_dev_open(struct qlnx_host *ha)
{
@ -319,7 +313,6 @@ qlnx_rdma_dev_open(struct qlnx_host *ha)
return;
}
void
qlnx_rdma_dev_close(struct qlnx_host *ha)
{
@ -343,5 +336,3 @@ qlnx_rdma_get_num_irqs(struct qlnx_host *ha)
{
return (QLNX_NUM_CNQ + ecore_rdma_get_sb_id(&ha->cdev.hwfns[0], 0) + 2);
}

View File

@ -27,7 +27,6 @@
* $FreeBSD$
*/
/*
* File: qlnx_rdma.h
* Author: David C Somayajulu

View File

@ -28,7 +28,6 @@
*
*/
/*
* File: qlnx_ver.h
* Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
@ -40,4 +39,3 @@
#define QLNX_VERSION_MAJOR 2
#define QLNX_VERSION_MINOR 0
#define QLNX_VERSION_BUILD 112

View File

@ -57,14 +57,12 @@
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
struct rdma_srq_id
{
__le16 srq_idx /* SRQ index */;
__le16 opaque_fid;
};
struct rdma_srq_producers
{
__le32 sge_prod /* Current produced sge in SRQ */;
@ -79,7 +77,6 @@ struct rdma_cnqe
struct regpair cq_handle;
};
struct rdma_cqe_responder
{
struct regpair srq_wr_id;
@ -148,9 +145,6 @@ union rdma_cqe
struct rdma_cqe_common cmn;
};
/*
* CQE requester status enumeration
*/
@ -171,8 +165,6 @@ enum rdma_cqe_requester_status_enum
MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
/*
* CQE responder status enumeration
*/
@ -189,7 +181,6 @@ enum rdma_cqe_responder_status_enum
MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};
/*
* CQE type enumeration
*/
@ -202,7 +193,6 @@ enum rdma_cqe_type
MAX_RDMA_CQE_TYPE
};
/*
* DIF Block size options
*/
@ -213,7 +203,6 @@ enum rdma_dif_block_size
MAX_RDMA_DIF_BLOCK_SIZE
};
/*
* DIF CRC initial value
*/
@ -224,7 +213,6 @@ enum rdma_dif_crc_seed
MAX_RDMA_DIF_CRC_SEED
};
/*
* RDMA DIF Error Result Structure
*/
@ -246,7 +234,6 @@ struct rdma_dif_error_result
u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
};
/*
* DIF IO direction
*/
@ -257,7 +244,6 @@ enum rdma_dif_io_direction_flg
MAX_RDMA_DIF_IO_DIRECTION_FLG
};
/*
* RDMA DIF Runt Result Structure
*/
@ -267,7 +253,6 @@ struct rdma_dif_runt_result
__le16 reserved[3];
};
/*
* memory window type enumeration
*/
@ -278,7 +263,6 @@ enum rdma_mw_type
MAX_RDMA_MW_TYPE
};
struct rdma_rq_sge
{
struct regpair addr;
@ -292,7 +276,6 @@ struct rdma_rq_sge
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
struct rdma_sq_atomic_wqe
{
__le32 reserved1;
@ -323,7 +306,6 @@ struct rdma_sq_atomic_wqe
struct regpair swap_data /* Swap or add data */;
};
/*
* First element (16 bytes) of atomic wqe
*/
@ -350,7 +332,6 @@ struct rdma_sq_atomic_wqe_1st
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of atomic wqe
*/
@ -361,7 +342,6 @@ struct rdma_sq_atomic_wqe_2nd
__le32 reserved2;
};
/*
* Third element (16 bytes) of atomic wqe
*/
@ -371,7 +351,6 @@ struct rdma_sq_atomic_wqe_3rd
struct regpair swap_data /* Swap or add data */;
};
struct rdma_sq_bind_wqe
{
struct regpair addr;
@ -419,7 +398,6 @@ struct rdma_sq_bind_wqe
__le32 reserved4;
};
/*
* First element (16 bytes) of bind wqe
*/
@ -445,7 +423,6 @@ struct rdma_sq_bind_wqe_1st
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of bind wqe
*/
@ -478,7 +455,6 @@ struct rdma_sq_bind_wqe_2nd
__le32 reserved4;
};
/*
* Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
*/
@ -503,7 +479,6 @@ struct rdma_sq_common_wqe
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_fmr_wqe
{
struct regpair addr;
@ -578,7 +553,6 @@ struct rdma_sq_fmr_wqe
__le32 Reserved5;
};
/*
* First element (16 bytes) of fmr wqe
*/
@ -606,7 +580,6 @@ struct rdma_sq_fmr_wqe_1st
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of fmr wqe
*/
@ -640,7 +613,6 @@ struct rdma_sq_fmr_wqe_2nd
struct regpair pbl_addr /* Address of PBL */;
};
/*
* Third element (16 bytes) of fmr wqe
*/
@ -670,7 +642,6 @@ struct rdma_sq_fmr_wqe_3rd
__le32 Reserved5;
};
struct rdma_sq_local_inv_wqe
{
struct regpair reserved;
@ -695,7 +666,6 @@ struct rdma_sq_local_inv_wqe
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_rdma_wqe
{
__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
@ -735,7 +705,6 @@ struct rdma_sq_rdma_wqe
u8 reserved2[3];
};
/*
* First element (16 bytes) of rdma wqe
*/
@ -766,7 +735,6 @@ struct rdma_sq_rdma_wqe_1st
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
/*
* Second element (16 bytes) of rdma wqe
*/
@ -786,7 +754,6 @@ struct rdma_sq_rdma_wqe_2nd
u8 reserved2[3];
};
/*
* SQ WQE req type enumeration
*/
@ -807,7 +774,6 @@ enum rdma_sq_req_type
MAX_RDMA_SQ_REQ_TYPE
};
struct rdma_sq_send_wqe
{
__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
@ -834,7 +800,6 @@ struct rdma_sq_send_wqe
__le32 reserved1[4];
};
struct rdma_sq_send_wqe_1st
{
__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
@ -858,13 +823,11 @@ struct rdma_sq_send_wqe_1st
u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
struct rdma_sq_send_wqe_2st
{
__le32 reserved1[4];
};
struct rdma_sq_sge
{
__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
@ -872,7 +835,6 @@ struct rdma_sq_sge
__le32 l_key;
};
struct rdma_srq_wqe_header
{
struct regpair wr_id;
@ -896,9 +858,6 @@ union rdma_srq_elm
struct rdma_srq_sge sge;
};
/*
* Rdma doorbell data for flags update
*/
@ -909,7 +868,6 @@ struct rdma_pwm_flags_data
u8 reserved;
};
/*
* Rdma doorbell data for SQ and RQ
*/
@ -919,14 +877,12 @@ struct rdma_pwm_val16_data
__le16 value /* aggregated value to update */;
};
union rdma_pwm_val16_data_union
{
struct rdma_pwm_val16_data as_struct /* Parameters field */;
__le32 as_dword;
};
/*
* Rdma doorbell data for CQ
*/
@ -948,7 +904,6 @@ struct rdma_pwm_val32_data
__le32 value /* aggregated value to update */;
};
union rdma_pwm_val32_data_union
{
struct rdma_pwm_val32_data as_struct /* Parameters field */;

View File

@ -45,7 +45,6 @@
#define ROCE_DCQCN_NP_MAX_QPS (64) /* notification point max QPs*/
#define ROCE_DCQCN_RP_MAX_QPS (64) /* reaction point max QPs*/
/*
* Affiliated asynchronous events / errors enumeration
*/

View File

@ -120,7 +120,6 @@ struct spad_layout {
#endif /* MDUMP_PARSE_TOOL */
#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + __builtin_offsetof(struct static_init, f))
/* This section is located at a fixed location in the beginning of the scratchpad,
@ -132,7 +131,6 @@ struct spad_layout {
*/
struct static_init {
u32 num_sections; /* 0xe20000 */
offsize_t sections[SPAD_SECTION_MAX]; /* 0xe20004 */
#define SECTION(_sec_) *((offsize_t*)(STRUCT_OFFSET(sections[_sec_])))
@ -178,7 +176,7 @@ struct static_init {
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_OFFSET (24)
#define AH_PCIE_LINK_PARAMS *((u32*)(STRUCT_OFFSET(ah_pcie_link_params)))
u32 flags; /* 0xe20850 */
#define M_GLOB_FLAGS *((u32*)(STRUCT_OFFSET(flags)))
#define FLAGS_VAUX_REQUIRED (1 << 0)

View File

@ -34,7 +34,6 @@
/* SCSI CONSTANTS */
/*********************/
#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
// Each Resource ID is one-one-valued mapped by the driver to a BDQ Resource ID (for instance per port)
#define BDQ_NUM_RESOURCES (4)
@ -69,7 +68,6 @@ struct iscsi_drv_opaque
__le16 opaque;
};
/*
* Scsi 2B/8B opaque union
*/
@ -88,7 +86,6 @@ struct scsi_bd
union scsi_opaque opaque /* Driver Metadata (preferably Virtual Address of buffer) */;
};
/*
* Scsi Drv BDQ struct
*/
@ -98,7 +95,6 @@ struct scsi_bdq_ram_drv_data
__le16 reserved0[3];
};
/*
* SCSI SGE entry
*/
@ -117,7 +113,6 @@ struct scsi_cached_sges
struct scsi_sge sge[4] /* Cached SGEs section */;
};
/*
* Scsi Drv CMDQ struct
*/
@ -128,7 +123,6 @@ struct scsi_drv_cmdq
__le32 reserved1;
};
/*
* Common SCSI init params passed by driver to FW in function init ramrod
*/
@ -140,7 +134,6 @@ struct scsi_init_func_params
u8 reserved2[12];
};
/*
* SCSI RQ/CQ/CMDQ firmware function init parameters
*/
@ -178,8 +171,6 @@ struct scsi_init_func_queues
__le16 cmdq_xon_threshold /* CMDQ XON threshold - when number of entries will be above that TH, it will send XON */;
};
/*
* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data)
*/
@ -188,8 +179,6 @@ struct scsi_ram_per_bdq_resource_drv_data
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS] /* External ring data */;
};
/*
* SCSI SGL types
*/
@ -200,7 +189,6 @@ enum scsi_sgl_mode
MAX_SCSI_SGL_MODE
};
/*
* SCSI SGL parameters
*/
@ -214,7 +202,6 @@ struct scsi_sgl_params
u8 reserved;
};
/*
* SCSI terminate connection params
*/
@ -225,7 +212,6 @@ struct scsi_terminate_extra_params
u8 reserved[4];
};
/*
* SCSI Task Queue Element
*/

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
/********************/
@ -37,7 +36,6 @@
#define TCP_INVALID_TIMEOUT_VAL -1
/*
* OOO opaque data received from LL2
*/
@ -50,7 +48,6 @@ struct ooo_opaque
u8 ooo_isle /* OOO isle number to add the packet to */;
};
/*
* tcp connect mode enum
*/
@ -61,7 +58,6 @@ enum tcp_connect_mode
MAX_TCP_CONNECT_MODE
};
/*
* tcp function init parameters
*/
@ -73,7 +69,6 @@ struct tcp_init_params
u8 reserved[9];
};
/*
* tcp IPv4/IPv6 enum
*/
@ -84,7 +79,6 @@ enum tcp_ip_version
MAX_TCP_IP_VERSION
};
/*
* tcp offload parameters
*/
@ -164,7 +158,6 @@ struct tcp_offload_params
__le32 reserved3;
};
/*
* tcp offload parameters
*/
@ -212,7 +205,6 @@ struct tcp_offload_params_opt2
__le32 reserved3[16];
};
/*
* tcp IPv4/IPv6 enum
*/
@ -228,7 +220,6 @@ enum tcp_seg_placement_event
MAX_TCP_SEG_PLACEMENT_EVENT
};
/*
* tcp init parameters
*/
@ -282,7 +273,6 @@ struct tcp_update_params
u8 reserved1[7];
};
/*
* toe upload parameters
*/

View File

@ -216,7 +216,6 @@ qlnxr_check_gsi_qp_attrs(struct qlnxr_dev *dev,
return 0;
}
static int
qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
{
@ -252,7 +251,6 @@ qlnxr_ll2_post_tx(struct qlnxr_dev *dev, struct ecore_roce_ll2_packet *pkt)
&ll2_tx_pkt,
1);
if (rc) {
QL_DPRINT11(dev->ha, "ecore_ll2_prepare_tx_packet failed\n");
/* TX failed while posting header - release resources*/
@ -466,7 +464,6 @@ qlnxr_destroy_gsi_qp(struct qlnxr_dev *dev)
return (rc);
}
static inline bool
qlnxr_get_vlan_id_gsi(struct ib_ah_attr *ah_attr, u16 *vlan_id)
{
@ -506,7 +503,6 @@ qlnxr_gsi_build_header(struct qlnxr_dev *dev,
bool has_udp = false;
#endif /* #if __FreeBSD_version >= 1102000 */
#if !DEFINE_IB_AH_ATTR_WITH_DMAC
u8 mac[ETH_ALEN];
#endif
@ -565,7 +561,7 @@ qlnxr_gsi_build_header(struct qlnxr_dev *dev,
QL_DPRINT12(dev->ha, "source mac: %x\n",
udh->eth.smac_h[j]);
}
QL_DPRINT12(dev->ha, "QP: %p, opcode: %d, wq: %lx, roce: %x, hops:%d,"
"imm : %d, vlan :%d, AH: %p\n",
qp, swr->opcode, swr->wr_id, *roce_mode, grh->hop_limit,
@ -884,4 +880,3 @@ qlnxr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
QL_DPRINT12(dev->ha, "exit i = %d\n", i);
return i;
}

View File

@ -27,12 +27,9 @@
* $FreeBSD$
*/
#ifndef __QLNXR_CM_H__
#define __QLNXR_CM_H__
/* ECORE LL2 has a limit to the number of buffers it can handle.
* FYI, OFED used 512 and 128 for recv and send.
*/

View File

@ -27,7 +27,6 @@
* $FreeBSD$
*/
/*
* File: qlnxr_def.h
* Author: David C Somayajulu
@ -234,7 +233,6 @@
#define QLNXR_MAX_MSIX (16)
struct qlnxr_cnq {
struct qlnxr_dev *dev;
struct ecore_chain pbl;
@ -422,7 +420,6 @@ struct qlnxr_dev {
typedef struct qlnxr_dev qlnxr_dev_t;
struct qlnxr_pd {
struct ib_pd ibpd;
u32 pd_id;
@ -442,8 +439,6 @@ struct qlnxr_ucontext {
struct mutex mm_list_lock;
};
struct qlnxr_dev_attr {
struct ib_device_attr ib_attr;
};
@ -549,7 +544,6 @@ struct qlnxr_cq {
uint16_t cnq_notif;
};
struct qlnxr_ah {
struct ib_ah ibah;
struct ib_ah_attr attr;
@ -725,7 +719,6 @@ enum qlnxr_mr_type {
QLNXR_MR_FRMR
};
struct qlnxr_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
@ -742,7 +735,6 @@ struct qlnxr_mr {
u64 *iova_start; /* valid only for kernel_mr */
};
struct qlnxr_mm {
struct {
u64 phy_addr;
@ -848,7 +840,6 @@ static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
return 1;
}
#ifdef DEFINE_IB_FAST_REG
static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
struct ib_fast_reg_page_list *ifrpl)
@ -904,7 +895,6 @@ qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
uint8_t *new_mac_address);
#define QLNXR_ROCE_PKEY_MAX 1
#define QLNXR_ROCE_PKEY_TABLE_LEN 1
#define QLNXR_ROCE_PKEY_DEFAULT 0xffff

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qlnxr_os.c
*/
@ -65,7 +64,6 @@ uint32_t rtr_type = 7;
SYSCTL_UINT(_dev_qnxr, OID_AUTO, rtr_type, CTLFLAG_RW, &rtr_type, 1,
"iWARP: RDMAP opcode to use for the RTR message: BITMAP 1: RDMA_SEND 2: RDMA_WRITE 4: RDMA_READ. Default: 7");
#define QNXR_WQ_MULTIPLIER_MIN (1)
#define QNXR_WQ_MULTIPLIER_MAX (7)
#define QNXR_WQ_MULTIPLIER_DFT (3)
@ -253,21 +251,19 @@ qlnxr_register_device(qlnxr_dev_t *dev)
ibdev->alloc_fast_reg_page_list = qlnxr_alloc_frmr_page_list;
ibdev->free_fast_reg_page_list = qlnxr_free_frmr_page_list;
#endif /* #if __FreeBSD_version >= 1102000 */
ibdev->poll_cq = qlnxr_poll_cq;
ibdev->post_send = qlnxr_post_send;
ibdev->post_recv = qlnxr_post_recv;
ibdev->process_mad = qlnxr_process_mad;
ibdev->dma_device = &dev->pdev->dev;
ibdev->get_link_layer = qlnxr_link_layer;
if (QLNX_IS_IWARP(dev)) {
iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
device_printf(dev->ha->pci_dev, "device is IWARP\n");
if (iwcm == NULL)
return (-ENOMEM);
@ -407,18 +403,15 @@ qlnxr_setup_irqs(struct qlnxr_dev *dev)
QL_DPRINT12(ha, "enter start_irq_rid = %d num_rss = %d\n",
start_irq_rid, dev->ha->num_rss);
for (i = 0; i < dev->num_cnq; i++) {
dev->cnq_array[i].irq_rid = start_irq_rid + i;
dev->cnq_array[i].irq = bus_alloc_resource_any(dev->ha->pci_dev,
SYS_RES_IRQ,
&dev->cnq_array[i].irq_rid,
(RF_ACTIVE | RF_SHAREABLE));
if (dev->cnq_array[i].irq == NULL) {
QL_DPRINT11(ha,
"bus_alloc_resource_any failed irq_rid = %d\n",
dev->cnq_array[i].irq_rid);
@ -431,7 +424,6 @@ qlnxr_setup_irqs(struct qlnxr_dev *dev)
(INTR_TYPE_NET | INTR_MPSAFE),
NULL, qlnxr_intr, &dev->cnq_array[i],
&dev->cnq_array[i].irq_handle)) {
QL_DPRINT11(ha, "bus_setup_intr failed\n");
goto qlnxr_setup_irqs_err;
}
@ -484,7 +476,6 @@ qlnxr_free_resources(struct qlnxr_dev *dev)
return;
}
static int
qlnxr_alloc_resources(struct qlnxr_dev *dev)
{
@ -537,7 +528,6 @@ qlnxr_alloc_resources(struct qlnxr_dev *dev)
dev->cnq_array[i].index = i;
sprintf(dev->cnq_array[i].name, "qlnxr%d@pci:%d",
i, (dev->ha->pci_func));
}
QL_DPRINT12(ha, "exit\n");
@ -575,7 +565,6 @@ qlnxr_affiliated_event(void *context, u8 e_code, void *fw_handle)
if (QLNX_IS_IWARP(dev)) {
switch (e_code) {
case ECORE_IWARP_EVENT_CQ_OVERFLOW:
event.event = IB_EVENT_CQ_ERR;
event_type = EVENT_TYPE_CQ;
@ -589,7 +578,6 @@ qlnxr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
} else {
switch (e_code) {
case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
event.event = IB_EVENT_CQ_ERR;
event_type = EVENT_TYPE_CQ;
@ -633,7 +621,6 @@ qlnxr_affiliated_event(void *context, u8 e_code, void *fw_handle)
}
switch (event_type) {
case EVENT_TYPE_CQ:
if (cq && cq->sig == QLNXR_CQ_MAGIC_NUMBER) {
struct ib_cq *ibcq = &cq->ibcq;
@ -676,7 +663,6 @@ qlnxr_affiliated_event(void *context, u8 e_code, void *fw_handle)
default:
break;
}
QL_DPRINT12(ha, "exit\n");
@ -696,7 +682,6 @@ qlnxr_unaffiliated_event(void *context, u8 e_code)
return;
}
static int
qlnxr_set_device_attr(struct qlnxr_dev *dev)
{
@ -757,7 +742,6 @@ qlnxr_set_device_attr(struct qlnxr_dev *dev)
return 0;
}
static int
qlnxr_init_hw(struct qlnxr_dev *dev)
{
@ -804,14 +788,13 @@ qlnxr_init_hw(struct qlnxr_dev *dev)
in_params->roce.cq_mode = ECORE_RDMA_CQ_MODE_32_BITS;
in_params->max_mtu = dev->ha->max_frame_size;
if (QLNX_IS_IWARP(dev)) {
if (delayed_ack)
in_params->iwarp.flags |= ECORE_IWARP_DA_EN;
if (timestamp)
in_params->iwarp.flags |= ECORE_IWARP_TS_EN;
in_params->iwarp.rcv_wnd_size = rcv_wnd_size*1024;
in_params->iwarp.crc_needed = crc_needed;
in_params->iwarp.ooo_num_rx_bufs =
@ -887,7 +870,6 @@ qlnxr_add_ip_based_gid(struct qlnxr_dev *dev, struct ifnet *ifp)
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET) {
QL_DPRINT12(dev->ha, "IP address : %x\n", ((struct sockaddr_in *) ifa->ifa_addr)->sin_addr.s_addr);
ipv6_addr_set_v4mapped(
((struct sockaddr_in *) ifa->ifa_addr)->sin_addr.s_addr,
@ -967,7 +949,7 @@ is_vlan_dev(struct ifnet *ifp)
{
return (ifp->if_type == IFT_L2VLAN);
}
static inline uint16_t
vlan_dev_vlan_id(struct ifnet *ifp)
{
@ -1212,7 +1194,7 @@ qlnxr_remove(void *eth_dev, void *qlnx_rdma_dev)
}
ib_unregister_device(&dev->ibdev);
if (QLNX_IS_ROCE(dev)) {
if (dev->pd_count)
return (EBUSY);
@ -1281,7 +1263,6 @@ qlnxr_notify(void *eth_dev, void *qlnx_rdma_dev, enum qlnx_rdma_event event)
QL_DPRINT12(ha, "enter (%p, %d)\n", qlnx_rdma_dev, event);
switch (event) {
case QLNX_ETHDEV_UP:
if (!test_and_set_bit(QLNXR_ENET_STATE_BIT, &dev->enet_state))
qlnxr_ib_dispatch_event(dev, QLNXR_PORT,
@ -1308,7 +1289,6 @@ qlnxr_mod_load(void)
{
int ret;
qlnxr_drv.add = qlnxr_add;
qlnxr_drv.remove = qlnxr_remove;
qlnxr_drv.notify = qlnxr_notify;
@ -1334,7 +1314,6 @@ qlnxr_event_handler(module_t mod, int event, void *arg)
int ret = 0;
switch (event) {
case MOD_LOAD:
ret = qlnxr_mod_load();
break;
@ -1364,4 +1343,3 @@ MODULE_DEPEND(qlnxr, linuxkpi, 1, 1, 1);
#endif /* #if __FreeBSD_version >= 1100000 */
DECLARE_MODULE(qlnxr, qlnxr_mod_info, SI_SUB_LAST, SI_ORDER_ANY);

View File

@ -31,7 +31,6 @@
#ifndef __QLNXR_ROCE_H__
#define __QLNXR_ROCE_H__
/*
* roce completion notification queue element
*/
@ -39,7 +38,6 @@ struct roce_cnqe {
struct regpair cq_handle;
};
struct roce_cqe_responder {
struct regpair srq_wr_id;
struct regpair qp_handle;
@ -104,9 +102,6 @@ union roce_cqe {
struct roce_cqe_common cmn;
};
/*
* CQE requester status enumeration
*/
@ -126,8 +121,6 @@ enum roce_cqe_requester_status_enum {
MAX_ROCE_CQE_REQUESTER_STATUS_ENUM
};
/*
* CQE responder status enumeration
*/
@ -143,7 +136,6 @@ enum roce_cqe_responder_status_enum {
MAX_ROCE_CQE_RESPONDER_STATUS_ENUM
};
/*
* CQE type enumeration
*/
@ -155,7 +147,6 @@ enum roce_cqe_type {
MAX_ROCE_CQE_TYPE
};
/*
* memory window type enumeration
*/
@ -165,7 +156,6 @@ enum roce_mw_type {
MAX_ROCE_MW_TYPE
};
struct roce_rq_sge {
struct regpair addr;
__le32 length;
@ -178,7 +168,6 @@ struct roce_rq_sge {
#define ROCE_RQ_SGE_RESERVED0_SHIFT 29
};
struct roce_sq_atomic_wqe {
struct regpair remote_va;
__le32 xrc_srq;
@ -205,7 +194,6 @@ struct roce_sq_atomic_wqe {
struct regpair reserved3;
};
/*
* First element (16 bytes) of atomic wqe
*/
@ -230,7 +218,6 @@ struct roce_sq_atomic_wqe_1st {
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of atomic wqe
*/
@ -240,7 +227,6 @@ struct roce_sq_atomic_wqe_2nd {
__le32 reserved2;
};
/*
* Third element (16 bytes) of atomic wqe
*/
@ -249,7 +235,6 @@ struct roce_sq_atomic_wqe_3rd {
struct regpair reserved3;
};
struct roce_sq_bind_wqe {
struct regpair addr;
__le32 l_key;
@ -295,7 +280,6 @@ struct roce_sq_bind_wqe {
__le32 reserved6;
};
/*
* First element (16 bytes) of bind wqe
*/
@ -332,7 +316,6 @@ struct roce_sq_bind_wqe_1st {
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of bind wqe
*/
@ -351,7 +334,6 @@ struct roce_sq_bind_wqe_2nd {
__le32 reserved6;
};
/*
* Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
*/
@ -375,7 +357,6 @@ struct roce_sq_common_wqe {
u8 prev_wqe_size;
};
struct roce_sq_fmr_wqe {
struct regpair addr;
__le32 l_key;
@ -422,7 +403,6 @@ struct roce_sq_fmr_wqe {
struct regpair pbl_addr;
};
/*
* First element (16 bytes) of fmr wqe
*/
@ -459,7 +439,6 @@ struct roce_sq_fmr_wqe_1st {
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of fmr wqe
*/
@ -479,7 +458,6 @@ struct roce_sq_fmr_wqe_2nd {
struct regpair pbl_addr;
};
struct roce_sq_local_inv_wqe {
struct regpair reserved;
__le32 inv_l_key;
@ -501,7 +479,6 @@ struct roce_sq_local_inv_wqe {
u8 prev_wqe_size;
};
struct roce_sq_rdma_wqe {
__le32 imm_data;
__le32 length;
@ -527,7 +504,6 @@ struct roce_sq_rdma_wqe {
__le32 reserved1;
};
/*
* First element (16 bytes) of rdma wqe
*/
@ -553,7 +529,6 @@ struct roce_sq_rdma_wqe_1st {
u8 prev_wqe_size;
};
/*
* Second element (16 bytes) of rdma wqe
*/
@ -563,7 +538,6 @@ struct roce_sq_rdma_wqe_2nd {
__le32 reserved1;
};
/*
* SQ WQE req type enumeration
*/
@ -583,7 +557,6 @@ enum roce_sq_req_type {
MAX_ROCE_SQ_REQ_TYPE
};
struct roce_sq_send_wqe {
__le32 inv_key_or_imm_data;
__le32 length;
@ -606,19 +579,16 @@ struct roce_sq_send_wqe {
u8 prev_wqe_size;
};
struct roce_sq_sge {
__le32 length;
struct regpair addr;
__le32 l_key;
};
struct roce_srq_prod {
__le16 prod;
};
struct roce_srq_sge {
struct regpair addr;
__le32 length;
@ -634,7 +604,6 @@ struct roce_srq_sge {
__le32 reserved3;
};
/*
* RoCE doorbell data for SQ and RQ
*/
@ -643,13 +612,11 @@ struct roce_pwm_val16_data {
__le16 prod_val;
};
union roce_pwm_val16_data_union {
struct roce_pwm_val16_data as_struct;
__le32 as_dword;
};
/*
* RoCE doorbell data for CQ
*/
@ -666,7 +633,6 @@ struct roce_pwm_val32_data {
__le32 cq_cons_val;
};
union roce_pwm_val32_data_union {
struct roce_pwm_val32_data as_struct;
struct regpair as_repair;

View File

@ -90,7 +90,7 @@ struct qlnxr_create_qp_uresp {
/* SQ*/
u32 sq_db_offset;
u16 sq_icid;
/* RQ */
u32 rq_db_offset;
u16 rq_icid;

View File

@ -25,7 +25,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qlnxr_verbs.c
*/
@ -47,7 +46,6 @@ __FBSDID("$FreeBSD$");
(type_ptr)->field.lo = cpu_to_le32(lower_32_bits(vaddr));\
} while (0)
#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
do { \
TYPEPTR_ADDR_SET(sge, addr, vaddr); \
@ -90,7 +88,6 @@ qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
struct qlnxr_dev *dev,
struct ib_srq_init_attr *init_attr);
static int
qlnxr_copy_srq_uresp(struct qlnxr_dev *dev,
struct qlnxr_srq *srq,
@ -102,7 +99,6 @@ qlnxr_free_srq_user_params(struct qlnxr_srq *srq);
static void
qlnxr_free_srq_kernel_params(struct qlnxr_srq *srq);
static u32
qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq);
@ -762,7 +758,6 @@ qlnxr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context,
pd->pd_id = pd_id;
if (udata && context) {
rc = ib_copy_to_udata(udata, &pd->pd_id, sizeof(pd->pd_id));
if (rc) {
QL_DPRINT11(ha, "ib_copy_to_udata failed\n");
@ -842,7 +837,6 @@ qlnxr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
return 0;
}
static inline bool
qlnxr_get_vlan_id_qp(qlnx_host_t *ha, struct ib_qp_attr *attr, int attr_mask,
u16 *vlan_id)
@ -918,8 +912,6 @@ get_gid_info(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return;
}
static int
qlnxr_add_mmap(struct qlnxr_ucontext *uctx, u64 phy_addr, unsigned long len)
{
@ -1041,7 +1033,7 @@ ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
uresp.sges_per_recv_wr = QLNXR_MAX_RQE_ELEMENTS_PER_RQE;
uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
uresp.max_cqes = QLNXR_MAX_CQES;
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (rc)
goto err;
@ -1053,7 +1045,7 @@ ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
goto err;
QL_DPRINT12(ha, "Allocated user context %p\n",
&ctx->ibucontext);
return &ctx->ibucontext;
err:
kfree(ctx);
@ -1151,7 +1143,6 @@ qlnxr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
dev->db_size))) {
QL_DPRINT12(ha, "Mapping doorbell bar\n");
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@ -1394,7 +1385,6 @@ qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
int entry;
#endif
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
@ -1430,7 +1420,6 @@ qlnxr_populate_pbls(struct qlnxr_dev *dev, struct ib_umem *umem,
#ifndef DEFINE_IB_UMEM_WITH_CHUNK
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
#else
list_for_each_entry(chunk, &umem->chunk_list, list) {
/* get all the dma regions from the chunk. */
@ -1561,7 +1550,6 @@ qlnxr_init_mr_info(struct qlnxr_dev *dev, struct mr_info *info,
return rc;
}
struct ib_mr *
#if __FreeBSD_version >= 1102000
qlnxr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
@ -2065,7 +2053,6 @@ qlnxr_destroy_cq(struct ib_cq *ibcq)
/* GSIs CQs are handled by driver, so they don't exist in the FW */
if (cq->cq_type != QLNXR_CQ_TYPE_GSI) {
iparams.icid = cq->icid;
rc = ecore_rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
@ -2297,7 +2284,6 @@ qlnxr_copy_rq_uresp(struct qlnxr_dev *dev,
/* iWARP requires two doorbells per RQ. */
if (QLNX_IS_IWARP(dev)) {
uresp->rq_db_offset =
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD);
uresp->rq_db2_offset =
@ -2366,7 +2352,6 @@ qlnxr_copy_qp_uresp(struct qlnxr_dev *dev,
return rc;
}
static void
qlnxr_set_common_qp_params(struct qlnxr_dev *dev,
struct qlnxr_qp *qp,
@ -2451,7 +2436,6 @@ qlnxr_check_srq_params(struct ib_pd *ibpd,
return 0;
}
static void
qlnxr_free_srq_user_params(struct qlnxr_srq *srq)
{
@ -2520,7 +2504,6 @@ qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
sizeof(struct rdma_srq_producers),
access, dmasync);
if (IS_ERR(srq->prod_umem)) {
qlnxr_free_pbl(srq->dev, &srq->usrq.pbl_info, srq->usrq.pbl_tbl);
ib_umem_release(srq->usrq.umem);
@ -2543,7 +2526,6 @@ qlnxr_init_srq_user_params(struct ib_ucontext *ib_ctx,
return 0;
}
static int
qlnxr_alloc_srq_kernel_params(struct qlnxr_srq *srq,
struct qlnxr_dev *dev,
@ -2646,7 +2628,6 @@ qlnxr_init_common_qp_in_params(struct qlnxr_dev *dev,
return;
}
static inline void
qlnxr_qp_user_print( struct qlnxr_dev *dev,
struct qlnxr_qp *qp)
@ -2946,7 +2927,6 @@ qlnxr_roce_create_kernel_qp(struct qlnxr_dev *dev,
in_params->sq_pbl_ptr = ecore_chain_get_pbl_phys(&qp->sq.pbl);
if (!qp->srq) {
rc = ecore_chain_alloc(
dev->cdev,
ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
@ -3089,7 +3069,6 @@ qlnxr_iwarp_create_kernel_qp(struct qlnxr_dev *dev,
ext_pbl.p_pbl_virt, ext_pbl.p_pbl_phys);
if (!qp->srq) {
rc = ecore_chain_alloc(
dev->cdev,
ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
@ -3313,7 +3292,6 @@ qlnxr_create_qp(struct ib_pd *ibpd,
return ERR_PTR(-EFAULT);
}
static enum ib_qp_state
qlnxr_get_ibqp_state(enum ecore_roce_qp_state qp_state)
{
@ -3645,7 +3623,6 @@ qlnxr_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
QL_DPRINT12(ha,
"Only MTU sizes of 256, 512, 1024,"
" 2048 and 4096 are supported "
@ -3981,7 +3958,6 @@ qlnxr_query_qp(struct ib_qp *ibqp,
return rc;
}
static void
qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
{
@ -3990,7 +3966,7 @@ qlnxr_cleanup_user(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (qp->usq.umem)
ib_umem_release(qp->usq.umem);
@ -4013,7 +3989,7 @@ qlnxr_cleanup_kernel(struct qlnxr_dev *dev, struct qlnxr_qp *qp)
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (qlnxr_qp_has_sq(qp)) {
QL_DPRINT12(ha, "freeing SQ\n");
ha->qlnxr_debug = 1;
@ -4045,7 +4021,7 @@ qlnxr_free_qp_resources(struct qlnxr_dev *dev,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
#if 0
if (qp->qp_type != IB_QPT_GSI) {
rc = ecore_rdma_destroy_qp(dev->rdma_ctx, qp->ecore_qp,
@ -4095,7 +4071,6 @@ qlnxr_destroy_qp(struct ib_qp *ibqp)
if (QLNX_IS_ROCE(dev) && (qp->state != (ECORE_ROCE_QP_STATE_RESET |
ECORE_ROCE_QP_STATE_ERR |
ECORE_ROCE_QP_STATE_INIT))) {
attr.qp_state = IB_QPS_ERR;
attr_mask |= IB_QP_STATE;
@ -4144,7 +4119,6 @@ swap_wqe_data64(u64 *p)
*p = cpu_to_be64(cpu_to_le64(*p));
}
static u32
qlnxr_prepare_sq_inline_data(struct qlnxr_dev *dev,
struct qlnxr_qp *qp,
@ -4233,7 +4207,7 @@ qlnxr_prepare_sq_sges(struct qlnxr_dev *dev, struct qlnxr_qp *qp,
ha = dev->ha;
QL_DPRINT12(ha, "enter wr->num_sge = %d \n", wr->num_sge);
for (i = 0; i < wr->num_sge; i++) {
struct rdma_sq_sge *sge = ecore_chain_produce(&qp->sq.pbl);
@ -4264,7 +4238,7 @@ qlnxr_prepare_sq_rdma_data(struct qlnxr_dev *dev,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
TYPEPTR_ADDR_SET(rwqe2, remote_va, rdma_wr(wr)->remote_addr);
@ -4296,7 +4270,7 @@ qlnxr_prepare_sq_send_data(struct qlnxr_dev *dev,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
memset(swqe2, 0, sizeof(*swqe2));
if (wr->send_flags & IB_SEND_INLINE) {
@ -4323,7 +4297,7 @@ qlnx_handle_completed_mrs(struct qlnxr_dev *dev, struct mr_info *info)
int work = info->completed - info->completed_handled - 1;
QL_DPRINT12(ha, "enter [%d]\n", work);
while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
struct qlnxr_pbl *pbl;
@ -4401,7 +4375,7 @@ build_frmr_pbes(struct qlnxr_dev *dev, struct ib_send_wr *wr,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
pbe = (struct regpair *)pbl_tbl->va;
num_pbes = 0;
@ -4441,7 +4415,7 @@ qlnxr_prepare_safe_pbl(struct qlnxr_dev *dev, struct mr_info *info)
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (info->completed == 0) {
//DP_VERBOSE(dev, QLNXR_MSG_MR, "First FMR\n");
/* first fmr */
@ -4486,7 +4460,7 @@ qlnxr_prepare_fmr(struct qlnxr_qp *qp,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (wr->wr.fast_reg.page_list_len == 0)
BUG();
@ -4583,7 +4557,7 @@ qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
ha = dev->ha;
QL_DPRINT12(ha, "enter[qp, wr] = [%p,%p]\n", qp, wr);
/* prevent SQ overflow and/or processing of a bad WR */
err_wr = wr->num_sge > qp->sq.max_sges;
wq_is_full = qlnxr_wq_is_full(&qp->sq);
@ -4592,7 +4566,6 @@ qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
if (wq_is_full || err_wr || pbl_is_full) {
if (wq_is_full &&
!(qp->err_bitmap & QLNXR_QP_ERR_SQ_FULL)) {
qp->err_bitmap |= QLNXR_QP_ERR_SQ_FULL;
QL_DPRINT12(ha,
@ -4604,7 +4577,6 @@ qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
if (err_wr &&
!(qp->err_bitmap & QLNXR_QP_ERR_BAD_SR)) {
qp->err_bitmap |= QLNXR_QP_ERR_BAD_SR;
QL_DPRINT12(ha,
@ -4616,7 +4588,6 @@ qlnxr_can_post_send(struct qlnxr_qp *qp, struct ib_send_wr *wr)
if (pbl_is_full &&
!(qp->err_bitmap & QLNXR_QP_ERR_SQ_PBL_FULL)) {
qp->err_bitmap |= QLNXR_QP_ERR_SQ_PBL_FULL;
QL_DPRINT12(ha,
@ -4643,7 +4614,7 @@ qlnxr_post_send(struct ib_qp *ibqp,
bool comp;
qlnx_host_t *ha;
uint32_t reg_addr;
*bad_wr = NULL;
ha = dev->ha;
@ -4714,9 +4685,7 @@ qlnxr_post_send(struct ib_qp *ibqp,
qp->wqe_wr_id[qp->sq.prod].opcode = qlnxr_ib_to_wc_opcode(wr->opcode);
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
@ -5024,7 +4993,6 @@ qlnxr_srq_elem_left(struct qlnxr_srq_hwq_info *hw_srq)
return hw_srq->max_wr - used;
}
int
qlnxr_post_recv(struct ib_qp *ibqp,
struct ib_recv_wr *wr,
@ -5043,7 +5011,7 @@ qlnxr_post_recv(struct ib_qp *ibqp,
return -EINVAL;
QL_DPRINT12(ha, "enter\n");
if (qp->qp_type == IB_QPT_GSI) {
QL_DPRINT12(ha, "(qp->qp_type = IB_QPT_GSI)\n");
return qlnxr_gsi_post_recv(ibqp, wr, bad_wr);
@ -5187,7 +5155,7 @@ process_req(struct qlnxr_dev *dev,
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
while (num_entries && qp->sq.wqe_cons != hw_cons) {
if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
qlnxr_chk_if_fmr(qp);
@ -5207,7 +5175,6 @@ process_req(struct qlnxr_dev *dev,
wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
switch (wc->opcode) {
case IB_WC_RDMA_WRITE:
wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
@ -5266,9 +5233,8 @@ qlnxr_poll_cq_req(struct qlnxr_dev *dev,
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter req->status = 0x%x\n", req->status);
switch (req->status) {
switch (req->status) {
case RDMA_CQE_REQ_STS_OK:
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
@ -5354,12 +5320,11 @@ __process_resp_one(struct qlnxr_dev *dev,
QL_DPRINT12(ha, "enter qp = %p resp->status = 0x%x\n",
qp, resp->status);
wc->opcode = IB_WC_RECV;
wc->wc_flags = 0;
switch (resp->status) {
case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
wc_status = IB_WC_LOC_ACCESS_ERR;
break;
@ -5426,7 +5391,6 @@ __process_resp_one(struct qlnxr_dev *dev,
flags = resp->flags & QLNXR_RESP_RDMA_IMM;
switch (flags) {
case QLNXR_RESP_RDMA_IMM:
/* update opcode */
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
@ -5476,7 +5440,7 @@ process_resp_one_srq(struct qlnxr_dev *dev,
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
wr_id = HILO_U64(resp->srq_wr_id.hi, resp->srq_wr_id.lo);
if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
@ -5511,7 +5475,7 @@ process_resp_one(struct qlnxr_dev *dev,
u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
QL_DPRINT12(ha, "enter\n");
__process_resp_one(dev, qp, cq, wc, resp, wr_id);
while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
@ -5532,7 +5496,7 @@ process_resp_flush(struct qlnxr_qp *qp,
qlnx_host_t *ha = qp->dev->ha;
QL_DPRINT12(ha, "enter\n");
while (num_entries && qp->rq.wqe_cons != hw_cons) {
/* fill WC */
wc->status = IB_WC_WR_FLUSH_ERR;
@ -5579,7 +5543,7 @@ qlnxr_poll_cq_resp_srq(struct qlnxr_dev *dev,
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
consume_cqe(cq);
*update |= 1;
@ -5601,7 +5565,7 @@ qlnxr_poll_cq_resp(struct qlnxr_dev *dev,
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
cnt = process_resp_flush(qp, num_entries, wc,
resp->rq_cons);
@ -5633,7 +5597,7 @@ doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
qlnx_host_t *ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
wmb();
cq->db.data.agg_flags = flags;
cq->db.data.value = cpu_to_le32(cons);
@ -5659,7 +5623,6 @@ doorbell_cq(struct qlnxr_dev *dev, struct qlnxr_cq *cq, u32 cons, u8 flags)
//#endif
}
static int
is_valid_cqe(struct qlnxr_cq *cq, union rdma_cqe *cqe)
{
@ -5684,7 +5647,7 @@ qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
return -EINVAL;
if (cq->destroyed) {
QL_DPRINT11(ha, "called after destroy for cq %p (icid=%d)\n",
cq, cq->icid);
@ -5762,11 +5725,10 @@ qlnxr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_unlock_irqrestore(&cq->cq_lock, flags);
QL_DPRINT12(ha, "exit\n");
return done;
}
int
qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
@ -5814,7 +5776,6 @@ qlnxr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
return 0;
}
static struct qlnxr_mr *
__qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
{
@ -5825,7 +5786,7 @@ __qlnxr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
qlnx_host_t *ha;
ha = dev->ha;
QL_DPRINT12(ha, "enter ibpd = %p pd = %p "
" pd_id = %d max_page_list_len = %d\n",
ibpd, pd, pd->pd_id, max_page_list_len);
@ -6019,7 +5980,7 @@ qlnxr_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
struct qlnxr_fast_reg_page_list *frmr_list;
frmr_list = get_qlnxr_frmr_list(page_list);
free_mr_info(frmr_list->dev, &frmr_list->info);
kfree(frmr_list->ibfrpl.page_list);
@ -6098,7 +6059,6 @@ qlnxr_validate_phys_buf_list(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
}
while (buf_cnt) {
*total_size += buf_list->size;
if (buf_list->size != size) {
@ -6124,7 +6084,6 @@ qlnxr_get_num_pages(qlnx_host_t *ha, struct ib_phys_buf *buf_list,
u64 size;
for (i = 0; i < buf_cnt; i++) {
size = 0;
while (size < buf_list->size) {
size += PAGE_SIZE;
@ -6178,7 +6137,6 @@ qlnxr_populate_phys_mem_pbls(struct qlnxr_dev *dev,
pbe_cnt = 0;
for (i = 0; i < buf_cnt; i++) {
pages = buf_list->size >> PAGE_SHIFT;
for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
@ -6353,9 +6311,9 @@ qlnxr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
QL_DPRINT12(ha, "no address handle can be allocated\n");
return ERR_PTR(-ENOMEM);
}
ah->attr = *attr;
return &ah->ibah;
}
@ -6365,7 +6323,7 @@ qlnxr_destroy_ah(struct ib_ah *ibah)
struct qlnxr_dev *dev;
qlnx_host_t *ha;
struct qlnxr_ah *ah = get_qlnxr_ah(ibah);
dev = get_qlnxr_dev((ibah->device));
ha = dev->ha;
@ -6442,7 +6400,6 @@ qlnxr_process_mad(struct ib_device *ibdev,
// return IB_MAD_RESULT_SUCCESS;
}
#if __FreeBSD_version >= 1102000
int
qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
@ -6479,10 +6436,8 @@ qlnxr_get_port_immutable(struct ib_device *ibdev, u8 port_num,
}
#endif /* #if __FreeBSD_version > 1102000 */
/***** iWARP related functions *************/
static void
qlnxr_iw_mpa_request(void *context,
struct ecore_iwarp_cm_event_params *params)
@ -6504,7 +6459,7 @@ qlnxr_iw_mpa_request(void *context,
params->cm_info->ip_version);
return;
}
ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
if (!ep) {
@ -6583,7 +6538,7 @@ qlnxr_iw_issue_event(void *context,
QL_DPRINT12(ha, "event=[%d] %s\n", event.event, str);
QL_DPRINT12(ha, "status=[%d] \n", event.status);
if (ep) {
if (ep->cm_id)
ep->cm_id->event_handler(ep->cm_id, &event);
@ -6609,7 +6564,7 @@ qlnxr_iw_close_event(void *context,
ha = dev->ha;
QL_DPRINT12(ha, "enter\n");
if (ep->cm_id) {
qlnxr_iw_issue_event(context,
params,
@ -6764,7 +6719,6 @@ qlnxr_iw_mpa_reply(void *context,
return rc;
}
void
qlnxr_iw_qp_event(void *context,
struct ecore_iwarp_cm_event_params *params,
@ -6805,9 +6759,8 @@ qlnxr_iw_event_handler(void *context,
QL_DPRINT12(ha, "[context, event] = [%p, 0x%x] "
"enter\n", context, params->event);
switch (params->event) {
switch (params->event) {
/* Passive side request received */
case ECORE_IWARP_EVENT_MPA_REQUEST:
qlnxr_iw_mpa_request(context, params);
@ -7167,7 +7120,7 @@ qlnxr_iw_accept(struct iw_cm_id *cm_id,
if (!(ha->ifp->if_drv_flags & IFF_DRV_RUNNING))
return -EINVAL;
qp = idr_find(&dev->qpidr, conn_param->qpn);
if (!qp) {
QL_DPRINT11(ha, "idr_find failed invalid qpn = %d\n",
@ -7238,7 +7191,7 @@ qlnxr_iw_qp_add_ref(struct ib_qp *ibqp)
ha = qp->dev->ha;
QL_DPRINT12(ha, "enter ibqp = %p\n", ibqp);
atomic_inc(&qp->refcnt);
QL_DPRINT12(ha, "exit \n");

View File

@ -27,7 +27,6 @@
* $FreeBSD$
*/
#ifndef __QLNXR_VERBS_H__
#define __QLNXR_VERBS_H__
@ -47,7 +46,6 @@ extern struct ib_srq *qlnxr_create_srq(struct ib_pd *,
extern int qlnxr_destroy_srq(struct ib_srq *);
extern int qlnxr_modify_srq(struct ib_srq *,
struct ib_srq_attr *,
enum ib_srq_attr_mask,
@ -117,7 +115,6 @@ extern int qlnxr_poll_cq(struct ib_cq *,
int num_entries,
struct ib_wc *wc);
extern struct ib_qp *qlnxr_create_qp(struct ib_pd *,
struct ib_qp_init_attr *attrs,
struct ib_udata *);
@ -228,7 +225,6 @@ extern int qlnxr_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg,
extern struct ib_mr *qlnxr_alloc_frmr(struct ib_pd *pd,
int max_page_list_len);
extern struct ib_fast_reg_page_list *qlnxr_alloc_frmr_page_list(
struct ib_device *ibdev,
int page_list_len);
@ -242,7 +238,6 @@ extern struct ib_ucontext *qlnxr_alloc_ucontext(struct ib_device *ibdev,
extern int qlnxr_dealloc_ucontext(struct ib_ucontext *ibctx);
extern int qlnxr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
extern int qlnxr_iw_connect(struct iw_cm_id *cm_id,
@ -264,4 +259,3 @@ extern void qlnxr_iw_qp_rem_ref(struct ib_qp *qp);
extern struct ib_qp *qlnxr_iw_get_qp(struct ib_device *dev, int qpn);
#endif /* #ifndef __QLNXR_VERBS_H__ */