net/qede/base: formatting changes

Fixes white spaces and tabs.

Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
This commit is contained in:
Rasesh Mody 2016-10-18 21:11:16 -07:00 committed by Bruce Richardson
parent c018d2b49d
commit d2e7d931d0
47 changed files with 2155 additions and 2155 deletions

View File

@ -322,11 +322,11 @@ struct ecore_hw_info {
u32 resc_num[ECORE_MAX_RESC];
u32 feat_num[ECORE_MAX_FEATURES];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
u8 ooo_tc;
@ -440,9 +440,9 @@ struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine */
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
#define ECORE_PATH_ID(_p_hwfn) \
#define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
@ -478,7 +478,7 @@ struct ecore_hwfn {
/* EQ */
struct ecore_eq *p_eq;
/* Consolidate Q */
/* Consolidate Q*/
struct ecore_consq *p_consq;
/* Slow-Path definitions */
@ -501,7 +501,7 @@ struct ecore_hwfn {
struct ecore_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not */
/* Flag indicating whether interrupts are enabled or not*/
bool b_int_enabled;
bool b_int_requested;
@ -572,45 +572,45 @@ struct ecore_dev {
u16 device_id;
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#ifndef ASIC_ONLY
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_EMUL_B0(_p_dev))
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev))
#define CHIP_REV_IS_SLOW(_p_dev) \
#define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
#define CHIP_REV_IS_A0(_p_dev) \
#define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_FPGA_A0(_p_dev) || \
!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) \
#define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev) || \
(_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
#else
#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#endif
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports_in_engines;
@ -618,7 +618,7 @@ struct ecore_dev {
u8 path_id;
enum ecore_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) \
#define IS_MF_DEFAULT(_p_hwfn) \
(((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
@ -648,7 +648,7 @@ struct ecore_dev {
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->p_dev->iro_arr)
#define IRO (p_hwfn->p_dev->iro_arr)
/* HW functions */
u8 num_hwfns;

View File

@ -117,7 +117,7 @@ ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
p_info->non_offload_tc = tc;
else
p_info->offload_tc = tc;
}
}
}
void

View File

@ -305,7 +305,7 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
alloc_err:
alloc_err:
DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
ecore_qm_info_free(p_hwfn);
return ECORE_NOMEM;
@ -494,9 +494,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
alloc_no_mem:
alloc_no_mem:
rc = ECORE_NOMEM;
alloc_err:
alloc_err:
ecore_resc_free(p_dev);
return rc;
}
@ -557,11 +557,11 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
/* Make sure notification is not set before initiating final cleanup */
/* Make sure notification is not set before initiating final cleanup */
if (REG_RD(p_hwfn, addr)) {
DP_NOTICE(p_hwfn, false,
"Unexpected; Found final cleanup notification "
"before initiating final cleanup\n");
" before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@ -1319,7 +1319,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load request was sent.Resp:0x%x, Load code: 0x%x\n",
"Load request was sent. Resp:0x%x, Load code: 0x%x\n",
rc, load_code);
/* Only relevant for recovery:
@ -1840,7 +1840,7 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@ -2344,11 +2344,11 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
#endif
return rc;
err2:
err2:
ecore_mcp_free(p_hwfn);
err1:
err1:
ecore_hw_hwfn_free(p_hwfn);
err0:
err0:
return rc;
}
@ -2490,7 +2490,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
p_chain->pbl.p_phys_table, pbl_size);
out:
out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@ -2692,7 +2692,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
nomem:
nomem:
ecore_chain_free(p_dev, p_chain);
return rc;
}
@ -3033,7 +3033,7 @@ enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
}
}
return ECORE_SUCCESS;
}
}
static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -3089,7 +3089,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
goto out;
p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
out:
out:
return rc;
}
@ -3119,7 +3119,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
goto out;
p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
out:
out:
return rc;
}

View File

@ -276,16 +276,16 @@ struct ecore_dmae_params {
};
/**
* @brief ecore_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
*/
* @brief ecore_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -293,15 +293,15 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
u32 grc_addr, u32 size_in_dwords, u32 flags);
/**
* @brief ecore_dmae_grc2host - Read data from dmae data offset
* to source address using the given ptt
*
* @param p_ptt
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
* @param flags - one of the flags defined above
*/
* @brief ecore_dmae_grc2host - Read data from dmae data offset
* to source address using the given ptt
*
* @param p_ptt
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
* @param flags - one of the flags defined above
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -309,16 +309,16 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
/**
* @brief ecore_dmae_host2host - copy data from to source address
* to a destination address (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
* @brief ecore_dmae_host2host - copy data from to source address
* to a destination address (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@ -441,7 +441,7 @@ void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
/**
*@brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn

View File

@ -953,7 +953,7 @@ enum malicious_vf_error_id {
VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
ETH_PACKET_TOO_SMALL
/* TX packet is shorter than reported on BDs or from minimal size */
/* TX packet is shorter than reported on BDs or from minimal size */
,
ETH_ILLEGAL_VLAN_MODE
/* Tx packet with marked as insert VLAN when its illegal */,
@ -1060,7 +1060,7 @@ struct pf_start_ramrod_data {
u8 allow_npar_tx_switching;
u8 inner_to_outer_pri_map[8];
u8 pri_map_valid
/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
;
__le32 outer_tag;
u8 reserved0[4];
@ -1244,7 +1244,7 @@ enum tunnel_clss {
TUNNEL_CLSS_MAC_VNI
,
TUNNEL_CLSS_INNER_MAC_VLAN
/* Use MAC and VLAN from last L2 header for vport classification */
/* Use MAC and VLAN from last L2 header for vport classification */
,
TUNNEL_CLSS_INNER_MAC_VNI
,

View File

@ -872,7 +872,7 @@ struct eth_vport_tpa_param {
u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
u8 tpa_pkt_split_flg;
u8 tpa_hdr_data_split_flg
/* If set, put header of first TPA segment on bd and data on SGE */
/* If set, put header of first TPA segment on bd and data on SGE */
;
u8 tpa_gro_consistent_flg
/* If set, GRO data consistent will checked for TPA continue */;
@ -882,10 +882,10 @@ struct eth_vport_tpa_param {
__le16 tpa_min_size_to_start
/* minimum TCP payload size for a packet to start aggregation */;
__le16 tpa_min_size_to_cont
/* minimum TCP payload size for a packet to continue aggregation */
/* minimum TCP payload size for a packet to continue aggregation */
;
u8 max_buff_num
/* maximal number of buffers that can be used for one aggregation */
/* maximal number of buffers that can be used for one aggregation */
;
u8 reserved;
};
@ -1124,7 +1124,7 @@ struct vport_start_ramrod_data {
u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
;
u8 silent_vlan_removal_en;
/* If enable then innerVlan will be striped and not written to cqe */
/* If enable then innerVlan will be striped and not written to cqe */
u8 untagged;
struct eth_tx_err_vals tx_err_behav
/* Desired behavior per TX error type */;

View File

@ -116,7 +116,7 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
return p_ptt;
}
}
void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
@ -298,7 +298,7 @@ void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
/* Every pretend undoes prev pretends, including previous port pretend */
/* Every pretend undoes prev pretends, including previous port pretend */
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

View File

@ -1138,7 +1138,7 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
return;
}
/* Check the validity of the DPC ptt. If not ack interrupts and fail */
/* Check the validity of the DPC ptt. If not ack interrupts and fail */
if (!p_hwfn->p_dpc_ptt) {
DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
@ -2105,7 +2105,7 @@ u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
sb_id);
return 0;
}
}
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{

View File

@ -10,49 +10,49 @@
#define __IRO_VALUES_H__
static const struct iro iro_arr[44] = {
{0x0, 0x0, 0x0, 0x0, 0x8},
{ 0x0, 0x0, 0x0, 0x0, 0x8},
{0x4db0, 0x60, 0x0, 0x0, 0x60},
{0x6418, 0x20, 0x0, 0x0, 0x20},
{0x500, 0x8, 0x0, 0x0, 0x4},
{0x480, 0x8, 0x0, 0x0, 0x4},
{0x0, 0x8, 0x0, 0x0, 0x2},
{ 0x0, 0x8, 0x0, 0x0, 0x2},
{0x80, 0x8, 0x0, 0x0, 0x2},
{0x4938, 0x0, 0x0, 0x0, 0x78},
{0x3df0, 0x0, 0x0, 0x0, 0x78},
{0x29b0, 0x0, 0x0, 0x0, 0x78},
{ 0x3df0, 0x0, 0x0, 0x0, 0x78},
{ 0x29b0, 0x0, 0x0, 0x0, 0x78},
{0x4d38, 0x0, 0x0, 0x0, 0x78},
{0x56c8, 0x0, 0x0, 0x0, 0x78},
{0x7e48, 0x0, 0x0, 0x0, 0x78},
{0xa28, 0x8, 0x0, 0x0, 0x8},
{ 0x7e48, 0x0, 0x0, 0x0, 0x78},
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
{0x61f8, 0x10, 0x0, 0x0, 0x10},
{0xb500, 0x30, 0x0, 0x0, 0x30},
{0x95b8, 0x30, 0x0, 0x0, 0x30},
{ 0x95b8, 0x30, 0x0, 0x0, 0x30},
{0x5898, 0x40, 0x0, 0x0, 0x40},
{0x1f8, 0x10, 0x0, 0x0, 0x8},
{0xa228, 0x0, 0x0, 0x0, 0x4},
{0x8050, 0x40, 0x0, 0x0, 0x30},
{ 0x8050, 0x40, 0x0, 0x0, 0x30},
{0xcf8, 0x8, 0x0, 0x0, 0x8},
{0x2b48, 0x80, 0x0, 0x0, 0x38},
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
{0xadf0, 0x0, 0x0, 0x0, 0xf0},
{0xaee0, 0x8, 0x0, 0x0, 0x8},
{0x80, 0x8, 0x0, 0x0, 0x8},
{0xac0, 0x8, 0x0, 0x0, 0x8},
{0x2578, 0x8, 0x0, 0x0, 0x8},
{0x24f8, 0x8, 0x0, 0x0, 0x8},
{0x0, 0x8, 0x0, 0x0, 0x8},
{0x200, 0x10, 0x8, 0x0, 0x8},
{ 0xac0, 0x8, 0x0, 0x0, 0x8},
{ 0x2578, 0x8, 0x0, 0x0, 0x8},
{ 0x24f8, 0x8, 0x0, 0x0, 0x8},
{ 0x0, 0x8, 0x0, 0x0, 0x8},
{ 0x200, 0x10, 0x8, 0x0, 0x8},
{0x17f8, 0x8, 0x0, 0x0, 0x2},
{0x19f8, 0x10, 0x8, 0x0, 0x2},
{0xd988, 0x38, 0x0, 0x0, 0x24},
{0x11040, 0x10, 0x0, 0x0, 0x8},
{0x11670, 0x38, 0x0, 0x0, 0x18},
{0xaeb8, 0x30, 0x0, 0x0, 0x10},
{0x86f8, 0x28, 0x0, 0x0, 0x18},
{ 0x86f8, 0x28, 0x0, 0x0, 0x18},
{0xebf8, 0x10, 0x0, 0x0, 0x10},
{0xde08, 0x40, 0x0, 0x0, 0x30},
{0x121a0, 0x38, 0x0, 0x0, 0x8},
{0xf060, 0x20, 0x0, 0x0, 0x20},
{0x2b80, 0x80, 0x0, 0x0, 0x10},
{ 0x2b80, 0x80, 0x0, 0x0, 0x10},
{0x50a0, 0x10, 0x0, 0x0, 0x10},
};

View File

@ -429,7 +429,7 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc != ECORE_SUCCESS) {
/* Return spq entry which is taken in ecore_sp_init_request() */
/* Return spq entry which is taken in ecore_sp_init_request()*/
ecore_spq_return_entry(p_hwfn, p_ent);
return rc;
}
@ -632,7 +632,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod)
void OSAL_IOMEM **pp_prod)
{
struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
u8 abs_stats_id = 0;
@ -876,7 +876,7 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
void OSAL_IOMEM **pp_doorbell)
{
struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
union ecore_qm_pq_params pq_params;

View File

@ -182,7 +182,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod);
void OSAL_IOMEM **pp_prod);
/**
* @brief ecore_sp_eth_rx_queue_stop -

View File

@ -358,7 +358,7 @@ enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_drv_mb, union_data);
ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
sizeof(*p_union_data));
}
}
rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
o_mcp_param);
@ -577,7 +577,7 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
"Received transceiver state update [0x%08x] from mfw"
"[Addr 0x%x]\n",
" [Addr 0x%x]\n",
transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
transceiver_data)));

View File

@ -345,7 +345,7 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
return OSAL_NULL;
}
/* Allocate and initialize EQ chain */
/* Allocate and initialize EQ chain*/
if (ecore_chain_alloc(p_hwfn->p_dev,
ECORE_CHAIN_USE_TO_PRODUCE,
ECORE_CHAIN_MODE_PBL,
@ -607,8 +607,7 @@ ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
p_spq->unlimited_pending_count++;
return ECORE_SUCCESS;
}
} else {
struct ecore_spq_entry *p_en2;
p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
@ -634,6 +633,7 @@ ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
p_ent = p_en2;
}
}
/* entry is to be placed in 'pending' queue */
switch (priority) {

View File

@ -106,13 +106,13 @@ struct ecore_spq {
unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
u8 comp_bitmap_idx;
#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
/* Statistics */
u32 unlimited_pending_count;

View File

@ -453,7 +453,7 @@ void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
return;
vf->to_disable = to_disable;
}
}
void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
{
@ -1857,7 +1857,7 @@ ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
p_data->vport_active_tx_flg = p_act_tlv->active_tx;
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
}
}
}
static void
@ -1908,7 +1908,7 @@ ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
p_data->update_tx_switching_flg = 1;
p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
}
}
}
static void
@ -1929,7 +1929,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
sizeof(unsigned long) *
ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
}
}
static void
@ -1953,7 +1953,7 @@ ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
p_data->accept_flags.tx_accept_filter =
p_accept_tlv->tx_accept_filter;
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
}
}
}
static void
@ -1973,7 +1973,7 @@ ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
p_data->update_accept_any_vlan_flg =
p_accept_any_vlan->update_accept_any_vlan_flg;
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
}
}
static void

View File

@ -264,7 +264,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
return 0;
}
}
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)
{
@ -280,7 +280,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)
"regview should be initialized before"
" ecore_vf_hw_prepare is called\n");
return ECORE_INVAL;
}
}
/* Set the doorbell bar. Assumption: regview is set */
p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
@ -388,7 +388,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod)
void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_start_rxq_tlv *req;
@ -481,7 +481,7 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_start_txq_tlv *req;

View File

@ -63,7 +63,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod);
void OSAL_IOMEM **pp_prod);
/**
*
@ -86,7 +86,7 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell);
void OSAL_IOMEM **pp_doorbell);
/**
*

View File

@ -131,10 +131,10 @@ struct vfpf_vport_update_rss_tlv {
struct channel_tlv tl;
u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
u8 rss_enable;
u8 rss_caps;
@ -192,8 +192,8 @@ struct pfvf_acquire_resp_tlv {
* this struct with suggested amount of resources for next
* acquire request
*/
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
@ -287,9 +287,9 @@ struct vfpf_update_rxq_tlv {
u16 rx_qid;
u8 num_rxqs;
u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
u8 padding[4];
};
@ -297,9 +297,9 @@ struct vfpf_update_rxq_tlv {
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
@ -374,16 +374,16 @@ struct vfpf_vport_update_sge_tpa_tlv {
struct channel_tlv tl;
u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
u8 max_buffers_per_cqe;

View File

@ -36,17 +36,17 @@ typedef u32 offsize_t; /* In DWORDS !!! */
/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
#define SECTION_OFFSET(_offsize) \
((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
/* SECTION_SIZE is calculating the size in bytes out of offsize */
#define SECTION_SIZE(_offsize) \
(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
#define SECTION_ADDR(_offsize, idx) \
(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
/* PHY configuration */
struct pmm_phy_cfg {
@ -88,32 +88,32 @@ struct port_mf_cfg {
* MUST be synced with struct pmm_stats_map
*/
struct pmm_stats {
u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */
u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */
u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */
u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */
u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */
u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter*/
u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter*/
u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged */
u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */
u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */
u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */
u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */
u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */
u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */
u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */
u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */
u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter */
u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */
u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */
u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */
u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */
u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter*/
u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter*/
u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
@ -151,14 +151,14 @@ struct port_stats {
* | ports | | | | |
*======+==================+=========+=========+========+======================
* BB | 1x100G | This is special mode, where there are 2 HW func
* BB | 2x10/20Gbps | 0,1 | NA | No | 1 | 1
* BB | 2x10/20Gbps| 0,1 | NA | No | 1 | 1
* BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
* BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
* BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2
* BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2
* BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2
* BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
* AH | 2x10/20Gbps | 0,1 | NA | NA | 1 | NA
* AH | 2x10/20Gbps| 0,1 | NA | NA | 1 | NA
* AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
* AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
* AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
@ -427,7 +427,7 @@ struct public_path {
u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
u32 process_kill;
/* Reset on mcp reset, and incremented for every process kill event. */
/* Reset on mcp reset, and incremented for every process kill event. */
#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
#define PROCESS_KILL_COUNTER_SHIFT 0
#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
@ -574,7 +574,7 @@ struct public_port {
struct dcbx_mib remote_dcbx_mib;
struct dcbx_mib operational_dcbx_mib;
/* FC_NPIV table offset & size in NVRAM value of 0 means not present */
/* FC_NPIV table offset & size in NVRAM value of 0 means not present */
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
@ -641,7 +641,7 @@ struct public_func {
/* MTU size per function is needed for the OV feature */
u32 mtu_size;
/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
/* For PCP values 0-3 use the map lower */
/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
* 0x0000FF00 - PCP 2, 0x000000FF PCP 3
@ -822,7 +822,7 @@ struct public_drv_mb {
/* Vitaly: LLDP commands */
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
/* OneView feature driver HSI */
/* OneView feature driver HSI*/
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
@ -893,7 +893,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
/* LLDP / DCBX params */
/* LLDP / DCBX params*/
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
@ -925,7 +925,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
/* configure vf MSIX params */
/* configure vf MSIX params*/
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8

File diff suppressed because it is too large Load Diff