Upgrade STORMFW to 8.30.0.0 and ecore version to 8.30.0.0

Add support for PCI device ID 0x8070 for the QLE41xxx product line,
which supports 10GbE/25GbE/40GbE.

MFC after: 5 days
David C Somayajulu 2017-06-15 02:45:43 +00:00
parent ed840c526f
commit 9efd0ba788
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=319964
76 changed files with 66637 additions and 61345 deletions
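
The device ID mentioned in the commit message is only visible in the driver's probe path, which is not part of the hunks shown below. As a rough orientation, a minimal sketch of how a FreeBSD-style probe routine recognizes such an ID follows; the function name, the QLE41XXX macro name, and the assumption that 0x1077 is the QLogic vendor ID are illustrative rather than taken from this commit.

#include <stdint.h>

/*
 * Illustrative sketch only: match the new 0x8070 device ID in a probe
 * routine.  Names are hypothetical; the real qlnxe probe table is not
 * shown in this diff.
 */
#define PCI_VENDOR_QLOGIC	0x1077	/* assumed QLogic/Cavium vendor ID */
#define PCI_PRODUCT_QLE41XXX	0x8070	/* new 10GbE/25GbE/40GbE device ID */

static int
qlnx_probe_sketch(uint16_t vendor, uint16_t device)
{
	if (vendor != PCI_VENDOR_QLOGIC)
		return (-1);

	switch (device) {
	case PCI_PRODUCT_QLE41XXX:	/* added by this change */
		return (0);		/* claim the device */
	default:
		return (-1);		/* not ours */
	}
}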

View File

@ -34,12 +34,17 @@
#include "ecore_status.h"
#include <sys/bitstring.h>
#if __FreeBSD_version >= 1100090
#if __FreeBSD_version >= 1200000
#include <linux/bitmap.h>
#else
#if __FreeBSD_version >= 1100090
#include <compat/linuxkpi/common/include/linux/bitops.h>
#else
#include <ofed/include/linux/bitops.h>
#endif
#endif
#define OSAL_NUM_CPUS() mp_ncpus
/*
* prototypes of freebsd specific functions required by ecore
*/
@ -60,6 +65,7 @@ extern int qlnx_pci_find_capability(void *ecore_dev, int cap);
extern uint32_t qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr);
extern void qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value);
extern void qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value);
extern uint32_t qlnx_reg_rd32(void *p_hwfn, uint32_t reg_addr);
extern void qlnx_reg_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value);
@ -129,6 +135,8 @@ rounddown_pow_of_two(unsigned long x)
#endif /* #ifndef QLNX_RDMA */
#define OSAL_UNUSED
#define OSAL_CPU_TO_BE64(val) htobe64(val)
#define OSAL_BE64_TO_CPU(val) be64toh(val)
@ -199,6 +207,8 @@ typedef struct osal_list_t
#define REG_WR(hwfn, addr, val) qlnx_reg_wr32(hwfn, addr, val)
#define REG_WR16(hwfn, addr, val) qlnx_reg_wr16(hwfn, addr, val)
#define DIRECT_REG_WR(p_hwfn, addr, value) qlnx_direct_reg_wr32(p_hwfn, addr, value)
#define DIRECT_REG_WR64(p_hwfn, addr, value) \
qlnx_direct_reg_wr64(p_hwfn, addr, value)
#define DIRECT_REG_RD(p_hwfn, addr) qlnx_direct_reg_rd32(p_hwfn, addr)
#define REG_RD(hwfn, addr) qlnx_reg_rd32(hwfn, addr)
#define DOORBELL(hwfn, addr, value) \

View File

@ -88,7 +88,7 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
/*
* Usually LL2 queues are opened in pairs TX-RX.
* Usually LL2 queues are opened in pairs TX-RX.
* There is a hard restriction on number of RX queues (limited by Tstorm RAM) and TX counters (Pstorm RAM).
* Number of TX queues is almost unlimited.
* The constants are different so as to allow asymmetric LL2 connections
@ -99,13 +99,13 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
// Include firmware version number only- do not add constants here to avoid redundunt compilations
// Include firmware verison number only- do not add constants here to avoid redundunt compilations
///////////////////////////////////////////////////////////////////////////////////////////////////
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 18
#define FW_REVISION_VERSION 14
#define FW_MINOR_VERSION 30
#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@ -113,60 +113,60 @@
/***********************/
/* PCI functions */
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_BB (2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
#define MAX_NUM_PORTS_K2 (4)
#define MAX_NUM_PORTS_E5 (MAX_NUM_PORTS_K2)
#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_BB (8)
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_PFS_K2 (16)
#define MAX_NUM_PFS_E5 (MAX_NUM_PFS_K2)
#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
#define MAX_NUM_VFS_BB (120)
#define MAX_NUM_VFS_K2 (192)
#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define E5_MAX_NUM_VFS (240)
#define COMMON_MAX_NUM_VFS (E5_MAX_NUM_VFS)
#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
#define MAX_NUM_VFS_E5 (240)
#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
/* in both BB and K2, the VF number starts from 16. so for arrays containing all */
/* possible PFs and VFs - we need a constant for this size */
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
#define MAX_NUM_VPORTS_E5 (256)
#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
// 4-Port K2.
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_PHYS_TCS_4PORT_E5 (6)
#define NUM_OF_PHYS_TCS (8)
#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_TCS_4PORT_E5 (NUM_PHYS_TCS_4PORT_E5 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
#define LB_TC (NUM_OF_PHYS_TCS)
/* Num of possible traffic priority values */
#define NUM_OF_PRIO (8)
#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
#define E4_NUM_OF_CONNECTION_TYPES (8)
#define E5_NUM_OF_CONNECTION_TYPES (16)
#define NUM_OF_CONNECTION_TYPES_E4 (8)
#define NUM_OF_CONNECTION_TYPES_E5 (16)
#define NUM_OF_TASK_TYPES (8)
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
@ -375,11 +375,13 @@
/* number of TX queues in the QM */
#define MAX_QM_TX_QUEUES_K2 512
#define MAX_QM_TX_QUEUES_BB 448
#define MAX_QM_TX_QUEUES_E5 MAX_QM_TX_QUEUES_K2
#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2
/* number of Other queues in the QM */
#define MAX_QM_OTHER_QUEUES_BB 64
#define MAX_QM_OTHER_QUEUES_K2 128
#define MAX_QM_OTHER_QUEUES_E5 MAX_QM_OTHER_QUEUES_K2
#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2
/* number of queues in a PF queue group */
@ -413,7 +415,9 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB 12
#define PIS_PER_SB_E4 12
#define PIS_PER_SB_E5 8
#define MAX_PIS_PER_SB OSAL_MAX_T(u8, PIS_PER_SB_E4, PIS_PER_SB_E5)
#define CAU_HC_STOPPED_STATE 3 /* fsm is stopped or not valid for this sb */
@ -427,7 +431,8 @@
#define MAX_SB_PER_PATH_K2 (368)
#define MAX_SB_PER_PATH_BB (288)
#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_K2
#define MAX_SB_PER_PATH_E5 (512)
#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_E5
#define MAX_SB_PER_PF_MIMD 129
#define MAX_SB_PER_PF_SIMD 64
@ -588,7 +593,7 @@
// ILT Records
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB,PXP_NUM_ILT_RECORDS_K2)
#define MAX_NUM_ILT_RECORDS OSAL_MAX_T(u16, PXP_NUM_ILT_RECORDS_BB,PXP_NUM_ILT_RECORDS_K2)
// Host Interface
@ -633,7 +638,8 @@
/******************/
/* Number of PBF command queue lines. Each line is 32B. */
#define PBF_MAX_CMD_LINES 3328
#define PBF_MAX_CMD_LINES_E4 3328
#define PBF_MAX_CMD_LINES_E5 5280
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
@ -737,8 +743,8 @@ union rdma_eqe_data
*/
struct malicious_vf_eqe_data
{
u8 vfId /* Malicious VF ID */;
u8 errId /* Malicious VF error */;
u8 vf_id /* Malicious VF ID */;
u8 err_id /* Malicious VF error */;
__le16 reserved[3];
};
@ -747,7 +753,7 @@ struct malicious_vf_eqe_data
*/
struct initial_cleanup_eqe_data
{
u8 vfId /* VF ID */;
u8 vf_id /* VF ID */;
u8 reserved[7];
};
@ -1059,7 +1065,7 @@ struct db_rdma_dpm_data
{
__le16 icid /* internal CID */;
__le16 prod_val /* aggregated value to update */;
struct db_rdma_dpm_params params /* parameters passed to RDMA firmware */;
struct db_rdma_dpm_params params /* parametes passed to RDMA firmware */;
};
@ -1113,25 +1119,25 @@ enum igu_seg_access
/*
* Enumeration for L3 type field of parsing_and_err_flags_union. L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according to the last-ethertype)
* Enumeration for L3 type field of parsing_and_err_flags. L3Type: 0 - unknown (not ip) ,1 - Ipv4, 2 - Ipv6 (this field can be filled according to the last-ethertype)
*/
enum l3_type
{
e_l3Type_unknown,
e_l3Type_ipv4,
e_l3Type_ipv6,
e_l3_type_unknown,
e_l3_type_ipv4,
e_l3_type_ipv6,
MAX_L3_TYPE
};
/*
* Enumeration for l4Protocol field of parsing_and_err_flags_union. L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the first fragment, the protocol-type should be set to none.
* Enumeration for l4Protocol field of parsing_and_err_flags. L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the first fragment, the protocol-type should be set to none.
*/
enum l4_protocol
{
e_l4Protocol_none,
e_l4Protocol_tcp,
e_l4Protocol_udp,
e_l4_protocol_none,
e_l4_protocol_tcp,
e_l4_protocol_udp,
MAX_L4_PROTOCOL
};
@ -1146,11 +1152,11 @@ struct parsing_and_err_flags
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 /* L4-protocol 0 - none, 1 - TCP, 2- UDP. if the packet is IPv4 fragment, and its not the first fragment, the protocol-type should be set to none. (use enum l4_protocol) */
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 /* Set if the packet is IPv4 fragment. */
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 /* Set if the packet is IPv4/IPv6 fragment. */
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 /* Set if VLAN tag exists. Invalid if tunnel type are IP GRE or IP GENEVE. */
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 /* corresponds to the same 8021q tag that is selected for 8021q-tag fiel. This flag should be set if the tag appears in the packet, regardless of its value. */
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 /* Set if L4 checksum was calculated. */
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 /* Set if L4 checksum was calculated. taken from the EOP descriptor. */
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 /* Set for PTP packet. */
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
@ -1162,11 +1168,11 @@ struct parsing_and_err_flags
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 /* Set if GRE/VXLAN/GENEVE tunnel detected. */
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 /* Set if VLAN tag exists in tunnel header. */
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 /* This flag should be set if the tag appears in the packet tunnel header, regardless of its value.. */
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 /* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch */
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 /* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 /* taken from the EOP descriptor. */
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 /* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum was calculated. */
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
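
Since the hunk above only lists the bit-field definitions and the updated comments, here is a small self-contained sketch of how a receive path could decode parsing_and_err_flags with the driver's GET_FIELD convention. The L3TYPE mask value (0x3) is repeated here as an assumption because its definition line is not shown in the hunk, and the flags value is assumed to already be in CPU byte order.

#include <stdint.h>

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

/* field layout as described in the hunk above */
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK	0x3	/* assumed, not shown */
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT	0
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK	0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT	2
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK	0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT	4

/* e_l3_type_ipv4 == 1 and e_l4_protocol_tcp == 1 per the enums above */
static int
rx_pkt_is_tcp_ipv4(uint16_t flags)
{
	if (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L3TYPE) != 1)
		return (0);
	if (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_IPV4FRAG))
		return (0);	/* fragments report no L4 protocol */
	return (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4PROTOCOL) == 1);
}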
@ -1419,21 +1425,42 @@ enum rss_hash_type
/*
* status block structure
*/
struct status_block
struct status_block_e4
{
__le16 pi_array[PIS_PER_SB];
__le16 pi_array[PIS_PER_SB_E4];
__le32 sb_num;
#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_SB_NUM_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
/*
* status block structure
*/
struct status_block_e5
{
__le16 pi_array[PIS_PER_SB_E5];
__le32 sb_num;
#define STATUS_BLOCK_E5_SB_NUM_MASK 0x1FF
#define STATUS_BLOCK_E5_SB_NUM_SHIFT 0
#define STATUS_BLOCK_E5_ZERO_PAD_MASK 0x7F
#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT 9
#define STATUS_BLOCK_E5_ZERO_PAD2_MASK 0xFFFF
#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT 16
__le32 prod_index;
#define STATUS_BLOCK_E5_PROD_INDEX_MASK 0xFFFFFF
#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
#define STATUS_BLOCK_E5_ZERO_PAD3_MASK 0xFF
#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT 24
};
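
The E4 and E5 layouts above differ only in the depth of pi_array (PIS_PER_SB_E4 = 12 vs. PIS_PER_SB_E5 = 8) and the prefix on the mask names. A hedged sketch of reading the 24-bit producer index from an E4 status block, assuming GET_FIELD from ecore.h and ecore's OSAL_LE32_TO_CPU little-endian helper:

/*
 * Sketch only: extract the producer index from struct status_block_e4
 * using the mask/shift pairs defined in the hunk above.
 */
static u32
sb_get_prod_sketch(const struct status_block_e4 *sb)
{
	u32 prod = OSAL_LE32_TO_CPU(sb->prod_index);

	return (GET_FIELD(prod, STATUS_BLOCK_E4_PROD_INDEX));
}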

View File

@ -39,8 +39,8 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 18
#define ECORE_REVISION_VERSION 13
#define ECORE_MINOR_VERSION 30
#define ECORE_REVISION_VERSION 0
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
@ -110,13 +110,13 @@ do { \
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#define ECORE_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
#define GET_MFW_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define ECORE_MFW_SET_FIELD(name, field, value) \
#define SET_MFW_FIELD(name, field, value) \
do { \
(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
(name) &= ~((field ## _MASK) << (field ## _OFFSET)); \
(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
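
The rename a few lines above (ECORE_MFW_GET_FIELD/ECORE_MFW_SET_FIELD to GET_MFW_FIELD/SET_MFW_FIELD) tracks the management-FW headers switching from *_SHIFT to *_OFFSET suffixes, with the MFW masks already positioned in place. A standalone illustration of the read side of that convention; the DUMMY_MFW_FIELD definitions are invented for the example and do not exist in the headers.

#include <stdio.h>
#include <stdint.h>

#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

/* hypothetical MFW-style field occupying bits 8..11; mask is in place */
#define DUMMY_MFW_FIELD_MASK	0x00000f00
#define DUMMY_MFW_FIELD_OFFSET	8

int
main(void)
{
	uint32_t flags = 0x00000a00;

	/* prints 10 */
	printf("%u\n", (unsigned)GET_MFW_FIELD(flags, DUMMY_MFW_FIELD));
	return (0);
}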
@ -401,6 +401,11 @@ enum ecore_wol_support {
ECORE_WOL_SUPPORT_PME,
};
enum ecore_db_rec_exec {
DB_REC_DRY_RUN,
DB_REC_REAL_DEAL,
};
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
@ -450,10 +455,7 @@ struct ecore_hw_info {
#ifndef ETH_ALEN
#define ETH_ALEN 6 /* @@@ TBD - define somewhere else for Windows */
#endif
unsigned char hw_mac_addr[ETH_ALEN];
u64 node_wwn; /* For FCoE only */
u64 port_wwn; /* For FCoE only */
u16 num_iscsi_conns;
u16 num_fcoe_conns;
@ -537,6 +539,12 @@ struct ecore_qm_info {
u8 num_pf_rls;
};
struct ecore_db_recovery_info {
osal_list_t list;
osal_spinlock_t lock;
u32 db_recovery_counter;
};
struct storm_stats {
u32 address;
u32 len;
@ -605,6 +613,11 @@ struct ecore_hwfn {
struct ecore_ptt *p_main_ptt;
struct ecore_ptt *p_dpc_ptt;
/* PTP will be used only by the leading funtion.
* Usage of all PTP-apis should be synchronized as result.
*/
struct ecore_ptt *p_ptp_ptt;
struct ecore_sb_sp_info *p_sp_sb;
struct ecore_sb_attn_info *p_sb_attn;
@ -661,6 +674,9 @@ struct ecore_hwfn {
/* L2-related */
struct ecore_l2_info *p_l2_info;
/* Mechanism for recovering from doorbell drop */
struct ecore_db_recovery_info db_recovery_info;
};
enum ecore_mf_mode {
@ -694,7 +710,7 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
#define ECORE_IS_E5(dev) false
#define ECORE_IS_E5(dev) ((dev)->type == ECORE_DEV_TYPE_E5)
#define ECORE_E5_MISSING_CODE OSAL_BUILD_BUG_ON(false)
@ -703,6 +719,7 @@ struct ecore_dev {
#define ECORE_DEV_ID_MASK 0xff00
#define ECORE_DEV_ID_MASK_BB 0x1600
#define ECORE_DEV_ID_MASK_AH 0x8000
#define ECORE_DEV_ID_MASK_E5 0x8100
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
@ -746,7 +763,7 @@ struct ecore_dev {
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports_in_engines;
u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
@ -836,6 +853,9 @@ struct ecore_dev {
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
#define CRC8_TABLE_SIZE 256
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@ -844,8 +864,7 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
u32 concrete_fid)
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@ -860,8 +879,8 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
return sw_fid;
}
#define PURE_LB_TC 8
#define PKT_LB_TC 9
#define MAX_NUM_VOQS_E4 20
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@ -873,6 +892,7 @@ int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
int ecore_device_get_port_id(struct ecore_dev *p_dev);
void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
u8 *mac);
@ -892,6 +912,13 @@ u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
/* doorbell recovery mechanism */
void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
enum ecore_db_rec_exec);
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
@ -901,6 +928,4 @@ u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
#endif /* __ECORE_H */

View File

@ -214,6 +214,11 @@ static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
return p_chain->u.chain32.cons_idx;
}
/* FIXME:
* Should create OSALs for the below definitions.
* For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
* kernel versions that lack them.
*/
#define ECORE_U16_MAX ((u16)~0U)
#define ECORE_U32_MAX ((u32)~0U)

View File

@ -72,17 +72,7 @@ __FBSDID("$FreeBSD$");
#define TM_ELEM_SIZE 4
/* ILT constants */
/* If for some reason, HW P size is modified to be less than 32K,
* special handling needs to be made for CDU initialization
*/
#ifdef CONFIG_ECORE_ROCE
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. Can be
* optimized with resource management scheme
*/
#define ILT_DEFAULT_HW_P_SIZE 4
#else
#define ILT_DEFAULT_HW_P_SIZE 3
#endif
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
@ -97,22 +87,22 @@ __FBSDID("$FreeBSD$");
/* connection context union */
union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
struct iscsi_conn_context iscsi_ctx;
struct fcoe_conn_context fcoe_ctx;
struct roce_conn_context roce_ctx;
struct e4_core_conn_context core_ctx;
struct e4_eth_conn_context eth_ctx;
struct e4_iscsi_conn_context iscsi_ctx;
struct e4_fcoe_conn_context fcoe_ctx;
struct e4_roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
struct iscsi_task_context iscsi_ctx;
struct fcoe_task_context fcoe_ctx;
struct e4_iscsi_task_context iscsi_ctx;
struct e4_fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
struct rdma_task_context roce_ctx;
struct e4_rdma_task_context roce_ctx;
};
struct src_ent {
@ -274,12 +264,10 @@ struct ecore_cxt_mngr {
};
/* check if resources/configuration is required according to protocol type */
static bool src_proto(struct ecore_hwfn *p_hwfn,
enum protocol_type type)
static bool src_proto(enum protocol_type type)
{
return type == PROTOCOLID_ISCSI ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_TOE ||
type == PROTOCOLID_IWARP;
}
@ -319,14 +307,13 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
static void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_mngr *p_mngr,
static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
if (!src_proto(p_hwfn, i))
if (!src_proto(i))
continue;
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
@ -346,8 +333,7 @@ struct ecore_tm_iids {
u32 per_vf_tids;
};
static void ecore_cxt_tm_iids(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_mngr *p_mngr,
static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
bool tm_vf_required = false;
@ -454,6 +440,20 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
return p_mgr->srq_count;
}
/* set the iids (cid/tid) count per protocol */
static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
@ -779,7 +779,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
total = ecore_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@ -797,7 +797,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
/* SRC */
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
ecore_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@ -822,7 +822,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
/* TM PF */
p_cli = ecore_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
ecore_cxt_tm_iids(p_mngr, &tm_iids);
total = tm_iids.pf_cids + tm_iids.pf_tids_total;
if (total) {
p_blk = ecore_cxt_set_blk(&p_cli->pf_blks[0]);
@ -952,7 +952,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@ -1287,7 +1287,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
/* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
@ -1299,7 +1299,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
/* Set the cxt mangr pointer priori to further allocations */
@ -1347,7 +1349,9 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
p_hwfn->p_cxt_mngr = OSAL_NULL;
@ -1555,7 +1559,7 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_qm_iids iids;
@ -1563,9 +1567,8 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
@ -1749,7 +1752,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
if (p_shdw[line].p_virt != OSAL_NULL) {
SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
(p_shdw[line].p_phys >> 12));
(unsigned long long)(p_shdw[line].p_phys >> 12));
DP_VERBOSE(
p_hwfn, ECORE_MSG_ILT,
@ -1771,7 +1774,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@ -1817,7 +1820,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
u8 i;
OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
ecore_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
ecore_cxt_tm_iids(p_mngr, &tm_iids);
/* @@@TBD No pre-scan for now */
@ -1908,9 +1911,11 @@ static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
struct ecore_conn_type_cfg *p_fcoe;
struct ecore_tid_seg *p_tid;
p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
if (!p_fcoe->cid_count)
return;
@ -1934,9 +1939,9 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
ecore_prs_init_common(p_hwfn);
}
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
ecore_qm_init_pf(p_hwfn);
ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
@ -2119,20 +2124,6 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
return p_mgr->srq_count;
}
static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
struct ecore_rdma_pf_params *p_params,
u32 num_tasks)
@ -2143,7 +2134,7 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
/* Override personality with rdma flavor */
num_srqs = OSAL_MIN_T(u32, ECORE_RDMA_MAX_SRQS, p_params->num_srqs);
/* The only case RDMA personality can be overridden is if NVRAM is
/* The only case RDMA personality can be overriden is if NVRAM is
* configured with ETH_RDMA or if no rdma protocol was requested
*/
switch (p_params->rdma_protocol) {
@ -2170,8 +2161,12 @@ static void ecore_rdma_set_pf_params(struct ecore_hwfn *p_hwfn,
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH_IWARP:
num_qps = OSAL_MIN_T(u32, IWARP_MAX_QPS, p_params->num_qps);
num_cons = num_qps;
/* Each QP requires one connection */
num_cons = OSAL_MIN_T(u32, IWARP_MAX_QPS, p_params->num_qps);
#ifdef CONFIG_ECORE_IWARP /* required for the define */
/* additional connections required for passive tcp handling */
num_cons += ECORE_IWARP_PREALLOC_CNT;
#endif
proto = PROTOCOLID_IWARP;
p_params->roce_edpm_mode = false;
break;
@ -2576,14 +2571,14 @@ enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u8 ctx_type,
void **pp_task_ctx)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_seg;
struct ecore_tid_seg *p_seg_info;
u32 proto, seg;
u32 total_lines;
u32 tid_size, ilt_idx;
u32 num_tids_per_block;
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_tid_seg *p_seg_info;
struct ecore_ilt_cli_blk *p_seg;
u32 num_tids_per_block;
u32 tid_size, ilt_idx;
u32 total_lines;
u32 proto, seg;
/* Verify the personality */
switch (p_hwfn->hw_info.personality) {

View File

@ -28,7 +28,6 @@
*
*/
#ifndef _ECORE_CID_
#define _ECORE_CID_
@ -130,15 +129,17 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
* @brief ecore_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief ecore_qm_init_pf - Initailze the QM PF phase, per path
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
*
*/
#ifndef _DBG_FW_FUNCS_H
#define _DBG_FW_FUNCS_H
/**************************** Public Functions *******************************/
@ -179,7 +178,6 @@ enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
* unit is 2 dwords (64 bits).
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - block to be enabled.
* @param line_num - debug line number to select.
* @param cycle_en - 4-bit value. If bit i is set, unit i is enabled.
@ -200,13 +198,12 @@ enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block,
u8 line_num,
u8 cycle_en,
u8 right_shift,
u8 force_valid,
u8 force_frame);
enum block_id block,
u8 line_num,
u8 cycle_en,
u8 right_shift,
u8 force_valid,
u8 force_frame);
/**
* @brief ecore_dbg_bus_enable_storm - Enables recording of the specified Storm

File diff suppressed because it is too large

View File

@ -31,7 +31,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_sp_commands.h"
@ -61,13 +60,13 @@ __FBSDID("$FreeBSD$");
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_ETHTYPE);
}
static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
{
u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@ -78,13 +77,13 @@ static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@ -240,7 +239,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
char *name;
int i;
for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
@ -250,7 +248,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
continue;
personality = ecore_dcbx_app_update[i].personality;
name = ecore_dcbx_app_update[i].name;
ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
@ -338,10 +335,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PRI_MAP);
protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
priority_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
@ -394,17 +390,17 @@ static enum _ecore_status_t
ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
{
struct dcbx_app_priority_feature *p_app;
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_results data = { 0 };
struct dcbx_app_priority_entry *p_tbl;
struct ecore_dcbx_results data = { 0 };
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
u8 dcbx_version;
int num_entries;
enum _ecore_status_t rc = ECORE_SUCCESS;
flags = p_hwfn->p_dcbx_info->operational.flags;
dcbx_version = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
dcbx_version = GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@ -413,15 +409,15 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
pri_tc_tbl = p_ets->pri_tc_tbl[0];
p_info = &p_hwfn->hw_info;
num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
p_info->num_active_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
p_hwfn->qm_info.ooo_tc = GET_MFW_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@ -439,9 +435,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_mib_meta_data *p_data,
enum ecore_mib_read_type type)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* The data is considered to be valid only if both sequence numbers are
* the same.
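
The comment above (cut short by the hunk boundary) describes a prefix/suffix sequence-number check: the copy of the shared MIB is retried until both counters match, which means the MFW did not update the data mid-copy. A generic sketch of that retry pattern; the structure layout, read_shared_mib() helper, and retry bound are illustrative stand-ins for the DMAE reads and limits the driver actually uses.

#include <stdint.h>

struct mib_buf {			/* illustrative layout only */
	uint32_t prefix_seq_num;
	uint32_t data[32];
	uint32_t suffix_seq_num;
};

/* stand-in for the DMAE/GRC read the driver really performs */
extern void read_shared_mib(struct mib_buf *dst);

#define MIB_READ_RETRIES	100	/* made-up bound */

static int
copy_mib_sketch(struct mib_buf *dst)
{
	int tries = 0;

	do {
		read_shared_mib(dst);
		if (dst->prefix_seq_num == dst->suffix_seq_num)
			return (0);	/* consistent snapshot */
	} while (++tries < MIB_READ_RETRIES);

	return (-1);			/* MFW kept updating; give up */
}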
@ -526,26 +522,24 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
u8 pri_map;
int i;
p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
DCBX_APP_WILLING);
p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
DCBX_APP_ENABLED);
p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
DCBX_APP_NUM_ENTRIES);
p_params->app_willing = GET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING);
p_params->app_valid = GET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED);
p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
DCBX_APP_NUM_ENTRIES);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_params->app_entry[i];
if (ieee) {
u8 sf_ieee;
u32 val;
sf_ieee = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF_IEEE);
sf_ieee = GET_MFW_FIELD(p_tbl[i].entry,
DCBX_APP_SF_IEEE);
switch (sf_ieee) {
case DCBX_APP_SF_IEEE_RESERVED:
/* Old MFW */
val = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF);
val = GET_MFW_FIELD(p_tbl[i].entry,
DCBX_APP_SF);
entry->sf_ieee = val ?
ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
ECORE_DCBX_SF_IEEE_ETHTYPE;
@ -564,14 +558,14 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
break;
}
} else {
entry->ethtype = !(ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_SF));
entry->ethtype = !(GET_MFW_FIELD(p_tbl[i].entry,
DCBX_APP_SF));
}
pri_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
pri_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
ecore_dcbx_get_app_priority(pri_map, &entry->prio);
entry->proto_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
entry->proto_id = GET_MFW_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
&entry->proto_type, ieee);
@ -589,10 +583,10 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
{
u8 pfc_map;
p_params->pfc.willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
p_params->pfc.max_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
p_params->pfc.enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
pfc_map = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
@ -616,13 +610,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
u32 bw_map[2], tsa_map[2], pri_map;
int i;
p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_WILLING);
p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_ENABLED);
p_params->ets_cbs = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING);
p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED);
p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS);
p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
p_params->ets_willing, p_params->ets_enabled,
@ -669,7 +660,6 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@ -683,7 +673,6 @@ ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@ -698,7 +687,6 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
@ -714,7 +702,7 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
* was successfuly performed
*/
p_operational = &params->operational;
enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) !=
DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
@ -726,15 +714,15 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_IEEE);
p_operational->ieee = val;
val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_STATIC);
p_operational->local = val;
@ -749,7 +737,7 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
@ -757,10 +745,8 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
static void
ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_dscp_params *p_dscp;
struct dcb_dscp_map *p_dscp_map;
@ -769,8 +755,8 @@ ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
p_dscp = &params->dscp;
p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
p_dscp->enabled = ECORE_MFW_GET_FIELD(p_dscp_map->flags,
DCB_DSCP_ENABLE);
p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
/* MFW encodes 64 dscp entries into 8 element array of u32 entries,
* where each entry holds the 4bit priority map for 8 dscp entries.
*/
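
The comment spells out the packing: eight u32 words, each holding eight 4-bit priority nibbles, covering 64 DSCP values, with the low nibble of each word mapping to the lowest DSCP index in that word (matching the copy loop that follows in the driver). A standalone sketch of the unpacking arithmetic, independent of the ecore types:

#include <stdint.h>

/* Unpack the 4-bit priority for a DSCP value (0..63) from the 8 x u32 map. */
static uint8_t
dscp_to_prio_sketch(const uint32_t dscp_pri_map[8], uint8_t dscp)
{
	uint32_t word = dscp_pri_map[dscp / 8];	/* 8 DSCP entries per u32 */
	uint32_t shift = (dscp % 8) * 4;	/* 4 bits per entry */

	return ((word >> shift) & 0xf);
}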
@ -784,10 +770,8 @@ ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
}
}
static void
ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@ -800,10 +784,8 @@ ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_ARRAY_SIZE(p_local->local_port_id));
}
static void
ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@ -817,34 +799,32 @@ ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_params,
enum ecore_mib_read_type type)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
ecore_dcbx_get_remote_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_MIB:
ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
ecore_dcbx_get_local_params(p_hwfn, p_params);
break;
case ECORE_DCBX_OPERATIONAL_MIB:
ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
ecore_dcbx_get_operational_params(p_hwfn, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
return ECORE_INVAL;
}
return rc;
return ECORE_SUCCESS;
}
static enum _ecore_status_t
@ -993,25 +973,17 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
&p_hwfn->p_dcbx_info->get);
ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
rc = ecore_dcbx_process_mib_info(p_hwfn);
if (!rc) {
bool enabled;
/* reconfigure tcs of QM queues according
* to negotiation results
*/
ecore_qm_reconf(p_hwfn, p_ptt);
/* update storm FW with negotiation results */
ecore_sp_pf_update(p_hwfn);
/* set eagle enigne 1 flow control workaround
* according to negotiation results
*/
enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
ecore_sp_pf_update_dcbx(p_hwfn);
#ifdef CONFIG_ECORE_ROCE
/* for roce PFs, we may want to enable/disable DPM
@ -1023,7 +995,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
}
ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
struct ecore_dcbx_results *p_data;
@ -1153,7 +1125,7 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
@ -1180,13 +1152,13 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
*pfc &= ~DCBX_PFC_ENABLED_MASK;
*pfc &= ~DCBX_PFC_CAPS_MASK;
*pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
*pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
pfc_map |= (1 << i);
*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
}
@ -1216,7 +1188,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
@ -1262,7 +1234,7 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
p_app->flags |= (u32)p_params->num_app_entries <<
DCBX_APP_NUM_ENTRIES_SHIFT;
DCBX_APP_NUM_ENTRIES_OFFSET;
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
@ -1272,44 +1244,44 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT;
DCBX_APP_SF_IEEE_OFFSET;
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
break;
}
} else {
*entry &= ~DCBX_APP_SF_MASK;
if (p_params->app_entry[i].ethtype)
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
else
*entry |= ((u32)DCBX_APP_SF_PORT <<
DCBX_APP_SF_SHIFT);
DCBX_APP_SF_OFFSET);
}
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
DCBX_APP_PROTOCOL_ID_SHIFT);
DCBX_APP_PROTOCOL_ID_OFFSET);
*entry &= ~DCBX_APP_PRI_MAP_MASK;
*entry |= ((u32)(p_params->app_entry[i].prio) <<
DCBX_APP_PRI_MAP_SHIFT);
DCBX_APP_PRI_MAP_OFFSET);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
@ -1422,12 +1394,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
if (rc != ECORE_SUCCESS) {
1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, &param);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX update request\n");
return rc;
}
return rc;
}

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_DCBX_H__
#define __ECORE_DCBX_H__

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__
@ -171,8 +170,9 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_hibernate_prepare -should be called when
@ -197,10 +197,11 @@ void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);
* @brief ecore_hw_start_fastpath -restart fastpath traffic,
* only if hw_stop_fastpath was called
* @param p_dev
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,
@ -270,7 +271,6 @@ void ecore_hw_remove(struct ecore_dev *p_dev);
* @brief ecore_set_nwuf_reg -
*
* @param p_dev
* @param wol_flag - wol_capability
* @param reg_idx - Index of the pattern register
* @param pattern_size - size of pattern
* @param crc - CRC value of patter & mask
@ -278,30 +278,31 @@ void ecore_hw_remove(struct ecore_dev *p_dev);
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
const bool b_enable,
u32 reg_idx,
u32 pattern_size,
u32 crc);
u32 reg_idx, u32 pattern_size, u32 crc);
/**
* @brief ecore_get_wake_info - get magic packet buffer
*
* @param p_dev
* @param p_hwfn
* @param p_ppt
* @param wake_info - pointer to ecore_wake_info buffer
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev,
struct ecore_wake_info *wake_info);
enum _ecore_status_t ecore_get_wake_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_wake_info *wake_info);
/**
* @brief ecore_wol_buffer_clear - Clear magic package buffer
*
* @param p_dev
* @param p_hwfn
* @param p_ptt
*
* @return void
*/
void ecore_wol_buffer_clear(struct ecore_dev *p_dev);
void ecore_wol_buffer_clear(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_acquire - Allocate a PTT window
@ -328,6 +329,7 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
@ -418,6 +420,7 @@ struct ecore_eth_stats {
struct ecore_eth_stats_ah ah;
};
};
#endif
enum ecore_dmae_address_type_t {
ECORE_DMAE_ADDRESS_HOST_VIRT,
@ -482,7 +485,7 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
/**
* @brief ecore_dmae_host2host - copy data from to source address
* to a destination address (for SRIOV) using the given ptt
* to a destination adress (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
@ -710,4 +713,44 @@ ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 enable);
#ifndef __EXTRACT__LINUX__
enum ecore_db_rec_width {
DB_REC_WIDTH_32B,
DB_REC_WIDTH_64B,
};
enum ecore_db_rec_space {
DB_REC_KERNEL,
DB_REC_USER,
};
#endif
/**
* @brief db_recovery_add - add doorbell information to the doorbell
* recovery mechanism.
*
* @param p_dev
* @param db_addr - doorbell address
* @param db_data - address of where db_data is stored
* @param db_width - doorbell is 32b pr 64b
* @param db_space - doorbell recovery addresses are user or kernel space
*/
enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
void OSAL_IOMEM *db_addr,
void *db_data,
enum ecore_db_rec_width db_width,
enum ecore_db_rec_space db_space);
/**
* @brief db_recovery_del - remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
* @param cdev
* @param db_addr - doorbell address
* @param db_data - address where db_data is stored. Serves as key for the
* entry to delete.
*/
enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
void OSAL_IOMEM *db_addr,
void *db_data);
#endif
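
A hedged sketch of how a caller might pair the two doorbell-recovery entry points declared above around a queue's lifetime; struct my_queue and its fields are invented for the example, and error handling is omitted.

/*
 * Illustrative pairing of ecore_db_recovery_add()/ecore_db_recovery_del().
 * db_data is the lookup key on delete, since db_addr alone is not unique.
 */
struct my_queue {			/* hypothetical example structure */
	void OSAL_IOMEM *db_addr;	/* mapped doorbell register */
	u32 db_data;			/* shadow of last doorbell value */
};

static enum _ecore_status_t
queue_start_sketch(struct ecore_dev *p_dev, struct my_queue *q)
{
	/* register before the first doorbell can be rung */
	return (ecore_db_recovery_add(p_dev, q->db_addr, &q->db_data,
	    DB_REC_WIDTH_32B, DB_REC_KERNEL));
}

static void
queue_stop_sketch(struct ecore_dev *p_dev, struct my_queue *q)
{
	(void)ecore_db_recovery_del(p_dev, q->db_addr, &q->db_data);
}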

View File

@ -129,6 +129,7 @@ ecore_sp_fcoe_func_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_fcoe_func_stop(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);

View File

@ -28,7 +28,6 @@
*
*/
#ifndef GTT_REG_ADDR_H
#define GTT_REG_ADDR_H

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HSI_DEBUG_TOOLS__
#define __ECORE_HSI_DEBUG_TOOLS__
/****************************************/
@ -91,8 +90,8 @@ enum block_addr
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
GRCBASE_PTLD = 0x590000,
GRCBASE_YPLD = 0x5b0000,
GRCBASE_PTLD = 0x5a0000,
GRCBASE_YPLD = 0x5c0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HSI_FCOE__
#define __ECORE_HSI_FCOE__
/****************************************/
@ -763,7 +762,7 @@ struct mstorm_fcoe_conn_st_ctx
/*
* fcoe connection context
*/
struct fcoe_conn_context
struct e4_fcoe_conn_context
{
struct ystorm_fcoe_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context /* pstorm storm context */;
@ -783,360 +782,6 @@ struct fcoe_conn_context
};
/*
* FCoE connection offload params passed by driver to FW in FCoE offload ramrod
*/
struct fcoe_conn_offload_ramrod_params
{
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
/*
* FCoE connection terminate params passed by driver to FW in FCoE terminate conn ramrod
*/
struct fcoe_conn_terminate_ramrod_params
{
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
/*
* FCoE event type
*/
enum fcoe_event_type
{
FCOE_EVENT_INIT_FUNC /* Slowpath completion on INIT_FUNC ramrod */,
FCOE_EVENT_DESTROY_FUNC /* Slowpath completion on DESTROY_FUNC ramrod */,
FCOE_EVENT_STAT_FUNC /* Slowpath completion on STAT_FUNC ramrod */,
FCOE_EVENT_OFFLOAD_CONN /* Slowpath completion on OFFLOAD_CONN ramrod */,
FCOE_EVENT_TERMINATE_CONN /* Slowpath completion on TERMINATE_CONN ramrod */,
FCOE_EVENT_ERROR /* Error event */,
MAX_FCOE_EVENT_TYPE
};
/*
* FCoE init params passed by driver to FW in FCoE init ramrod
*/
struct fcoe_init_ramrod_params
{
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
/*
* FCoE ramrod Command IDs
*/
enum fcoe_ramrod_cmd_id
{
FCOE_RAMROD_CMD_ID_INIT_FUNC /* FCoE function init ramrod */,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC /* FCoE function destroy ramrod */,
FCOE_RAMROD_CMD_ID_STAT_FUNC /* FCoE statistics ramrod */,
FCOE_RAMROD_CMD_ID_OFFLOAD_CONN /* FCoE connection offload ramrod */,
FCOE_RAMROD_CMD_ID_TERMINATE_CONN /* FCoE connection offload ramrod. Command ID known only to FW and VBD */,
MAX_FCOE_RAMROD_CMD_ID
};
/*
* FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
*/
struct fcoe_stat_ramrod_params
{
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
struct e4_ystorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 word1 /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_mstorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
};
struct e5_tstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 /* timer0cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
};
struct e5_ustorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags4;
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf7 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf8 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf7en */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf8en */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* conn_dpi */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
};
struct e5_xstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
@ -1391,6 +1036,383 @@ struct e5_xstorm_fcoe_conn_ag_ctx
__le32 reg7 /* reg7 */;
};
struct e5_tstorm_fcoe_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 /* timer0cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_FCOE_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
};
struct e5_ustorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags4;
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf7 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf8 */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf7en */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf8en */
#define E5_USTORM_FCOE_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* conn_dpi */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
};
struct e5_mstorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
};
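/*
 * Illustrative sketch (not part of the vendor HSI): every *_MASK / *_SHIFT
 * pair in the aggregative-context structs above is meant to be driven
 * through generic field accessors. The ecore driver supplies its own
 * GET_FIELD / SET_FIELD macros elsewhere; the standalone helpers below only
 * mirror that convention so the pattern can be seen in isolation. All EX_*
 * names are local to this example.
 */
#include <stdio.h>

/* Field layout modeled on flags0 above: a 1-bit flag at bit 0 and a
 * 2-bit completion-flag field starting at bit 2. */
#define EX_BIT0_MASK  0x1
#define EX_BIT0_SHIFT 0
#define EX_CF0_MASK   0x3
#define EX_CF0_SHIFT  2

/* Generic accessors built from the NAME##_MASK / NAME##_SHIFT convention. */
#define EX_GET_FIELD(val, name) \
	(((val) >> name##_SHIFT) & name##_MASK)
#define EX_SET_FIELD(val, name, field) \
	do { \
		(val) &= ~(name##_MASK << name##_SHIFT); \
		(val) |= ((field) & name##_MASK) << name##_SHIFT; \
	} while (0)

int main(void)
{
	unsigned char flags0 = 0;

	EX_SET_FIELD(flags0, EX_BIT0, 1); /* raise the 1-bit flag */
	EX_SET_FIELD(flags0, EX_CF0, 2);  /* program the 2-bit CF field */
	printf("flags0=0x%02x bit0=%u cf0=%u\n", flags0,
	    (unsigned)EX_GET_FIELD(flags0, EX_BIT0),
	    (unsigned)EX_GET_FIELD(flags0, EX_CF0));
	return 0;
}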
/*
* fcoe connection context
*/
struct e5_fcoe_conn_context
{
struct ystorm_fcoe_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_fcoe_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct e5_xstorm_fcoe_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct regpair xstorm_ag_padding[6] /* padding */;
struct ustorm_fcoe_conn_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
struct e5_tstorm_fcoe_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct regpair tstorm_ag_padding[2] /* padding */;
struct timers_context timer_context /* timer context */;
struct e5_ustorm_fcoe_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context /* tstorm storm context */;
struct e5_mstorm_fcoe_conn_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context /* mstorm storm context */;
};
/*
* FCoE connection offload params passed by driver to FW in FCoE offload ramrod
*/
struct fcoe_conn_offload_ramrod_params
{
struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
};
/*
* FCoE connection terminate params passed by driver to FW in FCoE terminate conn ramrod
*/
struct fcoe_conn_terminate_ramrod_params
{
struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
};
/*
* FCoE event type
*/
enum fcoe_event_type
{
FCOE_EVENT_INIT_FUNC /* Slowpath completion on INIT_FUNC ramrod */,
FCOE_EVENT_DESTROY_FUNC /* Slowpath completion on DESTROY_FUNC ramrod */,
FCOE_EVENT_STAT_FUNC /* Slowpath completion on STAT_FUNC ramrod */,
FCOE_EVENT_OFFLOAD_CONN /* Slowpath completion on OFFLOAD_CONN ramrod */,
FCOE_EVENT_TERMINATE_CONN /* Slowpath completion on TERMINATE_CONN ramrod */,
FCOE_EVENT_ERROR /* Error event */,
MAX_FCOE_EVENT_TYPE
};
/*
* FCoE init params passed by driver to FW in FCoE init ramrod
*/
struct fcoe_init_ramrod_params
{
struct fcoe_init_func_ramrod_data init_ramrod_data;
};
/*
* FCoE ramrod Command IDs
*/
enum fcoe_ramrod_cmd_id
{
FCOE_RAMROD_CMD_ID_INIT_FUNC /* FCoE function init ramrod */,
FCOE_RAMROD_CMD_ID_DESTROY_FUNC /* FCoE function destroy ramrod */,
FCOE_RAMROD_CMD_ID_STAT_FUNC /* FCoE statistics ramrod */,
FCOE_RAMROD_CMD_ID_OFFLOAD_CONN /* FCoE connection offload ramrod */,
FCOE_RAMROD_CMD_ID_TERMINATE_CONN /* FCoE connection terminate ramrod. Command ID known only to FW and VBD */,
MAX_FCOE_RAMROD_CMD_ID
};
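/*
 * Illustrative sketch (not driver code): the fcoe_ramrod_cmd_id and
 * fcoe_event_type enums above pair up one-to-one -- each ramrod command
 * completes on the slow path with the matching event, as the enum comments
 * state. The helper below just restates that pairing; the EX_* enums are
 * re-declared locally so the example stands on its own.
 */
enum ex_fcoe_ramrod_cmd_id {
	EX_FCOE_RAMROD_CMD_ID_INIT_FUNC,
	EX_FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
	EX_FCOE_RAMROD_CMD_ID_STAT_FUNC,
	EX_FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
	EX_FCOE_RAMROD_CMD_ID_TERMINATE_CONN
};
enum ex_fcoe_event_type {
	EX_FCOE_EVENT_INIT_FUNC,
	EX_FCOE_EVENT_DESTROY_FUNC,
	EX_FCOE_EVENT_STAT_FUNC,
	EX_FCOE_EVENT_OFFLOAD_CONN,
	EX_FCOE_EVENT_TERMINATE_CONN,
	EX_FCOE_EVENT_ERROR
};
/* Slowpath completion event a driver would expect for a given ramrod. */
static inline enum ex_fcoe_event_type
ex_fcoe_expected_event(enum ex_fcoe_ramrod_cmd_id cmd)
{
	switch (cmd) {
	case EX_FCOE_RAMROD_CMD_ID_INIT_FUNC:
		return EX_FCOE_EVENT_INIT_FUNC;
	case EX_FCOE_RAMROD_CMD_ID_DESTROY_FUNC:
		return EX_FCOE_EVENT_DESTROY_FUNC;
	case EX_FCOE_RAMROD_CMD_ID_STAT_FUNC:
		return EX_FCOE_EVENT_STAT_FUNC;
	case EX_FCOE_RAMROD_CMD_ID_OFFLOAD_CONN:
		return EX_FCOE_EVENT_OFFLOAD_CONN;
	case EX_FCOE_RAMROD_CMD_ID_TERMINATE_CONN:
		return EX_FCOE_EVENT_TERMINATE_CONN;
	default:
		return EX_FCOE_EVENT_ERROR;
	}
}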
/*
* FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
*/
struct fcoe_stat_ramrod_params
{
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
struct e4_ystorm_fcoe_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 word1 /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_ystorm_fcoe_conn_ag_ctx
{

View File

@ -559,7 +559,7 @@ struct ustorm_iscsi_conn_st_ctx
/*
* iscsi connection context
*/
struct iscsi_conn_context
struct e4_iscsi_conn_context
{
struct ystorm_iscsi_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
@ -582,307 +582,6 @@ struct iscsi_conn_context
};
/*
* iSCSI init params passed by driver to FW in iSCSI init ramrod
*/
struct iscsi_init_ramrod_params
{
struct iscsi_spe_func_init iscsi_init_spe /* parameters initialized by the miniport and handed to bus-driver */;
struct tcp_init_params tcp_init /* TCP parameters initialized by the bus-driver */;
};
struct e4_ystorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 word1 /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_mstorm_iscsi_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
};
struct e5_tstorm_iscsi_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 /* timer2cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf9 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 cid_offload_cnt /* byte2 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
__le32 reg6 /* reg6 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
};
struct e5_ustorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags4;
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf7 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf8 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf7en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf8en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* conn_dpi */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
};
struct e5_xstorm_iscsi_conn_ag_ctx
{
u8 cdu_validation /* cdu_validation */;
@ -1155,6 +854,333 @@ struct e5_xstorm_iscsi_conn_ag_ctx
__le32 reg17 /* reg17 */;
};
struct e5_tstorm_iscsi_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 /* timer2cf */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf9 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_ISCSI_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 cid_offload_cnt /* byte2 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
__le32 reg6 /* reg6 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
};
struct e5_ustorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
u8 flags0;
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags4;
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf7 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf8 */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED4_SHIFT 4
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf7en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf8en */
#define E5_USTORM_ISCSI_CONN_AG_CTX_E4_RESERVED6_SHIFT 7
u8 byte2 /* byte2 */;
__le16 word0 /* conn_dpi */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
};
struct e5_mstorm_iscsi_conn_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
};
/*
* iscsi connection context
*/
struct e5_iscsi_conn_context
{
struct ystorm_iscsi_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct regpair pstorm_st_padding[2] /* padding */;
struct pb_context xpb2_context /* xpb2 context */;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct regpair xstorm_st_padding[2] /* padding */;
struct e5_xstorm_iscsi_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct e5_tstorm_iscsi_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct regpair tstorm_ag_padding[2] /* padding */;
struct timers_context timer_context /* timer context */;
struct e5_ustorm_iscsi_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct pb_context upb_context /* upb context */;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context /* tstorm storm context */;
struct regpair tstorm_st_padding[2] /* padding */;
struct e5_mstorm_iscsi_conn_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context /* mstorm storm context */;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
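/*
 * Editor's sketch (hypothetical, not the driver's actual mechanism): this
 * revision splits the connection context into e4_* and e5_* variants, one
 * per supported ASIC generation, and their footprints differ. Code that
 * allocates CDU connection memory therefore has to pick the context size by
 * chip generation; the stub below only illustrates that selection. The
 * enum, struct names and byte counts are placeholders, not real values.
 */
#include <stddef.h>

enum ex_chip_gen { EX_CHIP_E4, EX_CHIP_E5 };

struct ex_e4_conn_ctx { unsigned char bytes[512]; }; /* placeholder size */
struct ex_e5_conn_ctx { unsigned char bytes[640]; }; /* placeholder size */

/* Per-connection context footprint to hand to the context manager. */
static inline size_t ex_conn_ctx_size(enum ex_chip_gen gen)
{
	return (gen == EX_CHIP_E5) ? sizeof(struct ex_e5_conn_ctx)
				   : sizeof(struct ex_e4_conn_ctx);
}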
/*
* iSCSI init params passed by driver to FW in iSCSI init ramrod
*/
struct iscsi_init_ramrod_params
{
struct iscsi_spe_func_init iscsi_init_spe /* parameters initialized by the miniport and handed to bus-driver */;
struct tcp_init_params tcp_init /* TCP parameters initialized by the bus-driver */;
};
struct e4_ystorm_iscsi_conn_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le16 word1 /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le32 reg2 /* reg2 */;
__le32 reg3 /* reg3 */;
};
struct e5_ystorm_iscsi_conn_ag_ctx
{

View File

@ -458,7 +458,7 @@ struct ustorm_iwarp_conn_st_ctx
/*
* iwarp connection context
*/
struct iwarp_conn_context
struct e4_iwarp_conn_context
{
struct ystorm_iwarp_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
@ -477,6 +477,419 @@ struct iwarp_conn_context
};
struct e5_xstorm_iwarp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 /* exist_in_qm1 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED1_MASK 0x1 /* exist_in_qm2 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 /* exist_in_qm3 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 /* cf_array_active */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 /* bit6 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 /* bit7 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 /* bit8 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 /* bit9 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 /* bit10 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 /* bit14 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 /* bit15 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 /* cf14 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 /* cf16 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 /* cf_array_cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 /* cf18 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 /* cf19 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 /* cf21 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 /* cf14en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 /* cf16en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 /* cf_array_cf_en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 /* cf18en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 /* cf19en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 /* cf21en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 /* rule1en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
#define E5_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 /* rule2en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 /* rule4en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 /* rule10en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 /* rule14en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
#define E5_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 /* rule18en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 /* rule19en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 /* rule20en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 /* rule21en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 /* rule23en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 /* bit18 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT20_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RDMA_EDPM_ENABLE_MASK 0x1 /* bit21 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RDMA_EDPM_ENABLE_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 physical_q1 /* physical_q1 */;
__le16 sq_comp_cons /* physical_q2 */;
__le16 sq_tx_cons /* word3 */;
__le16 sq_prod /* word4 */;
__le16 word5 /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
u8 byte6 /* byte6 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 more_to_send_seq /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 rewinded_snd_max /* cf_array0 */;
__le32 rd_msn /* cf_array1 */;
u8 flags15;
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit22 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit23 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit24 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf24 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf24en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule26en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule27en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 byte7 /* byte7 */;
__le16 irq_prod_via_msdm /* word7 */;
__le16 irq_cons /* word8 */;
__le16 hq_cons_th_or_mpa_data /* word9 */;
__le16 hq_cons /* word10 */;
__le16 tx_rdma_edpm_usg_cnt /* word11 */;
__le32 atom_msn /* reg7 */;
__le32 orq_cons /* reg8 */;
__le32 orq_cons_th /* reg9 */;
u8 max_ord /* byte8 */;
u8 wqe_data_pad_bytes /* byte9 */;
u8 former_hq_prod /* byte10 */;
u8 irq_prod_via_msem /* byte11 */;
u8 byte12 /* byte12 */;
u8 max_pkt_pdu_size_lo /* byte13 */;
u8 max_pkt_pdu_size_hi /* byte14 */;
u8 byte15 /* byte15 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
__le32 reg12 /* reg12 */;
__le32 shared_queue_page_addr_lo /* reg13 */;
__le32 shared_queue_page_addr_hi /* reg14 */;
__le32 reg15 /* reg15 */;
__le32 reg16 /* reg16 */;
__le32 reg17 /* reg17 */;
};
struct e5_tstorm_iwarp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit3 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 /* bit5 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 /* timer2cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf9 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 /* rule6en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 orq_cache_idx /* byte2 */;
__le16 sq_tx_cons_th /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 unaligned_nxt_seq /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
__le32 reg6 /* reg6 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
u8 hq_prod /* byte3 */;
u8 orq_prod /* byte4 */;
u8 irq_cons /* byte5 */;
u8 e4_reserved8 /* byte6 */;
__le16 sq_tx_cons /* word1 */;
__le16 conn_dpi /* conn_dpi */;
__le32 snd_seq /* reg9 */;
__le16 rq_prod /* word3 */;
__le16 e4_reserved9 /* word4 */;
};
/*
* iwarp connection context
*/
struct e5_iwarp_conn_context
{
struct ystorm_iwarp_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
struct pstorm_iwarp_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct regpair pstorm_st_padding[2] /* padding */;
struct xstorm_iwarp_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct regpair xstorm_st_padding[2] /* padding */;
struct e5_xstorm_iwarp_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct e5_tstorm_iwarp_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct timers_context timer_context /* timer context */;
struct e5_ustorm_rdma_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context /* tstorm storm context */;
struct regpair tstorm_st_padding[2] /* padding */;
struct mstorm_iwarp_conn_st_ctx mstorm_st_context /* mstorm storm context */;
struct ustorm_iwarp_conn_st_ctx ustorm_st_context /* ustorm storm context */;
};
/*
* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod
*/
@ -701,7 +1114,7 @@ struct iwarp_mpa_offload_ramrod_data
struct mpa_ulp_buffer incoming_ulp_buffer /* host buffer for placing the incoming MPA reply */;
struct regpair async_eqe_output_buf /* host buffer for async tcp/mpa completion information - must have space for at least 8 bytes */;
struct regpair handle_for_async /* a host cookie that will be echoed back in every qp-specific async EQE */;
struct regpair shared_queue_addr /* Address of shared queue address that consist of SQ/RQ and FW internal queues (IRQ/ORQ/HQ) */;
struct regpair shared_queue_addr /* Address of shared queue that consists of SQ/RQ and FW internal queues (IRQ/ORQ/HQ) */;
u8 stats_counter_id /* Statistics counter ID to use */;
u8 reserved3[15];
};
@ -1033,127 +1446,6 @@ struct e5_mstorm_iwarp_conn_ag_ctx
};
struct e5_tstorm_iwarp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 /* bit3 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 /* bit5 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 /* timer1cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 /* timer2cf */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf9 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 /* cf1en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 /* cf2en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf9en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
#define E5_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 /* rule6en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 flags6;
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit6 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit7 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit8 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf11 */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf11en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule9en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule10en */
#define E5_TSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 orq_cache_idx /* byte2 */;
__le16 sq_tx_cons_th /* word0 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 unaligned_nxt_seq /* reg2 */;
__le32 reg3 /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
__le32 reg6 /* reg6 */;
__le32 reg7 /* reg7 */;
__le32 reg8 /* reg8 */;
u8 hq_prod /* byte3 */;
u8 orq_prod /* byte4 */;
u8 irq_cons /* byte5 */;
u8 e4_reserved8 /* byte6 */;
__le16 sq_tx_cons /* word1 */;
__le16 conn_dpi /* conn_dpi */;
__le32 snd_seq /* reg9 */;
__le16 rq_prod /* word3 */;
__le16 e4_reserved9 /* word4 */;
};
struct e5_ustorm_iwarp_conn_ag_ctx
{
@ -1238,276 +1530,6 @@ struct e5_ustorm_iwarp_conn_ag_ctx
};
struct e5_xstorm_iwarp_conn_ag_ctx
{
u8 reserved0 /* cdu_validation */;
u8 state_and_core_id /* state_and_core_id */;
u8 flags0;
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 /* exist_in_qm1 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED1_MASK 0x1 /* exist_in_qm2 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 /* exist_in_qm3 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 /* cf_array_active */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 /* bit6 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 /* bit7 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 /* bit8 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 /* bit9 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 /* bit10 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 /* bit14 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 /* bit15 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 /* timer_stop_all */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 /* cf14 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 /* cf16 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 /* cf_array_cf */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 /* cf18 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 /* cf19 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 /* cf21 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 /* cf3en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 /* cf14en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 /* cf16en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 /* cf_array_cf_en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 /* cf18en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 /* cf19en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 /* cf21en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 /* rule1en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
#define E5_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 /* rule2en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 /* rule4en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 /* rule10en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 /* rule14en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
#define E5_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 /* rule18en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 /* rule19en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 /* rule20en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 /* rule21en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 /* rule23en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 /* bit18 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_BIT20_SHIFT 4
#define E5_XSTORM_IWARP_CONN_AG_CTX_RDMA_EDPM_ENABLE_MASK 0x1 /* bit21 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_RDMA_EDPM_ENABLE_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 physical_q1 /* physical_q1 */;
__le16 sq_comp_cons /* physical_q2 */;
__le16 sq_tx_cons /* word3 */;
__le16 sq_prod /* word4 */;
__le16 word5 /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
u8 byte5 /* byte5 */;
u8 byte6 /* byte6 */;
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 more_to_send_seq /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 rewinded_snd_max /* cf_array0 */;
__le32 rd_msn /* cf_array1 */;
u8 flags15;
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit22 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit23 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit24 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED3_SHIFT 2
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_MASK 0x3 /* cf24 */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED4_SHIFT 3
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf24en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED5_SHIFT 5
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule26en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED6_SHIFT 6
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule27en */
#define E5_XSTORM_IWARP_CONN_AG_CTX_E4_RESERVED7_SHIFT 7
u8 byte7 /* byte7 */;
__le16 irq_prod_via_msdm /* word7 */;
__le16 irq_cons /* word8 */;
__le16 hq_cons_th_or_mpa_data /* word9 */;
__le16 hq_cons /* word10 */;
__le16 tx_rdma_edpm_usg_cnt /* word11 */;
__le32 atom_msn /* reg7 */;
__le32 orq_cons /* reg8 */;
__le32 orq_cons_th /* reg9 */;
u8 max_ord /* byte8 */;
u8 wqe_data_pad_bytes /* byte9 */;
u8 former_hq_prod /* byte10 */;
u8 irq_prod_via_msem /* byte11 */;
u8 byte12 /* byte12 */;
u8 max_pkt_pdu_size_lo /* byte13 */;
u8 max_pkt_pdu_size_hi /* byte14 */;
u8 byte15 /* byte15 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
__le32 reg12 /* reg12 */;
__le32 shared_queue_page_addr_lo /* reg13 */;
__le32 shared_queue_page_addr_hi /* reg14 */;
__le32 reg15 /* reg15 */;
__le32 reg16 /* reg16 */;
__le32 reg17 /* reg17 */;
};
struct e5_ystorm_iwarp_conn_ag_ctx
{


@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HSI_RDMA__
#define __ECORE_HSI_RDMA__
/************************************************************************/
@ -36,6 +35,128 @@
/************************************************************************/
#include "rdma_common.h"
/*
 * The rdma task context of Ystorm
*/
struct ystorm_rdma_task_st_ctx
{
struct regpair temp[4];
};
struct e4_ystorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 msem_ctx_upd_seq /* icid */;
u8 flags0;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key /* byte2 */;
__le32 mw_cnt /* reg0 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le32 fbo_lo /* reg1 */;
__le32 fbo_hi /* reg2 */;
};
struct e4_mstorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 icid /* icid */;
u8 flags0;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key /* byte2 */;
__le32 mw_cnt /* reg0 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le32 fbo_lo /* reg1 */;
__le32 fbo_hi /* reg2 */;
};
/*
* The roce task context of Mstorm
*/
@ -44,6 +165,325 @@ struct mstorm_rdma_task_st_ctx
struct regpair temp[4];
};
/*
* The roce task context of Ustorm
*/
struct ustorm_rdma_task_st_ctx
{
struct regpair temp[2];
};
struct e4_ustorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 icid /* icid */;
u8 flags0;
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 /* exist_in_qm1 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 /* timer0cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 /* timer1cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 /* timer2cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 /* cf4 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 /* cf0en */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 /* cf1en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 /* cf2en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 /* cf4en */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF /* nibble1 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals /* reg0 */;
__le32 dif_error_1st_interval /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 dif_runt_value /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
};
/*
* RDMA task context
*/
struct e4_rdma_task_context
{
struct ystorm_rdma_task_st_ctx ystorm_st_context /* ystorm storm context */;
struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
struct tdif_task_context tdif_context /* tdif context */;
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_rdma_task_st_ctx mstorm_st_context /* mstorm storm context */;
struct rdif_task_context rdif_context /* rdif context */;
struct ustorm_rdma_task_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
};
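The e4_rdma_task_context above is a fixed-layout aggregate: each storm's aggregative and storm sections appear back to back in the order the firmware consumes them. A hedged sketch of how a driver-side routine might stage such a context is shown below; it assumes this header is included, the SET_FIELD() stand-in is modeled on the ecore helper of the same name, example_stage_rdma_task_ctx() is a hypothetical helper, and the connection-type value is illustrative only.

#include <string.h>

/* Local stand-in for the ecore SET_FIELD() helper (assumed). */
#define SET_FIELD(value, name, flag)                                   \
	do {                                                           \
		(value) &= ~((name##_MASK) << (name##_SHIFT));         \
		(value) |= (((flag) & (name##_MASK)) << (name##_SHIFT)); \
	} while (0)

/*
 * Zero the whole task context, then program one aggregative sub-field
 * through the MASK/SHIFT accessors.  The value 1 is illustrative; real
 * connection types come from the protocol enums in the HSI headers.
 */
static void
example_stage_rdma_task_ctx(struct e4_rdma_task_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	SET_FIELD(ctx->ustorm_ag_context.flags0,
	    E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE, 1);
}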
struct e5_ystorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 msem_ctx_upd_seq /* icid */;
u8 flags0;
#define E5_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit5 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 mw_cnt /* reg0 */;
u8 key /* byte2 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le16 e4_reserved8 /* word6 */;
__le32 fbo_lo /* reg1 */;
};
struct e5_mstorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
u8 flags0;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit4 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 mw_cnt /* reg0 */;
u8 key /* byte2 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 dif_flags /* regpair0 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* regpair1 */;
__le16 e4_reserved8 /* word6 */;
__le32 fbo_lo /* reg1 */;
};
struct e5_ustorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
u8 flags0;
#define E5_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 /* timer0cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 /* timer1cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 /* timer2cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 /* dif_error_cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 /* cf0en */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 /* cf1en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 /* cf2en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 /* cf4en */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit4 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 6
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* rule7en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 7
u8 flags4;
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x3 /* cf5 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf5en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule8en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF /* dif_error_type */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
u8 e4_reserved8 /* byte4 */;
__le32 dif_err_intervals /* dif_err_intervals */;
__le32 dif_error_1st_interval /* dif_error_1st_interval */;
__le32 reg2 /* reg2 */;
__le32 dif_runt_value /* reg3 */;
__le32 reg4 /* reg4 */;
};
/*
* RDMA task context
*/
struct e5_rdma_task_context
{
struct ystorm_rdma_task_st_ctx ystorm_st_context /* ystorm storm context */;
struct e5_ystorm_rdma_task_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
struct tdif_task_context tdif_context /* tdif context */;
struct e5_mstorm_rdma_task_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_rdma_task_st_ctx mstorm_st_context /* mstorm storm context */;
struct rdif_task_context rdif_context /* rdif context */;
struct ustorm_rdma_task_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
struct e5_ustorm_rdma_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
};
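/* Editor's illustration (not from the HSI sources): the *_MASK / *_SHIFT
 * pairs in the aggregative contexts above are intended to be driven through
 * the driver's SET_FIELD()/GET_FIELD() helpers, the same pattern the QM init
 * code later in this change uses for QM_RF_PQ_MAP_*. Marking a Ystorm task
 * context valid would look roughly like the sketch below (field name taken
 * from the definitions in this header; the hypothetical helper exists only
 * for illustration).
 */
#if 0 /* example only */
static void e5_rdma_task_ctx_example(struct e5_rdma_task_context *p_ctx)
{
	SET_FIELD(p_ctx->ystorm_ag_context.flags0,
		  E5_YSTORM_RDMA_TASK_AG_CTX_VALID, 1);
}
#endif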
/*
* rdma function init ramrod data
@ -331,212 +771,6 @@ struct rdma_srq_modify_ramrod_data
};
/*
* The rdma task context of Mstorm
*/
struct ystorm_rdma_task_st_ctx
{
struct regpair temp[4];
};
struct e4_ystorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 msem_ctx_upd_seq /* icid */;
u8 flags0;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key /* byte2 */;
__le32 mw_cnt /* reg0 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le32 fbo_lo /* reg1 */;
__le32 fbo_hi /* reg2 */;
};
struct e4_mstorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 icid /* icid */;
u8 flags0;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key /* byte2 */;
__le32 mw_cnt /* reg0 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le32 fbo_lo /* reg1 */;
__le32 fbo_hi /* reg2 */;
};
/*
* The roce task context of Ustorm
*/
struct ustorm_rdma_task_st_ctx
{
struct regpair temp[2];
};
struct e4_ustorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
__le16 icid /* icid */;
u8 flags0;
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 /* exist_in_qm1 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 /* timer0cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 /* timer1cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 /* timer2cf */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 /* cf4 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 /* cf0en */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 /* cf1en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 /* cf2en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 /* cf4en */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF /* nibble1 */
#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals /* reg0 */;
__le32 dif_error_1st_interval /* reg1 */;
__le32 reg2 /* reg2 */;
__le32 dif_runt_value /* reg3 */;
__le32 reg4 /* reg4 */;
__le32 reg5 /* reg5 */;
};
/*
* RDMA task context
*/
struct rdma_task_context
{
struct ystorm_rdma_task_st_ctx ystorm_st_context /* ystorm storm context */;
struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
struct tdif_task_context tdif_context /* tdif context */;
struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_rdma_task_st_ctx mstorm_st_context /* mstorm storm context */;
struct rdif_task_context rdif_context /* rdif context */;
struct ustorm_rdma_task_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
};
/*
* RDMA Tid type enumeration (for register_tid ramrod)
*/
@ -1397,77 +1631,6 @@ struct e5_mstorm_rdma_conn_ag_ctx
};
struct e5_mstorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
u8 flags0;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit4 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_MSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 mw_cnt /* reg0 */;
u8 key /* byte2 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 dif_flags /* regpair0 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* regpair1 */;
__le16 e4_reserved8 /* word6 */;
__le32 fbo_lo /* reg1 */;
};
struct e5_tstorm_rdma_conn_ag_ctx
{
@ -1755,82 +1918,6 @@ struct e5_ustorm_rdma_conn_ag_ctx
};
struct e5_ustorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
u8 flags0;
#define E5_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 /* exist_in_qm1 */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 /* timer0cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 /* timer1cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 /* timer2cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 /* dif_error_cf */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 /* cf0en */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 /* cf1en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 /* cf2en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define E5_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 /* cf4en */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit2 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 4
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x1 /* bit3 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 5
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x1 /* bit4 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 6
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* rule7en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 7
u8 flags4;
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x3 /* cf5 */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 0
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* cf5en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 2
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED7_MASK 0x1 /* rule8en */
#define E5_USTORM_RDMA_TASK_AG_CTX_E4_RESERVED7_SHIFT 3
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF /* dif_error_type */
#define E5_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
u8 e4_reserved8 /* byte4 */;
__le32 dif_err_intervals /* dif_err_intervals */;
__le32 dif_error_1st_interval /* dif_error_1st_interval */;
__le32 reg2 /* reg2 */;
__le32 dif_runt_value /* reg3 */;
__le32 reg4 /* reg4 */;
};
struct e5_xstorm_rdma_conn_ag_ctx
{
@ -2109,75 +2196,4 @@ struct e5_ystorm_rdma_conn_ag_ctx
};
struct e5_ystorm_rdma_task_ag_ctx
{
u8 reserved /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 msem_ctx_upd_seq /* icid */;
u8 flags0;
#define E5_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 /* bit2 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit5 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_YSTORM_RDMA_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 mw_cnt /* reg0 */;
u8 key /* byte2 */;
u8 ref_cnt_seq /* byte3 */;
u8 ctx_upd_seq /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 dif_flags /* word1 */;
__le16 tx_ref_count /* word2 */;
__le16 last_used_ltid /* word3 */;
__le16 parent_mr_lo /* word4 */;
__le16 parent_mr_hi /* word5 */;
__le16 e4_reserved8 /* word6 */;
__le32 fbo_lo /* reg1 */;
};
#endif /* __ECORE_HSI_RDMA__ */

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HSI_ROCE__
#define __ECORE_HSI_ROCE__
/************************************************************************/
@ -41,14 +40,13 @@
#include "roce_common.h"
/*
* The roce storm context of Mstorm
* The roce storm context of Ystorm
*/
struct mstorm_roce_conn_st_ctx
struct ystorm_roce_conn_st_ctx
{
struct regpair temp[6];
struct regpair temp[2];
};
/*
* The roce storm context of Mstorm
*/
@ -57,15 +55,6 @@ struct pstorm_roce_conn_st_ctx
struct regpair temp[16];
};
/*
* The roce storm context of Ystorm
*/
struct ystorm_roce_conn_st_ctx
{
struct regpair temp[2];
};
/*
* The roce storm context of Xstorm
*/
@ -82,6 +71,14 @@ struct tstorm_roce_conn_st_ctx
struct regpair temp[30];
};
/*
* The roce storm context of Mstorm
*/
struct mstorm_roce_conn_st_ctx
{
struct regpair temp[6];
};
/*
* The roce storm context of Ystorm
*/
@ -93,7 +90,7 @@ struct ustorm_roce_conn_st_ctx
/*
* roce connection context
*/
struct roce_conn_context
struct e4_roce_conn_context
{
struct ystorm_roce_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
@ -111,6 +108,29 @@ struct roce_conn_context
};
/*
* roce connection context
*/
struct e5_roce_conn_context
{
struct ystorm_roce_conn_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
struct pstorm_roce_conn_st_ctx pstorm_st_context /* pstorm storm context */;
struct xstorm_roce_conn_st_ctx xstorm_st_context /* xstorm storm context */;
struct regpair xstorm_st_padding[2] /* padding */;
struct e5_xstorm_rdma_conn_ag_ctx xstorm_ag_context /* xstorm aggregative context */;
struct e5_tstorm_rdma_conn_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct timers_context timer_context /* timer context */;
struct e5_ustorm_rdma_conn_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct tstorm_roce_conn_st_ctx tstorm_st_context /* tstorm storm context */;
struct mstorm_roce_conn_st_ctx mstorm_st_context /* mstorm storm context */;
struct ustorm_roce_conn_st_ctx ustorm_st_context /* ustorm storm context */;
struct regpair ustorm_st_padding[2] /* padding */;
};
/*
* roce create qp requester ramrod data
*/

View File

@ -31,7 +31,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore_status.h"
@ -90,7 +89,9 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
}
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
@ -109,8 +110,10 @@ void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
#ifdef CONFIG_ECORE_LOCK_ALLOC
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
p_hwfn->p_ptt_pool = OSAL_NULL;
}
@ -156,8 +159,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
@ -181,7 +183,7 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
{
u32 prev_hw_addr;
prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
@ -204,7 +206,7 @@ static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr)
{
u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
@ -442,7 +444,7 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
#if 0
/* Ecore HW lock
* =============
* Although the implementation is ready, today we don't have any flow that
* Although the implemention is ready, today we don't have any flow that
* utilizes said locks - and we want to keep it this way.
* If this changes, this needs to be revisited.
*/

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_HW_H__
#define __ECORE_HW_H__
@ -94,8 +93,10 @@ enum _dmae_cmd_crc_mask {
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
@ -123,13 +124,11 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
@ -281,35 +280,6 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
union ecore_qm_pq_params {
struct {
u8 q_idx;
} iscsi;
struct {
u8 tc;
} core;
struct {
u8 is_vf;
u8 vf_id;
u8 tc;
} eth;
struct {
u8 dcqcn;
u8 qpid; /* roce relative */
} roce;
struct {
u8 qidx;
} iwarp;
};
u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
enum protocol_type proto,
union ecore_qm_pq_params *params);
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);

View File

@ -31,7 +31,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore_hw.h"
#include "ecore_init_ops.h"
@ -46,12 +45,12 @@ __FBSDID("$FreeBSD$");
#define CDU_VALIDATION_DEFAULT_CFG 61
static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
{ 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
};
static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@ -67,6 +66,9 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* VOQ constants */
#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
/* WFQ constants: */
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
@ -76,7 +78,8 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
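/* Editor's illustration (not from the sources): with the 0x9000
 * (= 4 * 9 * 1024 = 36864) multiplier above and QM_WFQ_MAX_INC_VAL of
 * 43750000 defined below, a WFQ weight of 1 yields an increment of 36864,
 * and the largest weight that still passes the bound check in
 * ecore_pf_wfq_rt_init() is 43750000 / 36864 = 1186.
 */
#if 0 /* example only */
static void qm_wfq_inc_val_example(void)
{
	u32 inc_min = QM_WFQ_INC_VAL(1);    /* 36864                  */
	u32 inc_max = QM_WFQ_INC_VAL(1186); /* 43720704 <= 43750000   */

	(void)inc_min; (void)inc_max;
}
#endif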
@ -84,6 +87,9 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* 0.7 * upper bound (62500000) */
#define QM_WFQ_MAX_INC_VAL 43750000
/* Number of VOQs in E5 QmWfqCrd register */
#define QM_WFQ_CRD_E5_NUM_VOQS 16
/* RL constants: */
/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
@ -117,9 +123,11 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + ext_voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + ext_voq * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
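/* Editor's note (not from the sources): as a worked example of the macro
 * above, the pure LB command queue gets an initial line credit of
 * QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES) = ((150 - 4) * 2) = 292, with
 * QM_LINE_CRD_REG_SIGN_BIT OR-ed in, before ecore_cmdq_lines_voq_rt_init()
 * stores it in the runtime array.
 */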
@ -155,23 +163,41 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * (max_phys_tcs_per_port) + (tc))
#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
#define QM_INIT_TX_PQ_MAP(map, chip, pq_id, rl_valid, vp_pq_id, rl_id, ext_voq, wrr) OSAL_MEMSET(&map, 0, sizeof(map)); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32 *)&map))
#define WRITE_PQ_INFO_TO_RAM 1
#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4
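/* Editor's illustration (not from the sources): how PQ_INFO_ELEMENT packs
 * its arguments into the dword that ecore_tx_pq_map_rt_init() writes to
 * XSEM RAM. The field values below are arbitrary.
 */
#if 0 /* example only */
static void pq_info_element_example(void)
{
	/* vp=0x10, pf=2, tc=3, port=1, rl_valid=1, rl=5:
	 * 0x10 | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (5 << 24)
	 * = 0x05532010
	 */
	u32 pq_info = PQ_INFO_ELEMENT(0x10, 2, 3, 1, 1, 5);

	(void)pq_info;
}
#endif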
/******************** INTERNAL IMPLEMENTATION *********************/
/* Returns the external VOQ number */
static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 tc,
u8 max_phys_tcs_per_port)
{
if (tc == PURE_LB_TC)
return NUM_OF_PHYS_TCS * (ECORE_IS_E5(p_hwfn->p_dev) ? MAX_NUM_PORTS_E5 : MAX_NUM_PORTS_BB) + port_id;
else
return port_id * (ECORE_IS_E5(p_hwfn->p_dev) ? NUM_OF_PHYS_TCS : max_phys_tcs_per_port) + tc;
}
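/* Editor's note (not from the sources): a concrete reading of the mapping
 * above, assuming a non-E5 device with max_phys_tcs_per_port = 4: TC 2 of
 * port 1 resolves to external VOQ 1 * 4 + 2 = 6, while the pure LB TC of
 * port 1 resolves to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1. The example
 * wrapper below exists only for illustration.
 */
#if 0 /* example only */
static u8 ext_voq_example(struct ecore_hwfn *p_hwfn)
{
	return ecore_get_ext_voq(p_hwfn, 1 /* port_id */, 2 /* tc */,
				 4 /* max_phys_tcs_per_port */);
}
#endif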
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn,
bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
/* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (1 << MAX_NUM_VOQS) - 1);
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, (u32)voq_bit_mask);
#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
if (num_ext_voqs >= 32)
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, (u32)(voq_bit_mask >> 32));
#endif
/* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
@ -226,16 +252,16 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn,
* the specified VOQ.
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq,
u8 ext_voq,
u16 cmdq_lines)
{
u32 qm_line_crd;
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), (u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, qm_line_crd);
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
@ -244,11 +270,12 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
u8 tc, ext_voq, port_id, num_tcs_in_port;
u8 num_ext_voqs = ECORE_IS_E5(p_hwfn->p_dev) ? QM_E5_NUM_EXT_VOQ : MAX_NUM_VOQS_E4;
/* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
/* Clear PBF lines of all VOQs */
for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u16 phys_lines, phys_lines_per_tc;
@ -256,26 +283,32 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
if (!port_params[port_id].active)
continue;
/* Find #lines to divide between the active physical TCs */
phys_lines = port_params[port_id].num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES;
/* Find number of command queue lines to divide between the
* active physical TCs. In E5, 1/8 of the lines are reserved.
* The lines for pure LB TC are subtracted.
*/
phys_lines = port_params[port_id].num_pbf_cmd_lines;
if (ECORE_IS_E5(p_hwfn->p_dev))
phys_lines -= DIV_ROUND_UP(phys_lines, PBF_CMDQ_LINES_E5_RSVD_RATIO);
phys_lines -= PBF_CMDQ_PURE_LB_LINES;
/* Find #lines per active physical TC */
num_tcs_in_port = 0;
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
for (tc = 0; tc < max_phys_tcs_per_port; tc++)
if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
num_tcs_in_port++;
phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* Init registers per active TC */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
ecore_cmdq_lines_voq_rt_init(p_hwfn, voq, phys_lines_per_tc);
}
for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1)
ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, phys_lines_per_tc);
}
/* Init registers for pure LB TC */
ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), PBF_CMDQ_PURE_LB_LINES);
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, PBF_CMDQ_PURE_LB_LINES);
}
}
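/* Editor's note (not from the sources): a worked example of the split done
 * above, assuming an E5 port with 3000 PBF command lines and 4 active
 * physical TCs: 3000 - DIV_ROUND_UP(3000, 8) = 2625 lines after the 1/8
 * reservation, 2625 - 150 = 2475 after the pure LB share, i.e. 618 lines
 * per active physical TC.
 */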
@ -290,7 +323,7 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
* headroom.
* b. B = B - 38 (remainder after global headroom allocation).
* c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ.
* d. B = B % MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
* d. B = B MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
* e. B/C blocks are allocated for each physical TC.
* Assumptions:
* - MTU is up to 9700 bytes (38 blocks)
@ -304,7 +337,7 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, voq, port_id, num_tcs_in_port;
u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (!port_params[port_id].active)
@ -328,32 +361,32 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) {
voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), phys_blocks);
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc, max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), phys_blocks);
}
}
/* Init pure LB TC */
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)), pure_lb_blocks);
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC, max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks);
}
}
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u32 base_mem_addr_4kb,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
@ -381,12 +414,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
struct qm_rf_pq_map tx_pq_map;
u8 ext_voq, vport_id_in_pf;
bool is_vf_pq, rl_valid;
u8 voq, vport_id_in_pf;
u16 first_tx_pq_id;
voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < max_qm_global_rls;
@ -394,34 +426,41 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
vport_id_in_pf = pq_params[i].vport_id - start_vport;
first_tx_pq_id = vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << (ECORE_IS_E5(p_hwfn->p_dev) ? QM_WFQ_VP_PQ_PF_E5_SHIFT : QM_WFQ_VP_PQ_PF_E4_SHIFT));
/* Create new VP PQ */
vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << QM_WFQ_VP_PQ_PF_SHIFT));
STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, map_val);
}
/* Check RL ID */
if (pq_params[i].rl_valid && pq_params[i].vport_id >= max_qm_global_rls)
DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter configuration\n");
/* Fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, rl_valid ? pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, pq_params[i].wrr_group);
/* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, *((u32*)&tx_pq_map));
/* Prepare PQ map entry */
if (ECORE_IS_E5(p_hwfn->p_dev)) {
struct qm_rf_pq_map_e5 tx_pq_map;
QM_INIT_TX_PQ_MAP(tx_pq_map, E5, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
}
else {
struct qm_rf_pq_map_e4 tx_pq_map;
QM_INIT_TX_PQ_MAP(tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group);
}
/* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb);
if (WRITE_PQ_INFO_TO_RAM != 0)
{
u32 pq_info = 0;
pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, pq_params[i].tc_id, port_id, rl_valid ? 1 : 0, rl_valid ? pq_params[i].vport_id : 0);
ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), pq_info);
}
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |= (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
@ -440,11 +479,10 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
u8 pf_id,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
u16 i, pq_id, pq_group;
@ -482,25 +520,25 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params)
{
u32 inc_val, crd_reg_offset;
u8 voq;
u8 ext_voq;
u16 i;
crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration\n");
return -1;
}
for(i = 0; i < num_tx_pqs; i++) {
voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
for (i = 0; i < num_tx_pqs; i++) {
ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
crd_reg_offset = ECORE_IS_E5(p_hwfn->p_dev) ?
(ext_voq < QM_WFQ_CRD_E5_NUM_VOQS ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (ext_voq % QM_WFQ_CRD_E5_NUM_VOQS) * MAX_NUM_PFS_E5 + pf_id :
(pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : QM_REG_WFQPFCRD_MSB_RT_OFFSET) + ext_voq * MAX_NUM_PFS_BB + (pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
@ -614,9 +652,9 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd_addr,
u32 cmd_data_lsb,
u32 cmd_data_msb)
u32 cmd_addr,
u32 cmd_data_lsb,
u32 cmd_data_msb)
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@ -633,12 +671,11 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
/******************** INTERFACE IMPLEMENTATION *********************/
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs)
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@ -689,23 +726,22 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
}
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
u32 other_mem_size_4kb;
u8 tc, i;
@ -719,12 +755,12 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids, num_tids, 0);
ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif
/* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, is_first_pf, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
@ -1204,8 +1240,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#ifndef UNUSED_HSI_FUNC
/* In MF, should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
@ -1218,8 +1253,7 @@ void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
}
/* In MF, should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
@ -1533,8 +1567,7 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type/region/cid */
static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn * p_hwfn, u8 conn_type,
u8 region, u32 cid)
static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
@ -1588,8 +1621,8 @@ static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn * p_hwfn, u8 conn_typ
}
/* Calculate and set validation bytes for session context */
void ecore_calc_session_ctx_validation(struct ecore_hwfn * p_hwfn, void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 cid)
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@ -1600,14 +1633,14 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn * p_hwfn, void *p_ctx_m
OSAL_MEMSET(p_ctx, 0, ctx_size);
*x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
*t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
*u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
*x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
*t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
*u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
void ecore_calc_task_ctx_validation(struct ecore_hwfn * p_hwfn, void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 tid)
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@ -1616,8 +1649,7 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn * p_hwfn, void *p_ctx_mem,
OSAL_MEMSET(p_ctx, 0, ctx_size);
*region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
1, tid);
*region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */

View File

@ -40,7 +40,6 @@ struct init_qm_pq_params;
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
* @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@ -49,12 +48,11 @@ struct init_qm_pq_params;
*
* @return The required host memory size in 4KB units.
*/
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
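/* Editor's note (not from the sources): with the reworked signature above,
 * a caller sizing the QM PF memory would now do something like
 *
 *   size_4kb = ecore_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
 *                                   num_pf_pqs, num_vf_pqs);
 *
 * i.e. the pf_id argument of the previous revision is simply dropped.
 */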
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for the
@ -89,7 +87,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param is_first_pf - 1 = first PF in engine, 0 = otherwise
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@ -114,23 +111,22 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq - Initializes the WFQ weight of the specified PF
@ -302,24 +298,18 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
* is in BD mode.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 ethType);
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
* input ethType. Should be called once per port.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 ethType);
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType);
#endif /* UNUSED_HSI_FUNC */
@ -481,57 +471,50 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_calc_session_ctx_validation - Calculate validation byte for
* session context.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param cid - context cid.
*/
void ecore_calc_session_ctx_validation(struct ecore_hwfn * p_hwfn,
void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
* @brief ecore_calc_session_ctx_validation - Calculate validation byte for
* session context.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param cid - context cid.
*/
void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
u8 ctx_type, u32 cid);
/**
* @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
* context.
*
* @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
void ecore_calc_task_ctx_validation(struct ecore_hwfn * p_hwfn,
void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
* @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
* context.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
*
* @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
*
* @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
void ecore_memset_session_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
/**
* @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
* validation bytes.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
* @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
* validation bytes.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
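For context, a minimal sketch of how a caller might combine the hwfn-less 8.30 variants above; the ctx_type value is a placeholder:

	/* Sketch only: clear a task context without destroying its validation
	 * bytes, then recompute the validation byte for a (new) tid.
	 */
	static void example_reinit_task_ctx(void *p_task_ctx, u16 ctx_size, u32 tid)
	{
		ecore_memset_task_ctx(p_task_ctx, ctx_size, /* ctx_type */ 1);
		ecore_calc_task_ctx_validation(p_task_ctx, ctx_size,
					       /* ctx_type */ 1, tid);
	}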

View File

@ -31,7 +31,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
@ -75,6 +74,13 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
u32 rt_offset, u32 val)
{
if (rt_offset >= RUNTIME_ARRAY_SIZE) {
DP_ERR(p_hwfn,
"Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
val, rt_offset, RUNTIME_ARRAY_SIZE);
return;
}
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
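A minimal usage sketch of the guarded store; the offset constant comes from the runtime-defs header later in this change, and the value written is arbitrary:

	/* Sketch only: out-of-range offsets are now logged and dropped by the
	 * check above instead of writing past the rt_data arrays.
	 */
	static void example_store_rt(struct ecore_hwfn *p_hwfn)
	{
		ecore_init_store_rt_reg(p_hwfn, IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET,
					0xffffffff);
	}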
@ -85,6 +91,14 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
{
osal_size_t i;
if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
DP_ERR(p_hwfn,
"Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
rt_offset, (u32)(rt_offset + size - 1),
RUNTIME_ARRAY_SIZE);
return;
}
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@ -201,8 +215,7 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 fill,
u32 fill_count)
u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@ -335,8 +348,7 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
rc = ecore_init_fill_dmae(p_hwfn, p_ptt,
addr, 0, data);
rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@ -425,14 +437,6 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
OSAL_LE32_TO_CPU(cmd->op_data));
}
/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_callback_op *p_cmd)
{
DP_NOTICE(p_hwfn, true, "Currently init values have no need of callbacks\n");
}
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
@ -471,8 +475,7 @@ static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
@ -529,8 +532,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
phase_id);
b_dmae = GET_FIELD(data,
INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
@ -542,7 +545,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
break;
}
@ -556,7 +560,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
return rc;
}
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 gtt_base;
u32 i;
@ -574,7 +579,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
@ -583,7 +588,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
@ -602,7 +607,11 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
}
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *data)
#ifdef CONFIG_ECORE_BINARY_FW
const u8 *fw_data)
#else
const u8 OSAL_UNUSED *fw_data)
#endif
{
struct ecore_fw_data *fw = p_dev->fw_data;
@ -610,24 +619,24 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
if (!data) {
if (!fw_data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
buf_hdr = (struct bin_buffer_hdr *)data;
buf_hdr = (struct bin_buffer_hdr *)fw_data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
fw->fw_ver_info = (struct fw_ver_info *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
fw->init_ops = (union init_op *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
fw->arr_data = (u32 *)(data + offset);
fw->arr_data = (u32 *)(fw_data + offset);
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
fw->modes_tree_buf = (u8 *)(data + offset);
fw->modes_tree_buf = (u8 *)(fw_data + offset);
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_INIT_OPS__
#define __ECORE_INIT_OPS__
@ -130,5 +129,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#endif /* __ECORE_INIT_OPS__ */

File diff suppressed because it is too large

View File

@ -30,7 +30,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
@ -64,10 +63,14 @@ enum dbg_status dbg_read_attn(struct ecore_hwfn *dev,
enum dbg_status dbg_parse_attn(struct ecore_hwfn *dev,
struct dbg_attn_block_result *results);
const char* dbg_get_status_str(enum dbg_status status);
#define ecore_dbg_read_attn(hwfn, ptt, id, type, clear, results) \
dbg_read_attn(hwfn, ptt, id, type, clear, results)
#define ecore_dbg_parse_attn(hwfn, results) \
dbg_parse_attn(hwfn, results)
#define ecore_dbg_get_status_str(status) \
dbg_get_status_str(status)
#endif
struct ecore_pi_info {
@ -78,7 +81,7 @@ struct ecore_pi_info {
struct ecore_sb_sp_info {
struct ecore_sb_info sb_info;
/* per protocol index data */
struct ecore_pi_info pi_info_arr[PIS_PER_SB];
struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum ecore_attention_type {
@ -250,7 +253,7 @@ static const char* grc_timeout_attn_master_to_str(u8 master)
case 9: return "DBU";
case 10: return "DMAE";
default:
return "Unknown";
return "Unknown";
}
}
@ -272,19 +275,20 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
DP_INFO(p_hwfn->p_dev,
"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
tmp2, tmp,
(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
ECORE_GRC_ATTENTION_MASTER_SHIFT),
(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
DP_NOTICE(p_hwfn->p_dev, false,
"GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
tmp2, tmp,
(tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
: "Read from",
(tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
grc_timeout_attn_master_to_str((tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
ECORE_GRC_ATTENTION_MASTER_SHIFT),
(tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
(((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
out:
/* Regardless of anything else, clean the validity bit */
@ -415,30 +419,124 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
#define ECORE_DB_REC_COUNT 10
#define ECORE_DB_REC_INTERVAL 100
/* assumes sticky overflow indication was set for this PF */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u8 count = ECORE_DB_REC_COUNT;
u32 usage = 1;
/* wait for usage to zero or count to run out. This is necessary since
* EDPM doorbell transactions can take multiple 64b cycles, and as such
* can "split" over the pci. Possibly, the doorbell drop can happen with
* half an EDPM in the queue and the other half dropped. Another EDPM
* doorbell to the same address (from the doorbell recovery mechanism or
* from the doorbelling entity) could have its first half dropped and the
* second half interpreted as a continuation of the first. To prevent such
* malformed doorbells from reaching the device, flush the queue before
* releasing the overflow sticky indication.
*/
while (count-- && usage) {
usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
}
/* should have been depleted by now */
if (usage) {
DP_NOTICE(p_hwfn->p_dev, false,
"DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
return ECORE_TIMEOUT;
}
/* flush any pending (e)dpm as they may never arrive */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
/* release overflow sticky indication (stop silently dropping everything) */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
/* repeat all last doorbells (doorbell drop recovery) */
ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
u32 reason;
u32 int_sts, first_drop_reason, details, address, overflow,
all_drops_reason;
struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
enum _ecore_status_t rc;
reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
ECORE_DORQ_ATTENTION_REASON_MASK;
if (reason) {
u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS);
int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
int_sts);
DP_INFO(p_hwfn->p_dev,
"DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS),
(u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
/* obtain data about db drop/overflow */
first_drop_reason = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_REASON) &
ECORE_DORQ_ATTENTION_REASON_MASK;
details = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS);
address = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS);
overflow = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_PF_OVFL_STICKY);
all_drops_reason = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_REASON);
/* log info */
DP_NOTICE(p_hwfn->p_dev, false,
"Doorbell drop occurred\n"
"Address\t\t0x%08x\t(second BAR address)\n"
"FID\t\t0x%04x\t\t(Opaque FID)\n"
"Size\t\t0x%04x\t\t(in bytes)\n"
"1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
"Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
"Overflow\t0x%x\t\t(a per PF indication)\n",
address, GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason, overflow);
/* if this PF caused overflow, initiate recovery */
if (overflow) {
rc = ecore_db_rec_attn(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
/* clear the doorbell drop details and prepare for next drop */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
/* mark interrupt as handled (note: even if drop was due to a different
* reason than overflow we mark as handled)
*/
ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
DORQ_REG_INT_STS_DB_DROP | DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
/* if there are no indications other than drop indications, success */
if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
return ECORE_SUCCESS;
}
/* some other indication was present - non-recoverable */
DP_INFO(p_hwfn, "DORQ fatal attention\n");
return ECORE_INVAL;
}
@ -767,14 +865,19 @@ static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
status = ecore_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
b_clear, &attn_results);
#ifdef ATTN_DESC
if (status != DBG_STATUS_OK)
DP_NOTICE(p_hwfn, true,
"Failed to parse attention information [status %d]\n",
status);
"Failed to parse attention information [status: %s]\n",
ecore_dbg_get_status_str(status));
else
#ifdef ATTN_DESC
ecore_dbg_parse_attn(p_hwfn, &attn_results);
#else
if (status != DBG_STATUS_OK)
DP_NOTICE(p_hwfn, true,
"Failed to parse attention information [status: %d]\n",
status);
else
ecore_dbg_print_attn(p_hwfn, &attn_results);
#endif
}
@ -1384,7 +1487,7 @@ static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
if (IS_VF(p_hwfn->p_dev))
return;/* @@@TBD MichalK- VF CAU... */
sb_offset = igu_sb_id * PIS_PER_SB;
sb_offset = igu_sb_id * PIS_PER_SB_E4;
OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
@ -2576,10 +2679,10 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
IGU_REG_CONSUMER_MEM + sbid * 4);
for (i = 0; i < PIS_PER_SB; i++)
for (i = 0; i < PIS_PER_SB_E4; i++)
p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY +
sbid * 4 * PIS_PER_SB + i * 4);
sbid * 4 * PIS_PER_SB_E4 + i * 4);
return ECORE_SUCCESS;
}

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_INT_H__
#define __ECORE_INT_H__
@ -42,7 +41,7 @@
#define ECORE_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
#define ECORE_SB_INVALID_IDX 0xffff

View File

@ -28,10 +28,12 @@
*
*/
#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__
#include "common_hsi.h"
#ifndef __EXTRACT__LINUX__
#define ECORE_SB_IDX 0x0002
#define RX_PI 0
@ -48,7 +50,7 @@ enum ecore_int_mode {
#endif
struct ecore_sb_info {
struct status_block *sb_virt;
struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@ -66,7 +68,7 @@ struct ecore_sb_info {
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
u16 pi[PIS_PER_SB];
u16 pi[PIS_PER_SB_E4];
};
struct ecore_sb_cnt_info {
@ -89,7 +91,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
// barrier(); /* status block is written to by the chip */
// FIXME: need some sort of barrier.
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
@ -168,6 +170,7 @@ static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
__internal_ram_wr(OSAL_NULL, addr, size, data);
}
#endif
#endif
struct ecore_hwfn;
struct ecore_ptt;

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__
@ -179,6 +178,7 @@ struct ecore_hw_sriov_info {
};
#ifdef CONFIG_ECORE_SRIOV
#ifndef LINUX_REMOVE
/**
* @brief mark/clear all VFs before/after an incoming PCIe sriov
* disable.
@ -709,6 +709,7 @@ int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
#endif
/**
* @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce parameters
@ -736,66 +737,68 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param rel_vf_id
*
* @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
* @return MAX_NUM_VFS_E4 in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
#else
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, u8 to_disable) {}
static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, u16 rel_vf_id, u8 to_disable) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_iov_vf_init_params *p_params) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_SUCCESS;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, u16 vf_id, void *ctx) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only) {return false;}
static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid, bool b_enabled_only) {return OSAL_NULL;}
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid) {}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, u64 *events) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, struct ecore_ptt *ptt, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, u16 *opaque_fid) {}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn p_hwfn, u16 pvid, int vfid) {}
#ifndef LINUX_REMOVE
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev OSAL_UNUSED *p_dev, u8 OSAL_UNUSED to_disable) {}
static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_dev OSAL_UNUSED *p_dev, u16 OSAL_UNUSED rel_vf_id, u8 OSAL_UNUSED to_disable) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, struct ecore_iov_vf_init_params OSAL_UNUSED *p_params) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, u16 OSAL_UNUSED rel_vf_id) {return ECORE_SUCCESS;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vf_id, OSAL_UNUSED void *ctx) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, u16 OSAL_UNUSED rel_vf_id) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, struct ecore_mcp_link_params OSAL_UNUSED *params, struct ecore_mcp_link_state OSAL_UNUSED *link, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_caps) {}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED rel_vf_id, bool OSAL_UNUSED b_enabled_only) {return false;}
static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED vfid, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vfid) {}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u64 OSAL_UNUSED *events) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *ptt, int OSAL_UNUSED vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *mac, int OSAL_UNUSED vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *mac, OSAL_UNUSED int vfid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn OSAL_UNUSED *p_hwfn, bool OSAL_UNUSED b_untagged_only, int OSAL_UNUSED vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED *opaque_fid) {}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn OSAL_UNUSED p_hwfn, u16 OSAL_UNUSED pvid, int OSAL_UNUSED vfid) {}
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, int vfid, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, int vfid, bool val) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) {return 0;}
static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_req_virt_addr, u16 *p_req_virt_size) {}
static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_reply_virt_addr, u16 *p_reply_virt_size) {}
static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length) {return false;}
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, struct ecore_ptt OSAL_UNUSED *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, bool OSAL_UNUSED val) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) {return false;}
static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return 0;}
static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id, void OSAL_UNUSED **pp_req_virt_addr, u16 OSAL_UNUSED *p_req_virt_size) {}
static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id, void OSAL_UNUSED **pp_reply_virt_addr, u16 OSAL_UNUSED *p_reply_virt_size) {}
static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 OSAL_UNUSED length) {return false;}
static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void) {return 0;}
static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, int val) { return ECORE_INVAL; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, struct ecore_eth_stats *p_stats) { return ECORE_INVAL; }
static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, int OSAL_UNUSED val) { return ECORE_INVAL; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_ptt OSAL_UNUSED *p_ptt, int OSAL_UNUSED vfid, struct ecore_eth_stats OSAL_UNUSED *p_stats) { return ECORE_INVAL; }
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) { return 0; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, int vfid, u32 rate) { return ECORE_INVAL; }
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return 0;}
static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) {return false;}
static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid) { return 0; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev OSAL_UNUSED *p_dev, int OSAL_UNUSED vfid, OSAL_UNUSED u32 rate) { return ECORE_INVAL; }
#endif
static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn OSAL_UNUSED *p_hwfn, int OSAL_UNUSED vfid, u16 OSAL_UNUSED vxlan_port, u16 OSAL_UNUSED geneve_port) { return; }
static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED rel_vf_id) { return MAX_NUM_VFS_E4; }
#endif
static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port) { return; }
static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { return E4_MAX_NUM_VFS; }
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
_i < E4_MAX_NUM_VFS; \
_i < MAX_NUM_VFS_E4; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __IRO_H__
#define __IRO_H__

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
@ -68,8 +67,8 @@ ARRAY_DECL struct iro iro_arr[49] = {
{ 0x2578, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
{ 0x24f8, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x200, 0x10, 0x8, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x10, 0x8, 0x0, 0x2}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0x200, 0x18, 0x8, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x18, 0x8, 0x0, 0x2}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xd9a8, 0x38, 0x0, 0x0, 0x24}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_ISCSI_H__
#define __ECORE_ISCSI_H__

View File

@ -119,6 +119,34 @@ struct ecore_iscsi_conn {
u16 physical_q0;
u16 physical_q1;
u8 abortive_dsconnect;
u8 dif_on_immediate;
#define ECORE_ISCSI_CONN_DIF_ON_IMM_DIS 0
#define ECORE_ISCSI_CONN_DIF_ON_IMM_DEFAULT 1
#define ECORE_ISCSI_CONN_DIF_ON_IMM_LUN_MAPPER 2
dma_addr_t lun_mapper_phys_addr;
u32 initial_ref_tag;
u16 application_tag;
u16 application_tag_mask;
u8 validate_guard;
u8 validate_app_tag;
u8 validate_ref_tag;
u8 forward_guard;
u8 forward_app_tag;
u8 forward_ref_tag;
u8 interval_size; /* 0=512B, 1=4KB */
u8 network_interface; /* 0=None, 1=DIF */
u8 host_interface; /* 0=None, 1=DIF, 2=DIX */
u8 ref_tag_mask; /* mask for reference tag handling */
u8 forward_app_tag_with_mask;
u8 forward_ref_tag_with_mask;
u8 ignore_app_tag;
u8 initial_ref_tag_is_valid;
u8 host_guard_type; /* 0 = IP checksum, 1 = CRC */
u8 protection_type; /* 1/2/3 - Protection Type */
u8 crc_seed; /* 0=0x0000, 1=0xffff */
u8 keep_ref_tag_const;
};
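A hedged sketch of how an upper layer might fill the new DIF fields above; the 0/1 encodings follow the inline comments, and the mapper address and chosen values are placeholders:

	/* Sketch only: enable DIF-on-immediate with a LUN mapper for a
	 * connection. Field values are illustrative, not mandated defaults.
	 */
	static void example_enable_dif(struct ecore_iscsi_conn *p_conn,
				       dma_addr_t lun_mapper_phys)
	{
		p_conn->dif_on_immediate = ECORE_ISCSI_CONN_DIF_ON_IMM_LUN_MAPPER;
		p_conn->lun_mapper_phys_addr = lun_mapper_phys;
		p_conn->host_guard_type = 1;	/* CRC guard */
		p_conn->interval_size = 0;	/* 512B intervals */
		p_conn->crc_seed = 0;		/* seed 0x0000 */
	}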
struct ecore_iscsi_stats

View File

@ -457,8 +457,7 @@ enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
p_params->concrete_fid);
p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
@ -660,8 +659,7 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
}
static void
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
@ -692,8 +690,7 @@ ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
}
static void
ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sp_vport_update_params *p_params)
{
int i;
@ -799,11 +796,10 @@ enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
}
/* Update mcast bins for VFs, PF doesn't use this functionality */
ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
ecore_sp_update_mcast_bin(p_ramrod, p_params);
ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
p_params->sge_tpa_params);
ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
@ -1508,10 +1504,7 @@ enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* Note: crc32_length MUST be aligned to 8
* Return:
******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet,
u32 crc32_length,
u32 crc32_seed,
u8 complement)
static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
u32 byte = 0, bit = 0, crc32_result = crc32_seed;
u8 msb = 0, current_byte = 0;
@ -1537,25 +1530,23 @@ static u32 ecore_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
u32 packet_buf[2] = {0};
OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
return crc & 0xff;
}
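A brief sketch of how a caller typically consumes the bin number; the 256-bit bins[] array and its layout belong to the caller (e.g. the vport-update mcast path), not to this helper:

	/* Sketch only: mark the approximate-match bin for a multicast MAC. */
	static void example_add_mcast_mac(u32 bins[8], u8 *mac)
	{
		u8 bin = ecore_mcast_bin_from_mac(mac);

		bins[bin / 32] |= 1U << (bin % 32);
	}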
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
struct ecore_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@ -1647,16 +1638,13 @@ enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u16 opaque_fid;
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_mcast(p_hwfn,
opaque_fid,
p_filter_cmd,
comp_mode,
p_comp_data);
@ -1748,8 +1736,7 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *p_stats,
u16 statistics_bin)
struct ecore_eth_stats *p_stats)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@ -1964,7 +1951,7 @@ void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
{
__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
#ifndef ASIC_ONLY
@ -2101,7 +2088,6 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_L2_H__
#define __ECORE_L2_H__
@ -163,32 +162,4 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
/**
* @brief - ecore_configure_rfs_ntuple_filter
*
* This ramrod should be used to add or remove arfs hw filter
*
* @params p_hwfn
* @params p_ptt
* @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
it with cookie and callback function address, if not
using this mode then client must pass NULL.
* @params p_addr p_addr is an actual packet header that needs to be
* filtered. It has to be mapped with IO to read prior to
* calling this, [contains 4 tuples- src ip, dest ip,
* src port, dest port].
* @params length length of p_addr header up to past the transport header.
* @params qid receive packet will be directed to this queue.
* @params vport_id
* @params b_is_add flag to add or remove filter.
*
*/
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
#endif

View File

@ -51,10 +51,21 @@ enum ecore_rss_caps {
#define ECORE_MAX_PHC_DRIFT_PPB 291666666
enum ecore_ptp_filter_type {
ECORE_PTP_FILTER_L2,
ECORE_PTP_FILTER_IPV4,
ECORE_PTP_FILTER_IPV4_IPV6,
ECORE_PTP_FILTER_L2_IPV4_IPV6
ECORE_PTP_FILTER_NONE,
ECORE_PTP_FILTER_ALL,
ECORE_PTP_FILTER_V1_L4_EVENT,
ECORE_PTP_FILTER_V1_L4_GEN,
ECORE_PTP_FILTER_V2_L4_EVENT,
ECORE_PTP_FILTER_V2_L4_GEN,
ECORE_PTP_FILTER_V2_L2_EVENT,
ECORE_PTP_FILTER_V2_L2_GEN,
ECORE_PTP_FILTER_V2_EVENT,
ECORE_PTP_FILTER_V2_GEN
};
enum ecore_ptp_hwtstamp_tx_type {
ECORE_PTP_HWTSTAMP_TX_OFF,
ECORE_PTP_HWTSTAMP_TX_ON,
};
struct ecore_queue_start_common_params {
@ -246,7 +257,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
* different from the RXQ opaque
* otherwise on CQe.
* @param cqe_completion If True completion will be
* receive on CQe.
* received on CQe.
* @return enum _ecore_status_t
*/
enum _ecore_status_t
@ -460,4 +471,30 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev);
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
/**
* @brief - ecore_configure_rfs_ntuple_filter
*
* This ramrod should be used to add or remove arfs hw filter
*
* @params p_hwfn
* @params p_cb Used for ECORE_SPQ_MODE_CB, where client would initialize
* it with cookie and callback function address, if not
* using this mode then client must pass NULL.
* @params p_addr p_addr is an actual packet header that needs to be
* filtered. It has to be mapped with IO to read prior to
* calling this, [contains 4 tuples- src ip, dest ip,
* src port, dest port].
* @params length length of p_addr header up to past the transport header.
* @params qid receive packet will be directed to this queue.
* @params vport_id
* @params b_is_add flag to add or remove filter.
*
*/
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
#endif
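A hedged usage sketch of the relocated ecore_configure_rfs_ntuple_filter() prototype above; the DMA-mapped header buffer, its length and the queue/vport ids are all caller-supplied placeholders:

	/* Sketch only: add an aRFS filter without a completion callback
	 * (p_cb == NULL, so ECORE_SPQ_MODE_CB is not used).
	 */
	static enum _ecore_status_t
	example_add_ntuple_filter(struct ecore_hwfn *p_hwfn, dma_addr_t hdr_phys,
				  u16 hdr_len, u16 rx_qid, u8 vport_id)
	{
		return ecore_configure_rfs_ntuple_filter(p_hwfn, OSAL_NULL,
							 hdr_phys, hdr_len,
							 rx_qid, vport_id,
							 true /* b_is_add */);
	}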

View File

@ -111,21 +111,14 @@ struct ecore_ll2_tx_queue {
struct ecore_ll2_info {
osal_mutex_t mutex;
enum ecore_ll2_conn_type conn_type;
struct ecore_ll2_acquire_data_inputs input;
u32 cid;
u8 my_id;
u8 queue_id;
u8 tx_stats_id;
bool b_active;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
u8 tx_tc;
u8 tx_max_bds_per_packet;
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
u8 gsi_enable;
u8 tx_stats_en;
u8 main_func_queue;
struct ecore_ll2_rx_queue rx_queue;

View File

@ -143,15 +143,22 @@ void (*ecore_ll2_release_tx_packet_cb)(void *cxt,
bool b_last_fragment,
bool b_last_packet);
typedef
void (*ecore_ll2_slowpath_cb)(void *cxt,
u8 connection_handle,
u32 opaque_data_0,
u32 opaque_data_1);
struct ecore_ll2_cbs {
ecore_ll2_complete_rx_packet_cb rx_comp_cb;
ecore_ll2_release_rx_packet_cb rx_release_cb;
ecore_ll2_complete_tx_packet_cb tx_comp_cb;
ecore_ll2_release_tx_packet_cb tx_release_cb;
ecore_ll2_slowpath_cb slowpath_cb;
void *cookie;
};
struct ecore_ll2_acquire_data {
struct ecore_ll2_acquire_data_inputs {
enum ecore_ll2_conn_type conn_type;
u16 mtu; /* Maximum bytes that can be placed on a BD*/
u16 rx_num_desc;
@ -170,10 +177,14 @@ struct ecore_ll2_acquire_data {
enum ecore_ll2_error_handle ai_err_no_buf;
u8 secondary_queue;
u8 gsi_enable;
};
struct ecore_ll2_acquire_data {
struct ecore_ll2_acquire_data_inputs input;
const struct ecore_ll2_cbs *cbs;
/* Output container for LL2 connection's handle */
u8 *p_connection_handle;
const struct ecore_ll2_cbs *cbs;
};
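A hedged sketch of filling the split input/cbs layout; ecore_ll2_acquire_connection() is assumed to be the acquire entry point declared elsewhere in this header, and the MTU/ring size are placeholders:

	/* Sketch only: acquire an LL2 connection with the new acquire-data
	 * layout (inputs grouped under .input, callbacks under .cbs).
	 */
	static enum _ecore_status_t
	example_ll2_acquire(struct ecore_hwfn *p_hwfn,
			    enum ecore_ll2_conn_type type,
			    const struct ecore_ll2_cbs *cbs, u8 *p_handle)
	{
		struct ecore_ll2_acquire_data data;

		OSAL_MEM_ZERO(&data, sizeof(data));
		data.input.conn_type = type;
		data.input.mtu = 1500;		/* placeholder MTU */
		data.input.rx_num_desc = 32;	/* placeholder ring size */
		data.p_connection_handle = p_handle;
		data.cbs = cbs;

		return ecore_ll2_acquire_connection(p_hwfn, &data);
	}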
/**

File diff suppressed because it is too large

View File

@ -51,21 +51,26 @@
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->p_dev->num_ports_in_engines * \
((_p_hwfn)->p_dev->num_ports_in_engine * \
ecore_device_num_engines((_p_hwfn)->p_dev)))
struct ecore_mcp_info {
/* Spinlock used for protecting the access to the MFW mailbox */
osal_spinlock_t lock;
/* List for mailbox commands which were sent and wait for a response */
osal_list_t cmd_list;
/* Spinlock used for syncing SW link-changes and link-changes
/* Spinlock used for protecting the access to the mailbox commands list
* and the sending of the commands.
*/
osal_spinlock_t cmd_lock;
/* Flag to indicate whether sending a MFW mailbox command is blocked */
bool b_block_cmd;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
osal_spinlock_t link_lock;
/* Flag to indicate whether sending a MFW mailbox is forbidden */
bool block_mb_sending;
/* Address of the MCP public area */
u32 public_base;
/* Address of the driver mailbox */
@ -89,7 +94,7 @@ struct ecore_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
u16 mcp_hist;
u32 mcp_hist;
/* Capabilities negotiated with the MFW */
u32 capabilities;
@ -136,7 +141,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
* Can only be called after `num_ports_in_engines' is set
* Can only be called after `num_ports_in_engine' is set
*/
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
@ -278,56 +283,6 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Sends an NVM write command request to the MFW with
* payload.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
* DRV_MSG_CODE_NVM_PUT_FILE_DATA
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param i_txn_size - Buffer size
* @param i_buf - Pointer to the buffer
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 i_txn_size,
u32 *i_buf);
/**
* @brief - Sends an NVM read command request to the MFW to get
* a buffer.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
* DRV_MSG_CODE_NVM_READ_NVRAM commands
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param o_txn_size - Buffer size output
* @param o_buf - Pointer to the buffer returned by the MFW.
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 *o_txn_size,
u32 *o_buf);
/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
@ -433,17 +388,6 @@ enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mdump_retain_data *p_mdump_retain);
/**
* @brief - Clear the mdump retained data.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Sets the MFW's max value for the given resource
*
@ -513,7 +457,12 @@ enum ecore_resc_lock {
ECORE_RESC_LOCK_PTP_PORT1,
ECORE_RESC_LOCK_PTP_PORT2,
ECORE_RESC_LOCK_PTP_PORT3,
ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL,
/* A dummy value to be used for auxiliary functions in need of
* returning an 'error' value.
*/
ECORE_RESC_LOCK_RESC_INVALID,
};
struct ecore_resc_lock_params {
@ -527,9 +476,11 @@ struct ecore_resc_lock_params {
/* Number of times to retry locking */
u8 retry_num;
#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
/* The interval in usec between retries */
u16 retry_interval;
#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
/* Use sleep or delay between retries */
bool sleep_b4_retry;
@ -580,6 +531,19 @@ enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params);
/**
* @brief - default initialization for lock/unlock resource structs
*
* @param p_lock - lock params struct to be initialized; Can be OSAL_NULL
* @param p_unlock - unlock params struct to be initialized; Can be OSAL_NULL
* @param resource - the requested resource
* @param b_is_permanent - disable retries & aging when set
*/
void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
struct ecore_resc_unlock_params *p_unlock,
enum ecore_resc_lock resource,
bool b_is_permanent);
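A hedged sketch of the intended flow around the new default-init helper; the PTP-port resource is just an example pick, and the lock/unlock calls are the ones declared in this header:

	/* Sketch only: default-init the lock/unlock params, then take and
	 * release the MFW-arbitrated resource around the protected access.
	 */
	static enum _ecore_status_t
	example_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	{
		struct ecore_resc_lock_params lock_params;
		struct ecore_resc_unlock_params unlock_params;
		enum _ecore_status_t rc;

		ecore_mcp_resc_lock_default_init(&lock_params, &unlock_params,
						 ECORE_RESC_LOCK_PTP_PORT1,
						 false /* allow retries/aging */);

		rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* ... access the resource shared with other PFs/MFW ... */

		return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
	}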
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val);

View File

@ -51,6 +51,7 @@ enum ecore_mcp_eee_mode {
ECORE_MCP_EEE_UNSUPPORTED
};
#ifndef __EXTRACT__LINUX__
struct ecore_link_eee_params {
u32 tx_lpi_timer;
#define ECORE_EEE_1G_ADV (1 << 0)
@ -61,6 +62,7 @@ struct ecore_link_eee_params {
bool enable;
bool tx_lpi_enable;
};
#endif
struct ecore_mcp_link_params {
struct ecore_mcp_link_speed_params speed;
@ -134,42 +136,13 @@ struct ecore_mcp_function_info {
u16 mtu;
};
struct ecore_mcp_nvm_common {
u32 offset;
u32 param;
u32 resp;
u32 cmd;
};
struct ecore_mcp_nvm_rd {
u32 *buf_size;
u32 *buf;
};
struct ecore_mcp_nvm_wr {
u32 buf_size;
u32 *buf;
};
struct ecore_mcp_nvm_params {
#define ECORE_MCP_CMD (1 << 0)
#define ECORE_MCP_NVM_RD (1 << 1)
#define ECORE_MCP_NVM_WR (1 << 2)
u8 type;
struct ecore_mcp_nvm_common nvm_common;
union {
struct ecore_mcp_nvm_rd nvm_rd;
struct ecore_mcp_nvm_wr nvm_wr;
};
};
#ifndef __EXTRACT__LINUX__
enum ecore_nvm_images {
ECORE_NVM_IMAGE_ISCSI_CFG,
ECORE_NVM_IMAGE_FCOE_CFG,
ECORE_NVM_IMAGE_MDUMP,
};
#endif
struct ecore_mcp_drv_version {
u32 version;
@ -238,6 +211,7 @@ enum ecore_ov_wol {
ECORE_OV_WOL_ENABLED
};
#ifndef __EXTRACT__LINUX__
#define ECORE_MAX_NPIV_ENTRIES 128
#define ECORE_WWN_SIZE 8
struct ecore_fc_npiv_tbl {
@ -252,6 +226,7 @@ enum ecore_led_mode {
ECORE_LED_MODE_ON,
ECORE_LED_MODE_RESTORE
};
#endif
struct ecore_temperature_sensor {
u8 sensor_location;
@ -312,6 +287,7 @@ struct ecore_mfw_tlv_generic {
bool tx_bytes_set;
};
#ifndef __EXTRACT__LINUX__
struct ecore_mfw_tlv_eth {
u16 lso_maxoff_size;
bool lso_maxoff_size_set;
@ -576,6 +552,7 @@ struct ecore_mfw_tlv_iscsi {
u64 tx_bytes;
bool tx_bytes_set;
};
#endif
union ecore_mfw_tlv_data {
struct ecore_mfw_tlv_generic generic;
@ -698,6 +675,7 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef LINUX_REMOVE
/**
* @brief - return the mcp function info of the hw function
*
@ -707,45 +685,9 @@ enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
*/
const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
#endif
/**
* @brief - Function for reading/manipulating the nvram. Following are supported
* functionalities.
* 1. Read: Read the specified nvram offset.
* input values:
* type - ECORE_MCP_NVM_RD
* cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
* offset - nvm offset
*
* output values:
* buf - buffer
* buf_size - buffer size
*
* 2. Write: Write the data at the specified nvram offset
* input values:
* type - ECORE_MCP_NVM_WR
* cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
* offset - nvm offset
* buf - buffer
* buf_size - buffer size
*
* 3. Command: Send the NVM command to MCP.
* input values:
* type - ECORE_MCP_CMD
* cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
* offset - nvm offset
*
*
* @param p_hwfn
* @param p_ptt
* @param params
*
* @return ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_nvm_params *params);
#ifndef LINUX_REMOVE
/**
* @brief - count number of function with a matching personality on engine.
*
@ -759,6 +701,7 @@ enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 personalities);
#endif
/**
* @brief Get the flash size value
@ -1037,6 +980,56 @@ enum _ecore_status_t ecore_mcp_get_nvm_image(struct ecore_hwfn *p_hwfn,
enum ecore_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
/**
* @brief - Sends an NVM write command request to the MFW with
* payload.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
* DRV_MSG_CODE_NVM_PUT_FILE_DATA
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param i_txn_size - Buffer size
* @param i_buf - Pointer to the buffer
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 i_txn_size,
u32 *i_buf);
/**
* @brief - Sends an NVM read command request to the MFW to get
* a buffer.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
* DRV_MSG_CODE_NVM_READ_NVRAM commands
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param o_txn_size - Buffer size output
* @param o_buf - Pointer to the buffer returned by the MFW.
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 *o_txn_size,
u32 *o_buf);
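A hedged sketch of packing the param word as documented above ([0:23] offset, [24:31] size) for a small NVRAM read; the command code is the one named in the comment and the buffer handling is left to the caller:

	/* Sketch only: read up to 255 bytes of NVRAM via the MFW mailbox. */
	static enum _ecore_status_t
	example_nvm_read(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 u32 nvm_offset, u8 len, u32 *p_buf)
	{
		u32 mcp_resp, mcp_param, txn_size;

		return ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					    DRV_MSG_CODE_NVM_READ_NVRAM,
					    (nvm_offset & 0xffffff) |
					    ((u32)len << 24),
					    &mcp_resp, &mcp_param,
					    &txn_size, p_buf);
	}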
/**
* @brief Read from sfp
*
@ -1245,6 +1238,17 @@ ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Clear the mdump retained data.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Gets the LLDP MAC address.
*

View File

@ -85,33 +85,25 @@ void ecore_ooo_setup(struct ecore_hwfn *p_hwfn);
void ecore_ooo_free(struct ecore_hwfn *p_hwfn);
void ecore_ooo_save_history_entry(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
void ecore_ooo_save_history_entry(struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
void ecore_ooo_release_connection_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid);
void ecore_ooo_release_connection_isles(struct ecore_ooo_info *p_ooo_info,
u32 cid);
void ecore_ooo_release_all_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_release_all_isles(struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_put_free_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer);
void ecore_ooo_put_free_buffer(struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer);
struct ecore_ooo_buffer *
ecore_ooo_get_free_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
ecore_ooo_get_free_buffer(struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_put_ready_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer,
u8 on_tail);
void ecore_ooo_put_ready_buffer(struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer, u8 on_tail);
struct ecore_ooo_buffer *
ecore_ooo_get_ready_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
ecore_ooo_get_ready_buffer(struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_delete_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,

View File

@ -92,7 +92,7 @@ struct ecore_fcoe_pf_params {
struct ecore_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[2];
u64 bdq_pbl_base_addr[3];
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
@ -106,8 +106,8 @@ struct ecore_iscsi_pf_params {
/* The following parameters are used during protocol-init */
u16 half_way_close_timeout;
u16 bdq_xoff_threshold[2];
u16 bdq_xon_threshold[2];
u16 bdq_xoff_threshold[3];
u16 bdq_xon_threshold[3];
u16 cmdq_xoff_threshold;
u16 cmdq_xon_threshold;
u16 rq_buffer_size;
@ -126,7 +126,8 @@ struct ecore_iscsi_pf_params {
u8 ooo_enable;
u8 is_target;
u8 bdq_pbl_num_entries[2];
u8 is_tmwo_en;
u8 bdq_pbl_num_entries[3];
};
enum ecore_rdma_protocol {

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__
@ -100,11 +99,14 @@ void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#ifdef CONFIG_ECORE_IWARP
#define ECORE_IWARP_PREALLOC_CNT (256)
#define ECORE_IWARP_LL2_SYN_TX_SIZE (128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE (256)
#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define ECORE_IWARP_LL2_OOO_DEF_RX_SIZE (4096)
#define ECORE_IWARP_LL2_OOO_MAX_RX_SIZE (16384)
#define ECORE_IWARP_MAX_SYN_PKT_SIZE (128)
#define ECORE_IWARP_HANDLE_INVAL (0xff)
@ -309,6 +311,13 @@ union async_output {
struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
#define ECORE_MAX_PRIV_DATA_LEN (512)
struct ecore_iwarp_ep_memory {
u8 in_pdata[ECORE_MAX_PRIV_DATA_LEN];
u8 out_pdata[ECORE_MAX_PRIV_DATA_LEN];
union async_output async_output;
};
/* Endpoint structure represents a TCP connection. This connection can be
* associated with a QP or not (in which case QP==NULL)
*/
@ -322,16 +331,9 @@ struct ecore_iwarp_ep {
* only one actually allocated and freed. The rest are pointers into
* this buffer
*/
void *ep_buffer_virt;
struct ecore_iwarp_ep_memory *ep_buffer_virt;
dma_addr_t ep_buffer_phys;
/* Asynce EQE events contain only the ep pointer on the completion. The
* rest of the data is written to an output buffer pre-allocated by
* the driver. This buffer points to a location in the ep_buffer.
*/
union async_output *async_output_virt;
dma_addr_t async_output_phys;
struct ecore_iwarp_cm_info cm_info;
enum tcp_connect_mode connect_mode;
enum mpa_rtr_type rtr_type;
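
With ep_buffer_virt now typed as struct ecore_iwarp_ep_memory and the separate async_output_virt/async_output_phys pair removed, the async output lives inside the single endpoint buffer. A rough sketch of reaching it (accessor names hypothetical; the physical-address math assumes the contiguous single allocation described in the comment above):

/* Hypothetical accessors: everything is carved out of the one DMA buffer. */
static union async_output *
qlnx_iwarp_ep_async_output(struct ecore_iwarp_ep *ep)
{
        return &ep->ep_buffer_virt->async_output;
}

static dma_addr_t
qlnx_iwarp_ep_async_output_phys(struct ecore_iwarp_ep *ep)
{
        /* Buffer base plus the field offset within ecore_iwarp_ep_memory. */
        return ep->ep_buffer_phys +
            offsetof(struct ecore_iwarp_ep_memory, async_output);
}
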

View File

@ -31,8 +31,11 @@
#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__
#ifndef LINUX_REMOVE
#define ETH_ALEN 6
#endif
#ifndef __EXTRACT__LINUX__
enum ecore_roce_ll2_tx_dest
{
@ -324,6 +327,7 @@ struct ecore_rdma_create_cq_in_params {
u16 int_timeout;
};
#endif
struct ecore_rdma_resize_cq_in_params {
/* input variables (given by miniport) */
@ -338,6 +342,7 @@ struct ecore_rdma_resize_cq_in_params {
*/
};
#ifndef __EXTRACT__LINUX__
enum roce_mode
{
@ -546,6 +551,7 @@ struct ecore_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
};
#endif
struct ecore_rdma_resize_cq_out_params {
/* output variables, provided to the upper layer */
@ -562,6 +568,7 @@ struct ecore_rdma_resize_cnq_in_params {
u64 pbl_ptr;
};
#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
u64 sent_bytes;
u64 sent_pkts;
@ -592,6 +599,7 @@ struct ecore_rdma_counters_out_params {
u64 tid_count;
u64 max_tid;
};
#endif
enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
@ -699,16 +707,30 @@ ecore_rdma_query_counters(void *rdma_cxt,
u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);
u32 ecore_rdma_query_cau_timer_res(void);
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
struct ecore_rdma_create_srq_in_params *in_params,
struct ecore_rdma_create_srq_out_params *out_params);
enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
struct ecore_rdma_destroy_srq_in_params *in_params);
enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
struct ecore_rdma_modify_srq_in_params *in_params);
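
The new SRQ verbs above (create/modify/destroy) are plain rdma_cxt calls. Only the modify parameters' fields (srq_id, wqe_limit) are visible in this excerpt, so the sketch below sticks to that call; the helper name is hypothetical and create/destroy setup is omitted because their field layouts are not shown here.

/* Hypothetical helper: update the limit of an existing SRQ. */
static enum _ecore_status_t
qlnx_srq_set_limit(void *rdma_cxt, u16 srq_id, u32 new_limit)
{
        struct ecore_rdma_modify_srq_in_params in;

        OSAL_MEMSET(&in, 0, sizeof(in));
        in.srq_id = srq_id;          /* field shown in this diff */
        in.wqe_limit = new_limit;    /* field shown in this diff */

        return ecore_rdma_modify_srq(rdma_cxt, &in);
}
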
#ifdef CONFIG_ECORE_IWARP
/* iWARP API */
#ifndef __EXTRACT__LINUX__
enum ecore_iwarp_event_type {
ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
@ -832,6 +854,7 @@ struct ecore_iwarp_tcp_abort_in {
void *ep_context;
};
#endif
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,

View File

@ -28,6 +28,7 @@
*
*/
#ifndef __RT_DEFS_H__
#define __RT_DEFS_H__
@ -57,417 +58,417 @@
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 1049
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 1049
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 2073
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
#define CAU_REG_PI_MEMORY_RT_OFFSET 3097
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 7513
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 7514
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 7515
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 7516
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 7517
#define PRS_REG_SEARCH_TCP_RT_OFFSET 7518
#define PRS_REG_SEARCH_FCOE_RT_OFFSET 7519
#define PRS_REG_SEARCH_ROCE_RT_OFFSET 7520
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 7521
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 7522
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 7523
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 7524
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 7525
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 7526
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 7527
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 7528
#define SRC_REG_FIRSTFREE_RT_OFFSET 7529
#define SRC_REG_FIRSTFREE_RT_SIZE 2
#define SRC_REG_LASTFREE_RT_OFFSET 6667
#define SRC_REG_LASTFREE_RT_OFFSET 7531
#define SRC_REG_LASTFREE_RT_SIZE 2
#define SRC_REG_COUNTFREE_RT_OFFSET 6669
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define SRC_REG_COUNTFREE_RT_OFFSET 7533
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 7534
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 7535
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 7536
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 7537
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 7538
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 7539
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 7540
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 7541
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 7542
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 7543
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 7544
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 7545
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 7546
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 7547
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 7548
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 7549
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 7550
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 7551
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 7552
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7553
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7554
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 7555
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 7556
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 7557
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 7558
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 7559
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 7560
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 7561
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 7562
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 7563
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 7564
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 7565
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 7566
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 29566
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 29567
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 29568
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 29569
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 29570
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 29571
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 29572
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 29573
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 29574
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 29575
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 29576
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 29577
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 29578
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29994
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 30602
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 30603
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 30604
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 30605
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 30606
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 30607
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 30608
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 30609
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 30610
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 30611
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 30612
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 30613
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 30614
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 30615
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 30616
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 30617
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 30618
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 30619
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 30620
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 30621
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 30622
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 30623
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 30624
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 30625
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 30626
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 30627
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 30628
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 30629
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 30630
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 30631
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 30632
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 30633
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 30634
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 30635
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 30636
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 30637
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 30638
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 30639
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 30640
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 30641
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 30642
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 30643
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 30644
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 30645
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 30646
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 30647
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 30648
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 30649
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 30650
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 30651
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 30652
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 30653
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 30654
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 30655
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 30656
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 30657
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 30658
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 30659
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 30660
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 30661
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 30662
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 30663
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 30664
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 30665
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 30666
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 30667
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 30668
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 30669
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 30797
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 30798
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 30799
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 30800
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 30801
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 30802
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 30803
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 30804
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 30805
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 30806
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 30807
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 30808
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 30809
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 30810
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 30811
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 30812
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 30813
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 30814
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 30815
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 30816
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 30817
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 30818
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 30819
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 30820
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 30821
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 30822
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 30823
#define QM_REG_PQTX2PF_0_RT_OFFSET 30824
#define QM_REG_PQTX2PF_1_RT_OFFSET 30825
#define QM_REG_PQTX2PF_2_RT_OFFSET 30826
#define QM_REG_PQTX2PF_3_RT_OFFSET 30827
#define QM_REG_PQTX2PF_4_RT_OFFSET 30828
#define QM_REG_PQTX2PF_5_RT_OFFSET 30829
#define QM_REG_PQTX2PF_6_RT_OFFSET 30830
#define QM_REG_PQTX2PF_7_RT_OFFSET 30831
#define QM_REG_PQTX2PF_8_RT_OFFSET 30832
#define QM_REG_PQTX2PF_9_RT_OFFSET 30833
#define QM_REG_PQTX2PF_10_RT_OFFSET 30834
#define QM_REG_PQTX2PF_11_RT_OFFSET 30835
#define QM_REG_PQTX2PF_12_RT_OFFSET 30836
#define QM_REG_PQTX2PF_13_RT_OFFSET 30837
#define QM_REG_PQTX2PF_14_RT_OFFSET 30838
#define QM_REG_PQTX2PF_15_RT_OFFSET 30839
#define QM_REG_PQTX2PF_16_RT_OFFSET 30840
#define QM_REG_PQTX2PF_17_RT_OFFSET 30841
#define QM_REG_PQTX2PF_18_RT_OFFSET 30842
#define QM_REG_PQTX2PF_19_RT_OFFSET 30843
#define QM_REG_PQTX2PF_20_RT_OFFSET 30844
#define QM_REG_PQTX2PF_21_RT_OFFSET 30845
#define QM_REG_PQTX2PF_22_RT_OFFSET 30846
#define QM_REG_PQTX2PF_23_RT_OFFSET 30847
#define QM_REG_PQTX2PF_24_RT_OFFSET 30848
#define QM_REG_PQTX2PF_25_RT_OFFSET 30849
#define QM_REG_PQTX2PF_26_RT_OFFSET 30850
#define QM_REG_PQTX2PF_27_RT_OFFSET 30851
#define QM_REG_PQTX2PF_28_RT_OFFSET 30852
#define QM_REG_PQTX2PF_29_RT_OFFSET 30853
#define QM_REG_PQTX2PF_30_RT_OFFSET 30854
#define QM_REG_PQTX2PF_31_RT_OFFSET 30855
#define QM_REG_PQTX2PF_32_RT_OFFSET 30856
#define QM_REG_PQTX2PF_33_RT_OFFSET 30857
#define QM_REG_PQTX2PF_34_RT_OFFSET 30858
#define QM_REG_PQTX2PF_35_RT_OFFSET 30859
#define QM_REG_PQTX2PF_36_RT_OFFSET 30860
#define QM_REG_PQTX2PF_37_RT_OFFSET 30861
#define QM_REG_PQTX2PF_38_RT_OFFSET 30862
#define QM_REG_PQTX2PF_39_RT_OFFSET 30863
#define QM_REG_PQTX2PF_40_RT_OFFSET 30864
#define QM_REG_PQTX2PF_41_RT_OFFSET 30865
#define QM_REG_PQTX2PF_42_RT_OFFSET 30866
#define QM_REG_PQTX2PF_43_RT_OFFSET 30867
#define QM_REG_PQTX2PF_44_RT_OFFSET 30868
#define QM_REG_PQTX2PF_45_RT_OFFSET 30869
#define QM_REG_PQTX2PF_46_RT_OFFSET 30870
#define QM_REG_PQTX2PF_47_RT_OFFSET 30871
#define QM_REG_PQTX2PF_48_RT_OFFSET 30872
#define QM_REG_PQTX2PF_49_RT_OFFSET 30873
#define QM_REG_PQTX2PF_50_RT_OFFSET 30874
#define QM_REG_PQTX2PF_51_RT_OFFSET 30875
#define QM_REG_PQTX2PF_52_RT_OFFSET 30876
#define QM_REG_PQTX2PF_53_RT_OFFSET 30877
#define QM_REG_PQTX2PF_54_RT_OFFSET 30878
#define QM_REG_PQTX2PF_55_RT_OFFSET 30879
#define QM_REG_PQTX2PF_56_RT_OFFSET 30880
#define QM_REG_PQTX2PF_57_RT_OFFSET 30881
#define QM_REG_PQTX2PF_58_RT_OFFSET 30882
#define QM_REG_PQTX2PF_59_RT_OFFSET 30883
#define QM_REG_PQTX2PF_60_RT_OFFSET 30884
#define QM_REG_PQTX2PF_61_RT_OFFSET 30885
#define QM_REG_PQTX2PF_62_RT_OFFSET 30886
#define QM_REG_PQTX2PF_63_RT_OFFSET 30887
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30888
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30889
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30890
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30891
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30892
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30893
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30894
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30895
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30896
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30897
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30898
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30899
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30900
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30901
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30902
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30903
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30904
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30905
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30906
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30907
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30908
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30909
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30910
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30911
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30912
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30913
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30914
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30915
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30916
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 31172
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_OFFSET 31428
#define QM_REG_RLGLBLCRD_RT_SIZE 256
#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLGLBLENABLE_RT_OFFSET 31684
#define QM_REG_RLPFPERIOD_RT_OFFSET 31685
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 31686
#define QM_REG_RLPFINCVAL_RT_OFFSET 31687
#define QM_REG_RLPFINCVAL_RT_SIZE 16
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 31703
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_OFFSET 31719
#define QM_REG_RLPFCRD_RT_SIZE 16
#define QM_REG_RLPFENABLE_RT_OFFSET 30871
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_RLPFENABLE_RT_OFFSET 31735
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 31736
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 31737
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 31753
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
#define QM_REG_WFQPFCRD_RT_OFFSET 30905
#define QM_REG_WFQPFCRD_RT_OFFSET 31769
#define QM_REG_WFQPFCRD_RT_SIZE 256
#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_WFQPFENABLE_RT_OFFSET 32025
#define QM_REG_WFQVPENABLE_RT_OFFSET 32026
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 32027
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_OFFSET 32539
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 33051
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_OFFSET 33563
#define QM_REG_WFQVPCRD_RT_SIZE 512
#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_OFFSET 34075
#define QM_REG_WFQVPMAP_RT_SIZE 512
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34587
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
#define QM_REG_VOQCRDLINE_RT_OFFSET 34907
#define QM_REG_VOQCRDLINE_RT_SIZE 36
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34943
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34979
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34980
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34981
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34982
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34983
#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34984
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34985
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34986
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34990
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34994
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34998
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34999
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 35031
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 35047
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 35063
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 35079
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 35095
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 35096
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 35097
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 35098
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 35099
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 35100
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 35101
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 35102
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 35103
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 35104
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 35105
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 35106
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 35107
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 35108
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 35109
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 35110
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 35111
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 35112
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 35113
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 35114
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 35115
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 35116
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 35117
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 35118
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 35119
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 35120
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 35121
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 35122
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 35123
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 35124
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 35125
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 35126
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 35127
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 35128
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 35129
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 35130
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 35131
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 35132
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 35133
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 35134
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 35135
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 35136
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 35137
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 35138
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 35139
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 35140
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 35141
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 35142
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 35143
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 35144
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 35145
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 35146
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 35147
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 35148
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 35149
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 35150
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 35151
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 35152
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 35153
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 35154
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 35155
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 35156
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 35157
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 35158
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 35159
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 35160
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 35161
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 35162
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 35163
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 35164
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 35165
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 35166
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 35167
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 35168
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 35169
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 35170
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 35171
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 35172
#define RUNTIME_ARRAY_SIZE 34309
#define RUNTIME_ARRAY_SIZE 35173
#endif /* __RT_DEFS_H__ */

View File

@ -71,6 +71,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
* for a physical function (PF).
*
* @param p_hwfn
* @param p_ptt
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
@ -80,6 +81,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
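
Since the tunnel-update ramrod now takes an explicit PTT, a caller sketch looks like the following. The wrapper name, the VXLAN port value and the CB completion mode with a null callback are illustrative; PTT acquire/release as used elsewhere in this change.

/* Hypothetical runtime VXLAN port update through the new p_ptt argument. */
static enum _ecore_status_t
qlnx_update_vxlan_port(struct ecore_hwfn *p_hwfn, u16 port)
{
        struct ecore_tunnel_info tunn;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        OSAL_MEMSET(&tunn, 0, sizeof(tunn));
        tunn.vxlan_port.b_update_port = true;
        tunn.vxlan_port.port = port;

        rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
                                         ECORE_SPQ_MODE_CB, OSAL_NULL);

        ecore_ptt_release(p_hwfn, p_ptt);
        return rc;
}
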

View File

@ -31,7 +31,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore.h"
@ -257,6 +256,7 @@ static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
@ -266,14 +266,14 @@ static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
}
if (p_tunn->vxlan_port.b_update_port)
ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
ecore_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@ -319,6 +319,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
@ -426,12 +427,13 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
&p_hwfn->p_dev->tunnel);
return rc;
}
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
@ -501,6 +503,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@ -541,7 +544,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
return rc;
}
@ -587,3 +590,27 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_CB;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
p_ent->ramrod.pf_update.mf_vlan = OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

View File

@ -28,6 +28,7 @@
*
*/
#ifndef __ECORE_SP_COMMANDS_H__
#define __ECORE_SP_COMMANDS_H__
@ -81,6 +82,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param p_ptt
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
@ -90,6 +92,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
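
The start ramrod likewise gains a PTT argument; a sketch of the call as it might be made from the load path, reusing the hwfn's main PTT and the device-level tunnel configuration seen in the .c hunks above. The wrapper name is hypothetical and the mf mode is left to the caller.

/* Hypothetical load-path call: p_main_ptt and p_dev->tunnel as used above. */
static enum _ecore_status_t
qlnx_pf_start(struct ecore_hwfn *p_hwfn, enum ecore_mf_mode mf_mode)
{
        return ecore_sp_pf_start(p_hwfn, p_hwfn->p_main_ptt,
                                 &p_hwfn->p_dev->tunnel, mf_mode,
                                 true /* allow_npar_tx_switch */);
}
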
@ -107,7 +110,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_stop - PF Function Stop Ramrod
@ -165,4 +168,14 @@ struct ecore_rl_update_params {
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params);
/**
* @brief ecore_sp_pf_update_stag - PF STAG value update Ramrod
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
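
A brief sketch of how the new S-tag update ramrod might be driven, assuming the caller refreshes hw_info.ovlan (the value the implementation sends) before posting; the handler name and the notification context are hypothetical.

/* Hypothetical reaction to an outer-VLAN (S-tag) change notification. */
static void
qlnx_handle_stag_change(struct ecore_hwfn *p_hwfn, u16 new_ovlan)
{
        p_hwfn->hw_info.ovlan = new_ovlan;      /* value carried by the ramrod */

        if (ecore_sp_pf_update_stag(p_hwfn) != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, false, "S-tag update ramrod failed\n");
}
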
#endif /*__ECORE_SP_COMMANDS_H__*/

View File

@ -116,10 +116,9 @@ static void ecore_iscsi_eq_dump(struct ecore_hwfn *p_hwfn,
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data,
u8 fw_return_code)
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
union event_ring_data OSAL_UNUSED *data,
u8 fw_return_code)
{
struct ecore_spq_comp_done *comp_done;
@ -168,6 +167,7 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct ecore_spq_comp_done *comp_done;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@ -184,8 +184,14 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_NOTICE(p_hwfn, true, "ptt, failed to acquire\n");
return ECORE_AGAIN;
}
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
rc = ecore_mcp_drain(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;
@ -194,15 +200,20 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
/* Retry after drain */
rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
goto out;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
out:
ecore_ptt_release(p_hwfn, p_ptt);
return ECORE_SUCCESS;
err:
ecore_ptt_release(p_hwfn, p_ptt);
DP_NOTICE(p_hwfn, true,
"Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
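
The reworked blocking path above now brackets the MCP drain with its own PTT instead of borrowing p_main_ptt; the same acquire/drain/release shape, reduced to a standalone sketch with a hypothetical wrapper name.

/* Minimal sketch of the drain-with-own-PTT pattern used by ecore_spq_block(). */
static enum _ecore_status_t
qlnx_mcp_drain(struct ecore_hwfn *p_hwfn)
{
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;     /* no free PTT window right now */

        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");

        ecore_ptt_release(p_hwfn, p_ptt);
        return rc;
}
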
@ -253,10 +264,10 @@ static enum _ecore_status_t ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn,
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
struct e4_core_conn_context *p_cxt;
struct ecore_cxt_info cxt_info;
struct core_conn_context *p_cxt;
enum _ecore_status_t rc;
u16 physical_q;
enum _ecore_status_t rc;
cxt_info.iid = p_spq->cid;
@ -530,7 +541,7 @@ void ecore_eq_free(struct ecore_hwfn *p_hwfn)
}
/***************************************************************************
* CQE API - manipulate EQ functionality
* CQE API - manipulate EQ functionallity
***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
@ -648,7 +659,9 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
#endif
p_hwfn->p_spq = p_spq;
return ECORE_SUCCESS;
@ -677,7 +690,9 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
}
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif
OSAL_FREE(p_hwfn->p_dev, p_spq);
p_hwfn->p_spq = OSAL_NULL;
@ -736,7 +751,7 @@ void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
* list. Should be used while lock is being held.
*
* Addes an entry to the pending list is there is room (en empty
* element is available in the free_pool), or else places the
* element is avaliable in the free_pool), or else places the
* entry in the unlimited_pending pool.
*
* @param p_hwfn
@ -937,7 +952,7 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
/* For entries in ECORE BLOCK mode, the completion code cannot
* perform the necessary cleanup - if it did, we couldn't
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer - perform the cleanup here.
* Thus, after gaining the answer perform the cleanup here.
*/
rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
p_ent->queue == &p_spq->unlimited_pending);
@ -1120,4 +1135,3 @@ void ecore_consq_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
p_hwfn->p_consq = OSAL_NULL;
}

View File

@ -38,7 +38,7 @@
#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
(E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
(MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@ -108,7 +108,7 @@ struct ecore_vf_queue {
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
VF_ACQUIRED = 1, /* VF, acquired, but not initalized */
VF_ACQUIRED = 1, /* VF, aquired, but not initalized */
VF_ENABLED = 2, /* VF, Enabled */
VF_RESET = 3, /* VF, FLR'd, pending cleanup */
VF_STOPPED = 4 /* VF, Stopped */
@ -192,7 +192,7 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
struct ecore_vf_info vfs_array[MAX_NUM_VFS_E4];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
#ifndef REMOVE_DBG
@ -228,17 +228,13 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param offset
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
u8 **offset,
u16 type,
u16 length);
void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
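
With p_hwfn dropped from ecore_add_tlv(), building a message is a matter of walking an offset pointer; a sketch with placeholder type and length values, since the real channel TLV constants are not part of this excerpt.

/* Hypothetical TLV chain: types/lengths are placeholders, not real channel TLVs. */
static void
qlnx_build_example_tlvs(u8 *buf, u16 req_type, u16 req_len,
                        u16 end_type, u16 end_len)
{
        u8 *offset = buf;
        void *p_req, *p_end;

        /* First TLV: the returned pointer is where the caller fills the body. */
        p_req = ecore_add_tlv(&offset, req_type, req_len);

        /* Terminating TLV placed at the next offset. */
        p_end = ecore_add_tlv(&offset, end_type, end_len);

        (void)p_req;
        (void)p_end;
}
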
/**
* @brief list the types and lengths of the tlvs on the buffer
@ -262,10 +258,8 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_free - free sriov related resources
@ -300,7 +294,7 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
* @return true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
u32 *disabled_vfs);
@ -332,18 +326,18 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only);
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs) {return 0;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, u16 relative_vf_id, bool b_enabled_only) {return OSAL_NULL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED **offset, OSAL_UNUSED u16 type, OSAL_UNUSED u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev OSAL_UNUSED *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED opcode, __le16 OSAL_UNUSED echo, union event_ring_data OSAL_UNUSED *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 OSAL_UNUSED crc, u8 OSAL_UNUSED *ptr, u32 OSAL_UNUSED length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u32 OSAL_UNUSED *disabled_vfs) {return false;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, void OSAL_UNUSED *p_tlvs_list, u16 OSAL_UNUSED req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED relative_vf_id, bool OSAL_UNUSED b_enabled_only) {return OSAL_NULL;}
#endif
#endif /* __ECORE_SRIOV_H__ */

View File

@ -28,7 +28,6 @@
*
*/
#ifndef __ECORE_VF_H__
#define __ECORE_VF_H__
@ -69,7 +68,7 @@ struct ecore_vf_iov {
* start, and as they lack an IGU mapping they need to store the
* addresses of previously registered SBs.
* Even if we were to change configuration flow, due to backward
* compatibility [with older PFs] we'd still need to store these.
* compatability [with older PFs] we'd still need to store these.
*/
struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
};
@ -174,6 +173,7 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
/* TODO - fix all the !SRIOV prototypes */
#ifndef LINUX_REMOVE
/**
* @brief VF - update the RX queue by sending a message to the
* PF
@ -191,6 +191,7 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
u8 num_rxqs,
u8 comp_cqe_flg,
u8 comp_event_flg);
#endif
/**
* @brief VF - send a vport update command
@ -293,63 +294,59 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
* @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *p_params,
void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *p_link,
void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn);
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void OSAL_IOMEM **pp_prod) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, void OSAL_IOMEM **pp_doorbell) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, u16 OSAL_UNUSED bd_max_bytes, dma_addr_t OSAL_UNUSED bd_chain_phys_addr, dma_addr_t OSAL_UNUSED cqe_pbl_addr, u16 OSAL_UNUSED cqe_pbl_size, void OSAL_IOMEM OSAL_UNUSED **pp_prod) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, dma_addr_t OSAL_UNUSED pbl_addr, u16 OSAL_UNUSED pbl_size, void OSAL_IOMEM OSAL_UNUSED **pp_doorbell) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, bool cqe_completion) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid **pp_cid, u8 num_rxqs, u8 comp_cqe_flg, u8 comp_event_flg) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, struct ecore_sp_vport_update_params *p_params) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, u16 sb_id, struct ecore_sb_info *p_sb) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid, bool OSAL_UNUSED cqe_completion) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED *p_cid) {return ECORE_INVAL;}
#ifndef LINUX_REMOVE
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_queue_cid OSAL_UNUSED **pp_cid, u8 OSAL_UNUSED num_rxqs, u8 OSAL_UNUSED comp_cqe_flg, u8 OSAL_UNUSED comp_event_flg) {return ECORE_INVAL;}
#endif
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_sp_vport_update_params OSAL_UNUSED *p_params) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED sb_id, struct ecore_sb_info OSAL_UNUSED *p_sb) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu, u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe, u8 only_untagged) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, struct ecore_filter_mcast *p_filter_cmd) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_params *p_params, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_state *p_link, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_capabilities *p_link_caps, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn, struct ecore_tunnel_info *p_tunn) { return ECORE_INVAL; }
static OSAL_INLINE void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun) { return; }
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED vport_id, u16 OSAL_UNUSED mtu, u8 OSAL_UNUSED inner_vlan_removal, enum ecore_tpa_mode OSAL_UNUSED tpa_mode, u8 OSAL_UNUSED max_buffers_per_cqe, u8 OSAL_UNUSED only_untagged) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_filter_ucast OSAL_UNUSED *p_param) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_filter_mcast OSAL_UNUSED *p_filter_cmd) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_params OSAL_UNUSED *p_params, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_state OSAL_UNUSED *p_link, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_link_caps, struct ecore_bulletin_content OSAL_UNUSED *p_bulletin) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_tunnel_param_update(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_tunnel_info OSAL_UNUSED *p_tunn) { return ECORE_INVAL; }
static OSAL_INLINE void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info OSAL_UNUSED *p_tun) { return; }
#endif
#endif /* __ECORE_VF_H__ */

View File

@ -47,7 +47,7 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change);
/**
* @brief Get link parameters for VF from ecore
*
* @param p_hwfn
* @param params - the link params structure to be filled for the VF
@ -128,6 +128,7 @@ void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
*/
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
#ifndef LINUX_REMOVE
/**
* @brief Copy forced MAC address from bulletin board
*
@ -162,6 +163,7 @@ bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
*/
bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn);
#endif
/**
* @brief Set firmware version information in dev_info from VFs acquire response tlv
@ -180,19 +182,21 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
u16 *p_vxlan_port, u16 *p_geneve_port);
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, u8 *p_change) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_params *params) {}
static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_state *link) {}
static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_capabilities *p_link_caps) {}
static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs) {}
static OSAL_INLINE void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, u8 *num_txqs) {}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac) {}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, u8 *num_vlan_filters) {}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, u8 *num_mac_filters) {}
static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) {return false;}
static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, u8 *p_is_forced) {return false;}
static OSAL_INLINE bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn) {return false; }
static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) {}
static OSAL_INLINE void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, u16 *p_vxlan_port, u16 *p_geneve_port) { return; }
static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *p_change) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_params OSAL_UNUSED *params) {}
static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_state OSAL_UNUSED *link) {}
static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn OSAL_UNUSED *p_hwfn, struct ecore_mcp_link_capabilities OSAL_UNUSED *p_link_caps) {}
static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_rxqs) {}
static OSAL_INLINE void ecore_vf_get_num_txqs(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_txqs) {}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *port_mac) {}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_vlan_filters) {}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *num_mac_filters) {}
static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u8 OSAL_UNUSED *mac) {return false;}
#ifndef LINUX_REMOVE
static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn OSAL_UNUSED *hwfn, u8 OSAL_UNUSED *dst_mac, u8 OSAL_UNUSED *p_is_forced) {return false;}
static OSAL_INLINE bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn OSAL_UNUSED *p_hwfn) {return false; }
#endif
static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED *fw_major, u16 OSAL_UNUSED *fw_minor, u16 OSAL_UNUSED *fw_rev, u16 OSAL_UNUSED *fw_eng) {}
static OSAL_INLINE void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn OSAL_UNUSED *p_hwfn, u16 OSAL_UNUSED *p_vxlan_port, u16 OSAL_UNUSED *p_geneve_port) { return; }
#endif
#endif

View File

@ -28,12 +28,15 @@
*
*/
#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__
#define T_ETH_INDIRECTION_TABLE_SIZE 128 /* @@@ TBD MichalK this should be HSI? */
#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
#ifndef LINUX_REMOVE
#define ETH_ALEN 6 /* @@@ TBD MichalK - should this be defined here?*/
#endif
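
T_ETH_INDIRECTION_TABLE_SIZE and T_ETH_RSS_KEY_SIZE size the RSS indirection table and hash key a VF passes to the PF over this interface. A minimal sketch of the usual round-robin default fill, assuming only the two constants above (the array types and the helper are illustrative, not the driver's actual structures):

#include <stdint.h>

/* Illustrative only: spread the RX queues across the 128 indirection
 * buckets round-robin and fill the key with a fixed placeholder pattern;
 * a real key would come from a random source.
 */
static void rss_fill_defaults(uint16_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE],
    uint32_t key[T_ETH_RSS_KEY_SIZE], uint16_t num_rx_queues)
{
        int i;

        if (num_rx_queues == 0)
                num_rx_queues = 1;
        for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
                ind_table[i] = i % num_rx_queues;  /* queue id per bucket */
        for (i = 0; i < T_ETH_RSS_KEY_SIZE; i++)
                key[i] = 0x6d5a6d5a;               /* placeholder key word */
}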
/***********************************************
*
@ -105,11 +108,13 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
#ifndef LINUX_REMOVE
/* First bit was used on 8.7.x and 8.8.x versions, which had different
* FWs used but with the same fastpath HSI. As this was prior to the
* fastpath versioning, wanted to have ability to override fw matching
* and allow them to interact.
*/
#endif
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
@ -191,6 +196,11 @@ struct pfvf_acquire_resp_tlv {
* To overcome this, PFs now indicate that they're past that point and the new
* VFs would fail probe on the older PFs that fail to do so.
*/
#ifndef LINUX_REMOVE
/* Said bug was in quest/serpens; Can't be certain no official release included
* the bug since the fix arrived very late in the programs.
*/
#endif
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
/* PF expects queues to be received with additional qids */
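
Both VFPF_ACQUIRE_CAP_PRE_FP_HSI and PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE are plain bits in the capability words exchanged during the VF acquire handshake, so a feature test is a single mask. A minimal sketch, with the helper name and its argument invented for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: returns true when the PF's acquire response advertised the
 * post-FW-override behaviour (bit 2 of the capability word, per the
 * define above).
 */
static bool pf_acked_fw_override(uint32_t pf_capabilities)
{
        return (pf_capabilities & PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE) != 0;
}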

View File

@ -28,6 +28,7 @@
*
*/
#ifndef __ETH_COMMON__
#define __ETH_COMMON__
/********************/

View File

@ -28,6 +28,7 @@
*
*/
#ifndef __FCOE_COMMON__
#define __FCOE_COMMON__
/*********************/
@ -40,249 +41,6 @@
/*
* fields copied from the ABTS rsp packet
*/
struct fcoe_abts_pkt
{
__le32 abts_rsp_fc_payload_lo /* Abts flow: last 32 bits of fcPayload, out of 96 */;
__le16 abts_rsp_rx_id /* Abts flow: rxId parameter of the abts packet */;
u8 abts_rsp_rctl /* Abts flow: rctl parameter of the abts packet */;
u8 reserved2;
};
/*
* FCoE additional WQE (Sq/ XferQ) information
*/
union fcoe_additional_info_union
{
__le32 previous_tid /* Previous tid. Used for Send XFER WQEs in Multiple continuation mode - Target only. */;
__le32 parent_tid /* Parent tid. Used for write tasks in a continuation mode - Target only */;
__le32 burst_length /* The desired burst length. */;
__le32 seq_rec_updated_offset /* The updated offset in SGL - Used in sequence recovery */;
};
/*
* Cached data sges
*/
struct fcoe_exp_ro
{
__le32 data_offset /* data-offset */;
__le32 reserved /* High data-offset */;
};
/*
* Union of Cleanup address \ expected relative offsets
*/
union fcoe_cleanup_addr_exp_ro_union
{
struct regpair abts_rsp_fc_payload_hi /* Abts flow: first 64 bits of fcPayload, out of 96 */;
struct fcoe_exp_ro exp_ro /* Expected relative offsets */;
};
/*
* FCoE Ramrod Command IDs
*/
enum fcoe_completion_status
{
FCOE_COMPLETION_STATUS_SUCCESS /* FCoE ramrod completed successfully */,
FCOE_COMPLETION_STATUS_FCOE_VER_ERR /* Wrong FCoE version */,
FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR /* src_mac_arr for the current physical port is full- allocation failed */,
MAX_FCOE_COMPLETION_STATUS
};
/*
* FC address (SID/DID) network presentation
*/
struct fc_addr_nw
{
u8 addr_lo /* First byte of the SID/DID address that comes/goes from/to the NW (for example if SID is 11:22:33 - this is 0x11) */;
u8 addr_mid;
u8 addr_hi;
};
/*
* FCoE connection offload
*/
struct fcoe_conn_offload_ramrod_data
{
struct regpair sq_pbl_addr /* SQ Pbl base address */;
struct regpair sq_curr_page_addr /* SQ current page address */;
struct regpair sq_next_page_addr /* SQ next page address */;
struct regpair xferq_pbl_addr /* XFERQ Pbl base address */;
struct regpair xferq_curr_page_addr /* XFERQ current page address */;
struct regpair xferq_next_page_addr /* XFERQ next page address */;
struct regpair respq_pbl_addr /* RESPQ Pbl base address */;
struct regpair respq_curr_page_addr /* RESPQ current page address */;
struct regpair respq_next_page_addr /* RESPQ next page address */;
__le16 dst_mac_addr_lo /* First word of the MAC address that comes/goes from/to the NW (for example if MAC is 11:22:33:44:55:66 - this is 0x2211) */;
__le16 dst_mac_addr_mid;
__le16 dst_mac_addr_hi;
__le16 src_mac_addr_lo /* Source MAC address in NW order - First word of the MAC address that comes/goes from/to the NW (for example if MAC is 11:22:33:44:55:66 - this is 0x2211) */;
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le16 tx_max_fc_pay_len /* The maximum acceptable FC payload size (Buffer-to-buffer Receive Data_Field size) supported by target, received during both FLOGI and PLOGI, minimum value should be taken */;
__le16 e_d_tov_timer_val /* E_D_TOV timeout value in resolution of 1 msec */;
__le16 rx_max_fc_pay_len /* Maximum acceptable FC payload size supported by us */;
__le16 vlan_tag;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF /* Vlan id */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 /* Canonical format indicator */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 /* Vlan priority */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
__le16 physical_q0 /* Physical QM queue to be linked to logical queue 0 (fastPath queue) */;
__le16 rec_rr_tov_timer_val /* REC_TOV timeout value in resolution of 1 msec */;
struct fc_addr_nw s_id /* Source ID in NW order, received during FLOGI */;
u8 max_conc_seqs_c3 /* Maximum concurrent Sequences for Class 3 supported by target, received during PLOGI */;
struct fc_addr_nw d_id /* Destination ID in NW order, received after inquiry of the fabric network */;
u8 flags;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 /* Continuously increasing SEQ_CNT indication, received during PLOGI */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 /* Confirmation request supported */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 /* REC allowed */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 /* Does inner vlan exist */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 /* indication for conn mode: 0=Initiator, 1=Target, 2=Both Initiator and Target */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
__le16 conn_id /* Drivers connection ID. Should be sent in EQs to speed-up drivers access to connection data. */;
u8 def_q_idx /* Default queue number to be used for unsolicited traffic */;
u8 reserved[5];
};
/*
* FCoE terminate connection request
*/
struct fcoe_conn_terminate_ramrod_data
{
struct regpair terminate_params_addr /* Terminate params ptr */;
};
/*
* Data sgl
*/
struct fcoe_slow_sgl_ctx
{
struct regpair base_sgl_addr /* Address of first SGE in SGL */;
__le16 curr_sge_off /* Offset in current BD (in bytes) */;
__le16 remainder_num_sges /* Number of BDs */;
__le16 curr_sgl_index /* Index of current SGE */;
__le16 reserved;
};
/*
* Union of DIX SGL \ cached DIX sges
*/
union fcoe_dix_desc_ctx
{
struct fcoe_slow_sgl_ctx dix_sgl /* DIX slow-SGL data base */;
struct scsi_sge cached_dix_sge /* Cached DIX sge */;
};
/*
* Data sgl
*/
struct fcoe_fast_sgl_ctx
{
struct regpair sgl_start_addr /* Current sge address */;
__le32 sgl_byte_offset /* Byte offset from the beginning of the first page in the SGL. In case SGL starts in the middle of page then driver should init this value with the start offset */;
__le16 task_reuse_cnt /* The reuse count for that task. Wraps on a 4K value. */;
__le16 init_offset_in_first_sge /* offset from the beginning of the first page in the SGL, never changed by FW */;
};
/*
* FCP CMD payload
*/
struct fcoe_fcp_cmd_payload
{
__le32 opaque[8] /* The FCP_CMD payload */;
};
/*
* FCP RSP payload
*/
struct fcoe_fcp_rsp_payload
{
__le32 opaque[6] /* The FCP_RSP payload */;
};
/*
* FCP RSP payload
*/
struct fcoe_fcp_xfer_payload
{
__le32 opaque[3] /* The FCP_XFER payload */;
};
/*
* FCoE firmware function init
*/
struct fcoe_init_func_ramrod_data
{
struct scsi_init_func_params func_params /* Common SCSI init params passed by driver to FW in function init ramrod */;
struct scsi_init_func_queues q_params /* SCSI RQ/CQ/CMDQ firmware function init parameters */;
__le16 mtu /* Max transmission unit */;
__le16 sq_num_pages_in_pbl /* Number of pages at Send Queue */;
__le32 reserved;
};
/*
* FCoE: Mode of the connection: Target or Initiator or both
*/
enum fcoe_mode_type
{
FCOE_INITIATOR_MODE=0x0,
FCOE_TARGET_MODE=0x1,
FCOE_BOTH_OR_NOT_CHOSEN=0x3,
MAX_FCOE_MODE_TYPE
};
/*
* Per PF FCoE receive path statistics - tStorm RAM structure
*/
struct fcoe_rx_stat
{
struct regpair fcoe_rx_byte_cnt /* Number of FCoE bytes that were received */;
struct regpair fcoe_rx_data_pkt_cnt /* Number of FCoE FCP DATA packets that were received */;
struct regpair fcoe_rx_xfer_pkt_cnt /* Number of FCoE FCP XFER RDY packets that were received */;
struct regpair fcoe_rx_other_pkt_cnt /* Number of FCoE packets which are not DATA/XFER_RDY that were received */;
__le32 fcoe_silent_drop_pkt_cmdq_full_cnt /* Number of packets that were silently dropped since CMDQ was full */;
__le32 fcoe_silent_drop_pkt_rq_full_cnt /* Number of packets that were silently dropped since RQ (BDQ) was full */;
__le32 fcoe_silent_drop_pkt_crc_error_cnt /* Number of packets that were silently dropped due to FC CRC error */;
__le32 fcoe_silent_drop_pkt_task_invalid_cnt /* Number of packets that were silently dropped since task was not valid */;
__le32 fcoe_silent_drop_total_pkt_cnt /* Number of FCoE packets that were silently dropped */;
__le32 rsrv;
};
/*
* FCoe statistics request
*/
struct fcoe_stat_ramrod_data
{
struct regpair stat_params_addr /* Statistics host address */;
};
/*
* The fcoe storm task context protection-information of Ystorm
*/
@ -314,6 +72,22 @@ union protection_info_union_ctx
__le32 value /* If and only if this field is not 0 then protection is set */;
};
/*
* FCP CMD payload
*/
struct fcoe_fcp_cmd_payload
{
__le32 opaque[8] /* The FCP_CMD payload */;
};
/*
* FCP RSP payload
*/
struct fcoe_fcp_rsp_payload
{
__le32 opaque[6] /* The FCP_RSP payload */;
};
/*
* FCP RSP payload
*/
@ -323,6 +97,14 @@ struct fcp_rsp_payload_padded
__le32 reserved[2];
};
/*
* FCP RSP payload
*/
struct fcoe_fcp_xfer_payload
{
__le32 opaque[3] /* The FCP_XFER payload */;
};
/*
* FCP RSP payload
*/
@ -390,6 +172,27 @@ union fcoe_tx_info_union_ctx
struct fcoe_tx_params tx_params /* Task TX params */;
};
/*
* Data sgl
*/
struct fcoe_slow_sgl_ctx
{
struct regpair base_sgl_addr /* Address of first SGE in SGL */;
__le16 curr_sge_off /* Offset in current BD (in bytes) */;
__le16 remainder_num_sges /* Number of BDs */;
__le16 curr_sgl_index /* Index of current SGE */;
__le16 reserved;
};
/*
* Union of DIX SGL \ cached DIX sges
*/
union fcoe_dix_desc_ctx
{
struct fcoe_slow_sgl_ctx dix_sgl /* DIX slow-SGL data base */;
struct scsi_sge cached_dix_sge /* Cached DIX sge */;
};
/*
* The fcoe storm task context of Ystorm
*/
@ -554,6 +357,35 @@ struct e4_tstorm_fcoe_task_ag_ctx
__le32 data_offset_next /* reg2 */;
};
/*
* Cached data sges
*/
struct fcoe_exp_ro
{
__le32 data_offset /* data-offset */;
__le32 reserved /* High data-offset */;
};
/*
* Union of Cleanup address \ expected relative offsets
*/
union fcoe_cleanup_addr_exp_ro_union
{
struct regpair abts_rsp_fc_payload_hi /* Abts flow: first 64 bits of fcPayload, out of 96 */;
struct fcoe_exp_ro exp_ro /* Expected relative offsets */;
};
/*
* fields copied from the ABTS rsp packet
*/
struct fcoe_abts_pkt
{
__le32 abts_rsp_fc_payload_lo /* Abts flow: last 32 bits of fcPayload, out of 96 */;
__le16 abts_rsp_rx_id /* Abts flow: rxId parameter of the abts packet */;
u8 abts_rsp_rctl /* Abts flow: rctl parameter of the abts packet */;
u8 reserved2;
};
/*
* FW read-write (modifiable) part. The fcoe task storm context of Tstorm
*/
@ -764,7 +596,7 @@ struct e4_ustorm_fcoe_task_ag_ctx
/*
* fcoe task context
*/
struct fcoe_task_context
struct e4_fcoe_task_context
{
struct ystorm_fcoe_task_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
@ -781,150 +613,77 @@ struct fcoe_task_context
};
/*
* Per PF FCoE transmit path statistics - pStorm RAM structure
*/
struct fcoe_tx_stat
{
struct regpair fcoe_tx_byte_cnt /* Transmitted FCoE bytes count */;
struct regpair fcoe_tx_data_pkt_cnt /* Transmitted FCoE FCP DATA packets count */;
struct regpair fcoe_tx_xfer_pkt_cnt /* Transmitted FCoE XFER_RDY packets count */;
struct regpair fcoe_tx_other_pkt_cnt /* Transmitted FCoE packets which are not DATA/XFER_RDY count */;
};
/*
* FCoE SQ/XferQ element
*/
struct fcoe_wqe
{
__le16 task_id /* Initiator - The task identifier (OX_ID). Target - Continuation tid or RX_ID in non-continuation mode */;
__le16 flags;
#define FCOE_WQE_REQ_TYPE_MASK 0xF /* Type of the wqe request (use enum fcoe_sqe_request_type) */
#define FCOE_WQE_REQ_TYPE_SHIFT 0
#define FCOE_WQE_SGL_MODE_MASK 0x1 /* The driver will give a hint about sizes of SGEs for better credits evaluation at Xstorm (use enum scsi_sgl_mode) */
#define FCOE_WQE_SGL_MODE_SHIFT 4
#define FCOE_WQE_CONTINUATION_MASK 0x1 /* Indication if this wqe is a continuation to an existing task (Target only) */
#define FCOE_WQE_CONTINUATION_SHIFT 5
#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 /* Indication to FW to send FCP_RSP after all data was sent - Target only */
#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
#define FCOE_WQE_RESERVED_MASK 0x1
#define FCOE_WQE_RESERVED_SHIFT 7
#define FCOE_WQE_NUM_SGES_MASK 0xF /* Number of SGEs. 8 = at least 8 sges */
#define FCOE_WQE_NUM_SGES_SHIFT 8
#define FCOE_WQE_RESERVED1_MASK 0xF
#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union /* Additional wqe information (if needed) */;
};
/*
* FCoE XFRQ element
*/
struct xfrqe_prot_flags
{
u8 flags;
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF /* Protection log interval (9=512 10=1024 11=2048 12=4096 13=8192) */
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 /* If DIF protection is configured against target (0=no, 1=yes) */
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 /* If DIF/DIX protection is configured against the host (0=none, 1=DIF, 2=DIX) */
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 /* Must set to 0 */
#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
struct e5_mstorm_fcoe_task_ag_ctx
struct e5_ystorm_fcoe_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
__le16 word0 /* icid */;
u8 flags0;
#define E5_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
#define E5_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF /* connection_type */
#define E5_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 /* cf0 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 /* rule5en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit4 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 received_bytes /* reg0 */;
u8 cleanup_state /* byte2 */;
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit5 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 reg0 /* reg0 */;
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
u8 glbl_q_num /* byte4 */;
u8 byte4 /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 word1 /* regpair0 */;
__le16 tid_to_xfer /* word2 */;
__le16 rx_id /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le16 word5 /* regpair1 */;
__le16 word5 /* word5 */;
__le16 e4_reserved8 /* word6 */;
__le32 expected_bytes /* reg1 */;
__le32 reg1 /* reg1 */;
};
struct e5_tstorm_fcoe_task_ag_ctx
{
u8 reserved /* cdu_validation */;
@ -1005,6 +764,76 @@ struct e5_tstorm_fcoe_task_ag_ctx
__le32 data_offset_next /* reg2 */;
};
struct e5_mstorm_fcoe_task_ag_ctx
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 icid /* icid */;
u8 flags0;
#define E5_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF /* connection_type */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 /* exist_in_qm1 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 /* cf0 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 /* cf0en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 /* rule5en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit4 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_MSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 received_bytes /* reg0 */;
u8 cleanup_state /* byte2 */;
u8 byte3 /* byte3 */;
u8 glbl_q_num /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 word1 /* regpair0 */;
__le16 tid_to_xfer /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le16 word5 /* regpair1 */;
__le16 e4_reserved8 /* word6 */;
__le32 expected_bytes /* reg1 */;
};
struct e5_ustorm_fcoe_task_ag_ctx
{
@ -1082,79 +911,271 @@ struct e5_ustorm_fcoe_task_ag_ctx
__le32 reg4 /* reg4 */;
};
struct e5_ystorm_fcoe_task_ag_ctx
/*
* fcoe task context
*/
struct e5_fcoe_task_context
{
u8 byte0 /* cdu_validation */;
u8 byte1 /* state_and_core_id */;
__le16 word0 /* icid */;
u8 flags0;
#define E5_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF /* connection_type */
#define E5_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 /* cf2special */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 flags3;
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_MASK 0x1 /* bit5 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED1_SHIFT 0
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_MASK 0x3 /* cf3 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED2_SHIFT 1
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_MASK 0x3 /* cf4 */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED3_SHIFT 3
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_MASK 0x1 /* cf3en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED4_SHIFT 5
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_MASK 0x1 /* cf4en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED5_SHIFT 6
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_MASK 0x1 /* rule7en */
#define E5_YSTORM_FCOE_TASK_AG_CTX_E4_RESERVED6_SHIFT 7
__le32 reg0 /* reg0 */;
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
u8 e4_reserved7 /* byte5 */;
__le16 rx_id /* word1 */;
__le16 word2 /* word2 */;
__le16 word3 /* word3 */;
__le16 word4 /* word4 */;
__le16 word5 /* word5 */;
__le16 e4_reserved8 /* word6 */;
__le32 reg1 /* reg1 */;
struct ystorm_fcoe_task_st_ctx ystorm_st_context /* ystorm storm context */;
struct regpair ystorm_st_padding[2] /* padding */;
struct tdif_task_context tdif_context /* tdif context */;
struct e5_ystorm_fcoe_task_ag_ctx ystorm_ag_context /* ystorm aggregative context */;
struct e5_tstorm_fcoe_task_ag_ctx tstorm_ag_context /* tstorm aggregative context */;
struct timers_context timer_context /* timer context */;
struct tstorm_fcoe_task_st_ctx tstorm_st_context /* tstorm storm context */;
struct regpair tstorm_st_padding[2] /* padding */;
struct e5_mstorm_fcoe_task_ag_ctx mstorm_ag_context /* mstorm aggregative context */;
struct mstorm_fcoe_task_st_ctx mstorm_st_context /* mstorm storm context */;
struct e5_ustorm_fcoe_task_ag_ctx ustorm_ag_context /* ustorm aggregative context */;
struct rdif_task_context rdif_context /* rdif context */;
};
/*
* FCoE additional WQE (Sq/ XferQ) information
*/
union fcoe_additional_info_union
{
__le32 previous_tid /* Previous tid. Used for Send XFER WQEs in Multiple continuation mode - Target only. */;
__le32 parent_tid /* Parent tid. Used for write tasks in a continuation mode - Target only */;
__le32 burst_length /* The desired burst length. */;
__le32 seq_rec_updated_offset /* The updated offset in SGL - Used in sequence recovery */;
};
/*
* FCoE Ramrod Command IDs
*/
enum fcoe_completion_status
{
FCOE_COMPLETION_STATUS_SUCCESS /* FCoE ramrod completed successfully */,
FCOE_COMPLETION_STATUS_FCOE_VER_ERR /* Wrong FCoE version */,
FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR /* src_mac_arr for the current physical port is full- allocation failed */,
MAX_FCOE_COMPLETION_STATUS
};
/*
* FC address (SID/DID) network presentation
*/
struct fc_addr_nw
{
u8 addr_lo /* First byte of the SID/DID address that comes/goes from/to the NW (for example if SID is 11:22:33 - this is 0x11) */;
u8 addr_mid;
u8 addr_hi;
};
/*
* FCoE connection offload
*/
struct fcoe_conn_offload_ramrod_data
{
struct regpair sq_pbl_addr /* SQ Pbl base address */;
struct regpair sq_curr_page_addr /* SQ current page address */;
struct regpair sq_next_page_addr /* SQ next page address */;
struct regpair xferq_pbl_addr /* XFERQ Pbl base address */;
struct regpair xferq_curr_page_addr /* XFERQ current page address */;
struct regpair xferq_next_page_addr /* XFERQ next page address */;
struct regpair respq_pbl_addr /* RESPQ Pbl base address */;
struct regpair respq_curr_page_addr /* RESPQ current page address */;
struct regpair respq_next_page_addr /* RESPQ next page address */;
__le16 dst_mac_addr_lo /* First word of the MAC address that comes/goes from/to the NW (for example if MAC is 11:22:33:44:55:66 - this is 0x2211) */;
__le16 dst_mac_addr_mid;
__le16 dst_mac_addr_hi;
__le16 src_mac_addr_lo /* Source MAC address in NW order - First word of the MAC address that comes/goes from/to the NW (for example if MAC is 11:22:33:44:55:66 - this is 0x2211) */;
__le16 src_mac_addr_mid;
__le16 src_mac_addr_hi;
__le16 tx_max_fc_pay_len /* The maximum acceptable FC payload size (Buffer-to-buffer Receive Data_Field size) supported by target, received during both FLOGI and PLOGI, minimum value should be taken */;
__le16 e_d_tov_timer_val /* E_D_TOV timeout value in resolution of 1 msec */;
__le16 rx_max_fc_pay_len /* Maximum acceptable FC payload size supported by us */;
__le16 vlan_tag;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF /* Vlan id */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 /* Canonical format indicator */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 /* Vlan priority */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13
__le16 physical_q0 /* Physical QM queue to be linked to logical queue 0 (fastPath queue) */;
__le16 rec_rr_tov_timer_val /* REC_TOV timeout value in resolution of 1 msec */;
struct fc_addr_nw s_id /* Source ID in NW order, received during FLOGI */;
u8 max_conc_seqs_c3 /* Maximum concurrent Sequences for Class 3 supported by target, received during PLOGI */;
struct fc_addr_nw d_id /* Destination ID in NW order, received after inquiry of the fabric network */;
u8 flags;
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 /* Continuously increasing SEQ_CNT indication, received during PLOGI */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 /* Confirmation request supported */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 /* REC allowed */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 /* Does inner vlan exist */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 /* indication for conn mode: 0=Initiator, 1=Target, 2=Both Initiator and Target */
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
__le16 conn_id /* Drivers connection ID. Should be sent in EQs to speed-up drivers access to connection data. */;
u8 def_q_idx /* Default queue number to be used for unsolicited traffic */;
u8 reserved[5];
};
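
The vlan_tag word above packs the VLAN id, CFI and priority using the unshifted-mask/shift convention of this header. A small sketch of the packing in host order (the helper is invented; the real field is little-endian, so a cpu-to-LE conversion would still be needed before handing it to firmware):

#include <stdint.h>

/* Sketch: pack VLAN id, CFI and priority into the 16-bit vlan_tag layout
 * defined above (mask is the field width, shift is the bit position).
 */
static uint16_t fcoe_pack_vlan_tag(uint16_t vid, uint8_t cfi, uint8_t prio)
{
        uint16_t tag = 0;

        tag |= (vid & FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK) <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
        tag |= (cfi & FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK) <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT;
        tag |= (prio & FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK) <<
            FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
        return tag;
}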
/*
* FCoE terminate connection request
*/
struct fcoe_conn_terminate_ramrod_data
{
struct regpair terminate_params_addr /* Terminate params ptr */;
};
/*
* Data sgl
*/
struct fcoe_fast_sgl_ctx
{
struct regpair sgl_start_addr /* Current sge address */;
__le32 sgl_byte_offset /* Byte offset from the beginning of the first page in the SGL. In case SGL starts in the middle of page then driver should init this value with the start offset */;
__le16 task_reuse_cnt /* The reuse count for that task. Wraps on a 4K value. */;
__le16 init_offset_in_first_sge /* offset from the beginning of the first page in the SGL, never changed by FW */;
};
/*
* FCoE firmware function init
*/
struct fcoe_init_func_ramrod_data
{
struct scsi_init_func_params func_params /* Common SCSI init params passed by driver to FW in function init ramrod */;
struct scsi_init_func_queues q_params /* SCSI RQ/CQ/CMDQ firmware function init parameters */;
__le16 mtu /* Max transmission unit */;
__le16 sq_num_pages_in_pbl /* Number of pages at Send Queue */;
__le32 reserved[3];
};
/*
* FCoE: Mode of the connection: Target or Initiator or both
*/
enum fcoe_mode_type
{
FCOE_INITIATOR_MODE=0x0,
FCOE_TARGET_MODE=0x1,
FCOE_BOTH_OR_NOT_CHOSEN=0x3,
MAX_FCOE_MODE_TYPE
};
/*
* Per PF FCoE receive path statistics - tStorm RAM structure
*/
struct fcoe_rx_stat
{
struct regpair fcoe_rx_byte_cnt /* Number of FCoE bytes that were received */;
struct regpair fcoe_rx_data_pkt_cnt /* Number of FCoE FCP DATA packets that were received */;
struct regpair fcoe_rx_xfer_pkt_cnt /* Number of FCoE FCP XFER RDY packets that were received */;
struct regpair fcoe_rx_other_pkt_cnt /* Number of FCoE packets which are not DATA/XFER_RDY that were received */;
__le32 fcoe_silent_drop_pkt_cmdq_full_cnt /* Number of packets that were silently dropped since CMDQ was full */;
__le32 fcoe_silent_drop_pkt_rq_full_cnt /* Number of packets that were silently dropped since RQ (BDQ) was full */;
__le32 fcoe_silent_drop_pkt_crc_error_cnt /* Number of packets that were silently dropped due to FC CRC error */;
__le32 fcoe_silent_drop_pkt_task_invalid_cnt /* Number of packets that were silently dropped since task was not valid */;
__le32 fcoe_silent_drop_total_pkt_cnt /* Number of FCoE packets that were silently dropped */;
__le32 rsrv;
};
/*
* FCoe statistics request
*/
struct fcoe_stat_ramrod_data
{
struct regpair stat_params_addr /* Statistics host address */;
};
/*
* Per PF FCoE transmit path statistics - pStorm RAM structure
*/
struct fcoe_tx_stat
{
struct regpair fcoe_tx_byte_cnt /* Transmitted FCoE bytes count */;
struct regpair fcoe_tx_data_pkt_cnt /* Transmitted FCoE FCP DATA packets count */;
struct regpair fcoe_tx_xfer_pkt_cnt /* Transmitted FCoE XFER_RDY packets count */;
struct regpair fcoe_tx_other_pkt_cnt /* Transmitted FCoE packets which are not DATA/XFER_RDY count */;
};
/*
* FCoE SQ/XferQ element
*/
struct fcoe_wqe
{
__le16 task_id /* Initiator - The task identifier (OX_ID). Target - Continuation tid or RX_ID in non-continuation mode */;
__le16 flags;
#define FCOE_WQE_REQ_TYPE_MASK 0xF /* Type of the wqe request (use enum fcoe_sqe_request_type) */
#define FCOE_WQE_REQ_TYPE_SHIFT 0
#define FCOE_WQE_SGL_MODE_MASK 0x1 /* The driver will give a hint about sizes of SGEs for better credits evaluation at Xstorm (use enum scsi_sgl_mode) */
#define FCOE_WQE_SGL_MODE_SHIFT 4
#define FCOE_WQE_CONTINUATION_MASK 0x1 /* Indication if this wqe is a continuation to an existing task (Target only) */
#define FCOE_WQE_CONTINUATION_SHIFT 5
#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 /* Indication to FW to send FCP_RSP after all data was sent - Target only */
#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
#define FCOE_WQE_RESERVED_MASK 0x1
#define FCOE_WQE_RESERVED_SHIFT 7
#define FCOE_WQE_NUM_SGES_MASK 0xF /* Number of SGEs. 8 = at least 8 sges */
#define FCOE_WQE_NUM_SGES_SHIFT 8
#define FCOE_WQE_RESERVED1_MASK 0xF
#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union /* Additional wqe information (if needed) */;
};
/*
* FCoE XFRQ element
*/
struct xfrqe_prot_flags
{
u8 flags;
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF /* Protection log interval (9=512 10=1024 11=2048 12=4096 13=8192) */
#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 /* If DIF protection is configured against target (0=no, 1=yes) */
#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 /* If DIF/DIX protection is configured against the host (0=none, 1=DIF, 2=DIX) */
#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5
#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 /* Must set to 0 */
#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7
};
/*
* FCoE doorbell data
*/

File diff suppressed because it is too large

View File

@ -145,9 +145,9 @@ struct private_path {
u32 drv_load_vars; /* When the seconds_since_mcp_reset gets here */
#define DRV_LOAD_TIMEOUT_MASK 0x0000ffff
#define DRV_LOAD_TIMEOUT_SHIFT 0
#define DRV_LOAD_TIMEOUT_OFFSET 0
#define DRV_LOAD_NEED_FORCE_MASK 0xffff0000
#define DRV_LOAD_NEED_FORCE_SHIFT 16
#define DRV_LOAD_NEED_FORCE_OFFSET 16
struct load_rsp_stc drv_load_params;
};
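
The SHIFT-to-OFFSET rename in this hunk leaves the encoding of drv_load_vars unchanged: the load timeout sits in the low 16 bits and the need-force flags in the high 16. A small decoding sketch using the new names (the helper itself is illustrative):

#include <stdint.h>

/* Sketch: extract both subfields of drv_load_vars with the renamed
 * *_OFFSET macros; the values are identical to the old *_SHIFT ones.
 */
static void drv_load_vars_decode(uint32_t drv_load_vars,
    uint16_t *timeout, uint16_t *need_force)
{
        *timeout = (drv_load_vars & DRV_LOAD_TIMEOUT_MASK) >>
            DRV_LOAD_TIMEOUT_OFFSET;
        *need_force = (drv_load_vars & DRV_LOAD_NEED_FORCE_MASK) >>
            DRV_LOAD_NEED_FORCE_OFFSET;
}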
@ -164,13 +164,13 @@ struct drv_port_info_t {
/* There are maximum 8 PFs per port */
#define DRV_STATE_LOADED_MASK 0x0000ff00
#define DRV_STATE_LOADED_SHIFT 8
#define DRV_STATE_LOADED_OFFSET 8
#define DRV_STATE_PF_TRANSITION_MASK 0x00ff0000
#define DRV_STATE_PF_TRANSITION_SHIFT 16
#define DRV_STATE_PF_TRANSITION_OFFSET 16
#define DRV_STATE_PF_PHY_INIT_MASK 0xff000000
#define DRV_STATE_PF_PHY_INIT_SHIFT 24
#define DRV_STATE_PF_PHY_INIT_OFFSET 24
};
typedef enum _lldp_subscriber_e {
@ -186,9 +186,9 @@ typedef struct {
u16 valid;
u16 type_len;
#define LLDP_LEN_MASK (0x01ff)
#define LLDP_LEN_SHIFT (0)
#define LLDP_LEN_OFFSET (0)
#define LLDP_TYPE_MASK (0xfe00)
#define LLDP_TYPE_SHIFT (9)
#define LLDP_TYPE_OFFSET (9)
u8 *value_p;
} tlv_s;
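
type_len above is the standard LLDP TLV header: a 7-bit type in the top bits and a 9-bit length below it, so a Chassis ID TLV (type 1) carrying 7 bytes encodes as (1 << 9) | 7 = 0x0207. A small sketch using the renamed OFFSET macros (helper names invented):

#include <stdint.h>

/* Sketch: build and split the 16-bit LLDP TLV header using the masks
 * above. For a Chassis ID TLV (type 1, length 7) this yields 0x0207.
 */
static uint16_t lldp_make_type_len(uint8_t type, uint16_t len)
{
        return (uint16_t)(((type << LLDP_TYPE_OFFSET) & LLDP_TYPE_MASK) |
            (len & LLDP_LEN_MASK));
}

static uint16_t lldp_tlv_len(uint16_t type_len)
{
        return type_len & LLDP_LEN_MASK;
}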
@ -211,7 +211,7 @@ typedef struct {
} subscriber_callback_receive_s;
#define MAX_ETH_HEADER 14 /* TODO: to be extended per requirements */
#define MAX_PACKET_SIZE (1516) /* So it can be divided by 4 */
#define LLDP_CHASSIS_ID_TLV_LEN 7
#define LLDP_PORT_ID_TLV_LEN 7
#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes*/
@ -244,19 +244,19 @@ typedef struct {
typedef struct {
u32 config; /* Uses same defines as local config plus some more below*/
#define DCBX_MODE_MASK 0x00000010
#define DCBX_MODE_SHIFT 4
#define DCBX_MODE_OFFSET 4
#define DCBX_MODE_DRIVER 0
#define DCBX_MODE_DEFAULT 1
#define DCBX_CHANGED_MASK 0x00000f00
#define DCBX_CHANGED_SHIFT 8
#define DCBX_CHANGED_OFFSET 8
#define DCBX_CONTROL_CHANGED_MASK 0x00000100
#define DCBX_CONTROL_CHANGED_SHIFT 8
#define DCBX_CONTROL_CHANGED_OFFSET 8
#define DCBX_PFC_CHANGED_MASK 0x00000200
#define DCBX_PFC_CHANGED_SHIFT 9
#define DCBX_PFC_CHANGED_OFFSET 9
#define DCBX_ETS_CHANGED_MASK 0x00000400
#define DCBX_ETS_CHANGED_SHIFT 10
#define DCBX_ETS_CHANGED_OFFSET 10
#define DCBX_APP_CHANGED_MASK 0x00000800
#define DCBX_APP_CHANGED_SHIFT 11
#define DCBX_APP_CHANGED_OFFSET 11
u32 seq_no;
u32 ack_no;
@ -268,14 +268,14 @@ typedef struct {
#ifdef CONFIG_HP_DCI_SUPPORT
struct dci_info_port {
u32 config;
#define DCI_PORT_CFG_ENABLE_SHIFT (0)
#define DCI_PORT_CFG_ENABLE_MASK (1 << DCI_PORT_CFG_ENABLE_SHIFT)
#define DCI_PORT_CFG_ENABLE_DIAG_SHIFT (1)
#define DCI_PORT_CFG_ENABLE_DIAG_MASK (1 << DCI_PORT_CFG_ENABLE_DIAG_SHIFT)
#define DCI_PORT_CFG_DIAG_L_LOOP_SHIFT (2)
#define DCI_PORT_CFG_DIAG_L_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_L_LOOP_SHIFT)
#define DCI_PORT_CFG_DIAG_R_LOOP_SHIFT (3)
#define DCI_PORT_CFG_DIAG_R_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_R_LOOP_SHIFT)
#define DCI_PORT_CFG_ENABLE_OFFSET (0)
#define DCI_PORT_CFG_ENABLE_MASK (1 << DCI_PORT_CFG_ENABLE_OFFSET)
#define DCI_PORT_CFG_ENABLE_DIAG_OFFSET (1)
#define DCI_PORT_CFG_ENABLE_DIAG_MASK (1 << DCI_PORT_CFG_ENABLE_DIAG_OFFSET)
#define DCI_PORT_CFG_DIAG_L_LOOP_OFFSET (2)
#define DCI_PORT_CFG_DIAG_L_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_L_LOOP_OFFSET)
#define DCI_PORT_CFG_DIAG_R_LOOP_OFFSET (3)
#define DCI_PORT_CFG_DIAG_R_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_R_LOOP_OFFSET)
};
#endif
@ -343,12 +343,12 @@ struct drv_func_info_t {
struct dci_info_func {
u8 config;
#define DCI_FUNC_CFG_FNIC_ENABLE_SHIFT (0)
#define DCI_FUNC_CFG_FNIC_ENABLE_MASK (1 << DCI_FUNC_CFG_FNIC_ENABLE_SHIFT)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_SHIFT (1)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_MASK (1 << DCI_FUNC_CFG_OS_MTU_OVERRIDE_SHIFT)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_SHIFT (2)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_MASK (1 << DCI_FUNC_CFG_DIAG_WOL_ENABLE_SHIFT)
#define DCI_FUNC_CFG_FNIC_ENABLE_OFFSET (0)
#define DCI_FUNC_CFG_FNIC_ENABLE_MASK (1 << DCI_FUNC_CFG_FNIC_ENABLE_OFFSET)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_OFFSET (1)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_MASK (1 << DCI_FUNC_CFG_OS_MTU_OVERRIDE_OFFSET)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_OFFSET (2)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_MASK (1 << DCI_FUNC_CFG_DIAG_WOL_ENABLE_OFFSET)
u8 drv_state;
u16 fcoe_cvid;

View File

@ -28,7 +28,6 @@
*
*/
/****************************************************************************
*
* Name: mcp_public.h
@ -51,17 +50,17 @@
typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_OFFSET 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
#define OFFSIZE_SIZE_SHIFT 16
#define OFFSIZE_SIZE_OFFSET 16
#define OFFSIZE_SIZE_MASK 0xffff0000
/* SECTION_OFFSET calculates the offset in bytes out of offsize */
#define SECTION_OFFSET(_offsize) ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
#define SECTION_OFFSET(_offsize) ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_OFFSET) << 2))
/* SECTION_SIZE calculates the size in bytes out of offsize */
#define SECTION_SIZE(_offsize) (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
#define SECTION_SIZE(_offsize) (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_OFFSET) << 2)
/* SECTION_ADDR returns the GRC addr of a section, given offsize and index within section */
#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
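
A worked example of the offsize encoding, with an illustrative value rather than one read from firmware:

/* For _offsize = 0x00080123 the offset field is 0x0123 dwords and the size
 * field is 0x0008 dwords, so:
 *   SECTION_OFFSET(0x00080123)    = 0x0123 << 2 = 0x048c bytes
 *   SECTION_SIZE(0x00080123)      = 0x0008 << 2 = 0x0020 bytes
 *   SECTION_ADDR(0x00080123, idx) = MCP_REG_SCRATCH + 0x48c + 0x20 * idx
 */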
@ -99,20 +98,20 @@ struct eth_phy_cfg {
#define EEE_CFG_ADV_SPEED_1G (1<<2)
#define EEE_CFG_ADV_SPEED_10G (1<<3)
#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
#define EEE_TX_TIMER_USEC_SHIFT 4
#define EEE_TX_TIMER_USEC_OFFSET 4
#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)
#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100)
#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000)
u32 link_modes; /* Additional link modes */
#define LINK_MODE_SMARTLINQ_ENABLE 0x1 /* XXX deprecated */
};
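
A sketch of composing the EEE configuration word from the bits above, assuming the EEE fields live in a single u32 member of eth_phy_cfg (the member itself is outside this hunk); the timer value is written through the MASK/OFFSET pair like any other field.

#include <stdint.h>

#define EEE_CFG_ADV_SPEED_10G (1 << 3)
#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
#define EEE_TX_TIMER_USEC_OFFSET 4
#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)

static uint32_t
eee_cfg_balanced_10g(void)
{
	uint32_t cfg = 0;

	cfg |= EEE_CFG_ADV_SPEED_10G;	/* advertise 10G EEE */
	cfg &= ~EEE_TX_TIMER_USEC_MASK;	/* clear the timer field */
	cfg |= (EEE_TX_TIMER_USEC_BALANCED_TIME << EEE_TX_TIMER_USEC_OFFSET) &
	    EEE_TX_TIMER_USEC_MASK;	/* 2560 usec: balanced latency/power */
	return (cfg);
}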
struct port_mf_cfg {
u32 dynamic_cfg; /* device control channel */
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
#define PORT_MF_CFG_OV_TAG_SHIFT 0
#define PORT_MF_CFG_OV_TAG_OFFSET 0
#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
u32 reserved[1];
@ -266,15 +265,15 @@ typedef enum _lldp_agent_e {
struct lldp_config_params_s {
u32 config;
#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
#define LLDP_CONFIG_TX_INTERVAL_OFFSET 0
#define LLDP_CONFIG_HOLD_MASK 0x00000f00
#define LLDP_CONFIG_HOLD_SHIFT 8
#define LLDP_CONFIG_HOLD_OFFSET 8
#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
#define LLDP_CONFIG_MAX_CREDIT_OFFSET 12
#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
#define LLDP_CONFIG_ENABLE_RX_OFFSET 30
#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
#define LLDP_CONFIG_ENABLE_TX_OFFSET 31
/* Holds local Chassis ID TLV header, subtype and 9B of payload.
If first byte is 0, then we will use default chassis ID */
u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
@ -296,17 +295,17 @@ struct lldp_status_params_s {
struct dcbx_ets_feature {
u32 flags;
#define DCBX_ETS_ENABLED_MASK 0x00000001
#define DCBX_ETS_ENABLED_SHIFT 0
#define DCBX_ETS_ENABLED_OFFSET 0
#define DCBX_ETS_WILLING_MASK 0x00000002
#define DCBX_ETS_WILLING_SHIFT 1
#define DCBX_ETS_WILLING_OFFSET 1
#define DCBX_ETS_ERROR_MASK 0x00000004
#define DCBX_ETS_ERROR_SHIFT 2
#define DCBX_ETS_ERROR_OFFSET 2
#define DCBX_ETS_CBS_MASK 0x00000008
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_CBS_OFFSET 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
#define DCBX_ETS_MAX_TCS_OFFSET 4
#define DCBX_OOO_TC_MASK 0x00000f00
#define DCBX_OOO_TC_SHIFT 8
#define DCBX_OOO_TC_OFFSET 8
/* Entries in tc table are organized such that the left most is pri 0, right most is pri 7 */
u32 pri_tc_tbl[1];
/* Fixed TCP OOO TC usage is deprecated and used only for driver backward compatibility */
@ -327,7 +326,7 @@ struct dcbx_ets_feature {
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
#define DCBX_APP_PRI_MAP_SHIFT 0
#define DCBX_APP_PRI_MAP_OFFSET 0
#define DCBX_APP_PRI_0 0x01
#define DCBX_APP_PRI_1 0x02
#define DCBX_APP_PRI_2 0x04
@ -337,11 +336,11 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_PRI_6 0x40
#define DCBX_APP_PRI_7 0x80
#define DCBX_APP_SF_MASK 0x00000300
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_OFFSET 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_SF_IEEE_MASK 0x0000f000
#define DCBX_APP_SF_IEEE_SHIFT 12
#define DCBX_APP_SF_IEEE_OFFSET 12
#define DCBX_APP_SF_IEEE_RESERVED 0
#define DCBX_APP_SF_IEEE_ETHTYPE 1
#define DCBX_APP_SF_IEEE_TCP_PORT 2
@ -349,7 +348,7 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
#define DCBX_APP_PROTOCOL_ID_OFFSET 16
};
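
A sketch of how a dcbx_app_priority_entry word might be interrogated when resolving the priority map for an ethtype-keyed application (CEE-style selector field shown for simplicity); the DCBX_GET helper below is a local stand-in for the driver's own field accessors.

#include <stdint.h>
#include <stddef.h>

#define DCBX_APP_PRI_MAP_MASK 0x000000ff
#define DCBX_APP_PRI_MAP_OFFSET 0
#define DCBX_APP_SF_MASK 0x00000300
#define DCBX_APP_SF_OFFSET 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_OFFSET 16

#define DCBX_GET(e, name) (((e) & name##_MASK) >> name##_OFFSET)

/* Return the priority bitmap for an ethtype entry, or 0 if not present. */
static uint8_t
dcbx_app_pri_for_ethtype(const uint32_t *tbl, size_t n, uint16_t ethtype)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (DCBX_GET(tbl[i], DCBX_APP_SF) != DCBX_APP_SF_ETHTYPE)
			continue;
		if (DCBX_GET(tbl[i], DCBX_APP_PROTOCOL_ID) == ethtype)
			return ((uint8_t)DCBX_GET(tbl[i], DCBX_APP_PRI_MAP));
	}
	return (0);
}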
@ -357,19 +356,19 @@ struct dcbx_app_priority_entry {
struct dcbx_app_priority_feature {
u32 flags;
#define DCBX_APP_ENABLED_MASK 0x00000001
#define DCBX_APP_ENABLED_SHIFT 0
#define DCBX_APP_ENABLED_OFFSET 0
#define DCBX_APP_WILLING_MASK 0x00000002
#define DCBX_APP_WILLING_SHIFT 1
#define DCBX_APP_WILLING_OFFSET 1
#define DCBX_APP_ERROR_MASK 0x00000004
#define DCBX_APP_ERROR_SHIFT 2
#define DCBX_APP_ERROR_OFFSET 2
/* Not in use
#define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
#define DCBX_APP_DEFAULT_PRI_SHIFT 8
#define DCBX_APP_DEFAULT_PRI_OFFSET 8
*/
#define DCBX_APP_MAX_TCS_MASK 0x0000f000
#define DCBX_APP_MAX_TCS_SHIFT 12
#define DCBX_APP_MAX_TCS_OFFSET 12
#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
#define DCBX_APP_NUM_ENTRIES_SHIFT 16
#define DCBX_APP_NUM_ENTRIES_OFFSET 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
@ -380,7 +379,7 @@ struct dcbx_features {
/* PFC feature */
u32 pfc;
#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
#define DCBX_PFC_PRI_EN_BITMAP_OFFSET 0
#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
@ -391,17 +390,17 @@ struct dcbx_features {
#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
#define DCBX_PFC_FLAGS_MASK 0x0000ff00
#define DCBX_PFC_FLAGS_SHIFT 8
#define DCBX_PFC_FLAGS_OFFSET 8
#define DCBX_PFC_CAPS_MASK 0x00000f00
#define DCBX_PFC_CAPS_SHIFT 8
#define DCBX_PFC_CAPS_OFFSET 8
#define DCBX_PFC_MBC_MASK 0x00004000
#define DCBX_PFC_MBC_SHIFT 14
#define DCBX_PFC_MBC_OFFSET 14
#define DCBX_PFC_WILLING_MASK 0x00008000
#define DCBX_PFC_WILLING_SHIFT 15
#define DCBX_PFC_WILLING_OFFSET 15
#define DCBX_PFC_ENABLED_MASK 0x00010000
#define DCBX_PFC_ENABLED_SHIFT 16
#define DCBX_PFC_ENABLED_OFFSET 16
#define DCBX_PFC_ERROR_MASK 0x00020000
#define DCBX_PFC_ERROR_SHIFT 17
#define DCBX_PFC_ERROR_OFFSET 17
/* APP feature */
struct dcbx_app_priority_feature app;
@ -410,7 +409,7 @@ struct dcbx_features {
struct dcbx_local_params {
u32 config;
#define DCBX_CONFIG_VERSION_MASK 0x00000007
#define DCBX_CONFIG_VERSION_SHIFT 0
#define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
@ -425,7 +424,7 @@ struct dcbx_mib {
u32 flags;
/*
#define DCBX_CONFIG_VERSION_MASK 0x00000007
#define DCBX_CONFIG_VERSION_SHIFT 0
#define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
@ -444,7 +443,7 @@ struct lldp_system_tlvs_buffer_s {
struct dcb_dscp_map {
u32 flags;
#define DCB_DSCP_ENABLE_MASK 0x1
#define DCB_DSCP_ENABLE_SHIFT 0
#define DCB_DSCP_ENABLE_OFFSET 0
#define DCB_DSCP_ENABLE 1
u32 dscp_pri_map[8];
/* the map structure is the following:
@ -481,12 +480,12 @@ struct public_global {
#define MDUMP_REASON_DUMP_AGED (1 << 2)
u32 ext_phy_upgrade_fw;
#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
#define EXT_PHY_FW_UPGRADE_STATUS_OFFSET (0)
#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
#define EXT_PHY_FW_UPGRADE_TYPE_OFFSET (16)
u8 runtime_port_swap_map[MODE_4P];
u32 data_ptr;
@ -539,9 +538,9 @@ struct public_path {
u32 process_kill; /* Reset on mcp reset, and incremented for every process kill event. */
#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
#define PROCESS_KILL_COUNTER_SHIFT 0
#define PROCESS_KILL_COUNTER_OFFSET 0
#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
#define PROCESS_KILL_GLOB_AEU_BIT_OFFSET 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id*32 + aeu_bit)
};
@ -693,13 +692,13 @@ struct public_port {
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
#define ETH_TRANSCEIVER_STATE_SHIFT 0x0
#define ETH_TRANSCEIVER_STATE_OFFSET 0x0
#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00
#define ETH_TRANSCEIVER_STATE_PRESENT 0x01
#define ETH_TRANSCEIVER_STATE_VALID 0x03
#define ETH_TRANSCEIVER_STATE_UPDATING 0x08
#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
#define ETH_TRANSCEIVER_TYPE_SHIFT 0x8
#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
#define ETH_TRANSCEIVER_TYPE_NONE 0x00
#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xFF
#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 /* 1G Passive copper cable */
@ -750,17 +749,17 @@ struct public_port {
#define EEE_ACTIVE_BIT (1<<0) /* Set when EEE negotiation is complete. */
#define EEE_LD_ADV_STATUS_MASK 0x000000f0 /* Shows the Local Device EEE capabilities */
#define EEE_LD_ADV_STATUS_SHIFT 4
#define EEE_LD_ADV_STATUS_OFFSET 4
#define EEE_1G_ADV (1<<1)
#define EEE_10G_ADV (1<<2)
#define EEE_LP_ADV_STATUS_MASK 0x00000f00 /* Same values as in EEE_LD_ADV, but for Link Partner */
#define EEE_LP_ADV_STATUS_SHIFT 8
#define EEE_LP_ADV_STATUS_OFFSET 8
u32 eee_remote; /* Used for EEE in LLDP */
#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
#define EEE_REMOTE_TW_TX_SHIFT 0
#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
#define EEE_REMOTE_TW_RX_SHIFT 16
#define EEE_REMOTE_TW_RX_OFFSET 16
u32 module_info;
#define ETH_TRANSCEIVER_MONITORING_TYPE_MASK 0x000000FF
@ -810,11 +809,11 @@ struct public_func {
/* function 0 of each port cannot be hidden */
#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001
#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
#define FUNC_MF_CFG_PROTOCOL_OFFSET 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
@ -824,15 +823,15 @@ struct public_func {
/* MINBW, MAXBW */
/* value range - 0..100, increments in 1 % */
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT 8
#define FUNC_MF_CFG_MIN_BW_OFFSET 8
#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
#define FUNC_MF_CFG_MAX_BW_SHIFT 16
#define FUNC_MF_CFG_MAX_BW_OFFSET 16
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
/* RDMA PROTOCOL */
#define FUNC_MF_CFG_RDMA_PROTOCOL_MASK 0x03000000
#define FUNC_MF_CFG_RDMA_PROTOCOL_SHIFT 24
#define FUNC_MF_CFG_RDMA_PROTOCOL_OFFSET 24
#define FUNC_MF_CFG_RDMA_PROTOCOL_NONE 0x00000000
#define FUNC_MF_CFG_RDMA_PROTOCOL_ROCE 0x01000000
#define FUNC_MF_CFG_RDMA_PROTOCOL_IWARP 0x02000000
@ -840,7 +839,7 @@ struct public_func {
#define FUNC_MF_CFG_RDMA_PROTOCOL_BOTH 0x03000000
#define FUNC_MF_CFG_BOOT_MODE_MASK 0x0C000000
#define FUNC_MF_CFG_BOOT_MODE_SHIFT 26
#define FUNC_MF_CFG_BOOT_MODE_OFFSET 26
#define FUNC_MF_CFG_BOOT_MODE_BIOS_CTRL 0x00000000
#define FUNC_MF_CFG_BOOT_MODE_DISABLED 0x04000000
#define FUNC_MF_CFG_BOOT_MODE_ENABLED 0x08000000
@ -850,7 +849,7 @@ struct public_func {
u32 mac_upper; /* MAC */
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
#define FUNC_MF_CFG_UPPERMAC_OFFSET 0
#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
u32 mac_lower;
#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
@ -863,7 +862,7 @@ struct public_func {
u32 ovlan_stag; /* tags */
#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
#define FUNC_MF_CFG_OV_STAG_SHIFT 0
#define FUNC_MF_CFG_OV_STAG_OFFSET 0
#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
u32 pf_allocation; /* vf per pf */
@ -880,32 +879,32 @@ struct public_func {
u32 drv_id;
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
#define DRV_ID_PDA_COMP_VER_OFFSET 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_MCP_HSI_VER_OFFSET 16
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << DRV_ID_MCP_HSI_VER_OFFSET)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
#define DRV_ID_DRV_TYPE_OFFSET 24
#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_TYPE_OS (DRV_ID_DRV_TYPE_LINUX | DRV_ID_DRV_TYPE_WINDOWS | \
DRV_ID_DRV_TYPE_SOLARIS | DRV_ID_DRV_TYPE_VMWARE | \
DRV_ID_DRV_TYPE_FREEBSD | DRV_ID_DRV_TYPE_AIX)
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
#define DRV_ID_DRV_INIT_HW_SHIFT 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
#define DRV_ID_DRV_INIT_HW_OFFSET 31
#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET)
};
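
A sketch of how these bits combine into the drv_id word that accompanies a load request; the actual assembly is done in the driver's load-request path, this only illustrates the layout.

#include <stdint.h>

#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_OFFSET 16
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << DRV_ID_MCP_HSI_VER_OFFSET)
#define DRV_ID_DRV_TYPE_OFFSET 24
#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_INIT_HW_OFFSET 31
#define DRV_ID_DRV_INIT_HW_FLAG (1u << DRV_ID_DRV_INIT_HW_OFFSET)

static uint32_t
build_drv_id(uint16_t pda_comp_ver, int first_to_init_hw)
{
	uint32_t drv_id = pda_comp_ver;		/* [15:0]  PDA compatibility version */

	drv_id |= DRV_ID_MCP_HSI_VER_CURRENT;	/* [23:16] HSI version */
	drv_id |= DRV_ID_DRV_TYPE_FREEBSD;	/* [30:24] OS/driver type */
	if (first_to_init_hw)
		drv_id |= DRV_ID_DRV_INIT_HW_FLAG; /* [31] this loader inits the HW */
	return (drv_id);
}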
/**************************************/
@ -990,13 +989,13 @@ struct ocbb_data_stc {
#define MFW_SENSOR_LOCATION_EXTERNAL 2
#define MFW_SENSOR_LOCATION_SFP 3
#define SENSOR_LOCATION_SHIFT 0
#define SENSOR_LOCATION_OFFSET 0
#define SENSOR_LOCATION_MASK 0x000000ff
#define THRESHOLD_HIGH_SHIFT 8
#define THRESHOLD_HIGH_OFFSET 8
#define THRESHOLD_HIGH_MASK 0x0000ff00
#define CRITICAL_TEMPERATURE_SHIFT 16
#define CRITICAL_TEMPERATURE_OFFSET 16
#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
#define CURRENT_TEMP_SHIFT 24
#define CURRENT_TEMP_OFFSET 24
#define CURRENT_TEMP_MASK 0xff000000
struct temperature_status_stc {
u32 num_of_sensors;
@ -1064,18 +1063,18 @@ struct load_req_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
#define LOAD_REQ_ROLE_SHIFT 0
#define LOAD_REQ_ROLE_OFFSET 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
#define LOAD_REQ_LOCK_TO_SHIFT 8
#define LOAD_REQ_LOCK_TO_OFFSET 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
#define LOAD_REQ_FORCE_SHIFT 16
#define LOAD_REQ_FORCE_OFFSET 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
#define LOAD_REQ_FLAGS0_SHIFT 20
#define LOAD_REQ_FLAGS0_OFFSET 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
@ -1085,11 +1084,11 @@ struct load_rsp_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
#define LOAD_RSP_ROLE_SHIFT 0
#define LOAD_RSP_ROLE_OFFSET 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
#define LOAD_RSP_HSI_SHIFT 8
#define LOAD_RSP_HSI_OFFSET 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
#define LOAD_RSP_FLAGS0_SHIFT 16
#define LOAD_RSP_FLAGS0_OFFSET 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
@ -1177,6 +1176,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 /* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 /* Param should be set to the transaction size (up to 64 bytes) */
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 /* MFW will place the file offset and len in file_att struct */
@ -1193,7 +1193,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MCP_HALT 0x00100000 /* Halts the MCP. To resume MCP, user will need to use MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers. */
#define DRV_MSG_CODE_SET_VMAC 0x00110000 /* Set virtual mac address, params [31:6] - reserved, [5:4] - type, [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN */
#define DRV_MSG_CODE_GET_VMAC 0x00120000 /* Get virtual mac address, params [31:6] - reserved, [5:4] - type, [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN */
#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
#define DRV_MSG_CODE_VMAC_TYPE_OFFSET 4
#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
@ -1211,9 +1211,9 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OCBB_DATA 0x00180000 /* indicate OCBB related information */
#define DRV_MSG_CODE_SET_BW 0x00190000 /* Set function BW, params[15:8] - min, params[7:0] - max */
#define BW_MAX_MASK 0x000000ff
#define BW_MAX_SHIFT 0
#define BW_MAX_OFFSET 0
#define BW_MIN_MASK 0x0000ff00
#define BW_MIN_SHIFT 8
#define BW_MIN_OFFSET 8
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 /* When param is set to 1, all parities will be masked (disabled). When param is set to 0, parities will be unmasked again. */
#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000 /* param[0] - Simulate fan failure, param[1] - simulate over temp. */
@ -1230,21 +1230,21 @@ struct public_drv_mb {
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 /* Param[0:4] - resource number (0-31), Param[5:7] - opcode, param[15:8] - age */
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_SHIFT 0
#define RESOURCE_CMD_REQ_RESC_OFFSET 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5
#define RESOURCE_OPCODE_REQ 1 /* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ_WO_AGING 2 /* request resource ownership without aging */
#define RESOURCE_OPCODE_REQ_W_AGING 3 /* request resource ownership with specific aging timer (in seconds) */
#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
#define RESOURCE_OPCODE_FORCE_RELEASE 5 /* force resource release */
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
#define RESOURCE_CMD_REQ_AGE_SHIFT 8
#define RESOURCE_CMD_REQ_AGE_OFFSET 8
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
#define RESOURCE_CMD_RSP_OWNER_OFFSET 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8
#define RESOURCE_OPCODE_GNT 1 /* resource is free and granted to requester */
#define RESOURCE_OPCODE_BUSY 2 /* resource is busy, param[7:0] indicates owner as follows: 0-15 = PF0-15, 16 = MFW, 17 = diag over serial */
#define RESOURCE_OPCODE_RELEASED 3 /* indicate release request was acknowledged */
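
A sketch of packing the DRV_MSG_CODE_RESOURCE_CMD request param and decoding the response fields above; the driver's resource-lock helpers do the equivalent, this version only shows the bit layout.

#include <stdint.h>

#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
#define RESOURCE_CMD_REQ_RESC_OFFSET 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
#define RESOURCE_CMD_REQ_AGE_OFFSET 8
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
#define RESOURCE_CMD_RSP_OWNER_OFFSET 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8
#define RESOURCE_OPCODE_GNT 1

/* Request ownership of resource 'resc' with an aging timer of 'age_sec' seconds. */
static uint32_t
resc_req_param(uint8_t resc, uint8_t age_sec)
{
	return (((resc << RESOURCE_CMD_REQ_RESC_OFFSET) & RESOURCE_CMD_REQ_RESC_MASK) |
	    ((RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_OFFSET) &
	    RESOURCE_CMD_REQ_OPCODE_MASK) |
	    ((age_sec << RESOURCE_CMD_REQ_AGE_OFFSET) & RESOURCE_CMD_REQ_AGE_MASK));
}

/* Returns non-zero if ownership was granted; *owner reports the current holder
 * (0-15 = PF0-15, 16 = MFW, 17 = diag over serial). */
static int
resc_rsp_granted(uint32_t rsp, uint8_t *owner)
{
	*owner = (rsp & RESOURCE_CMD_RSP_OWNER_MASK) >> RESOURCE_CMD_RSP_OWNER_OFFSET;
	return (((rsp & RESOURCE_CMD_RSP_OPCODE_MASK) >>
	    RESOURCE_CMD_RSP_OPCODE_OFFSET) == RESOURCE_OPCODE_GNT);
}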
@ -1269,11 +1269,11 @@ struct public_drv_mb {
#define DRV_MSG_CODE_GPIO_INFO 0x00270000 /* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000 /* Value will be placed in union */
#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000 /* Value should be placed in union */
#define DRV_MB_PARAM_ADDR_SHIFT 0
#define DRV_MB_PARAM_ADDR_OFFSET 0
#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
#define DRV_MB_PARAM_DEVAD_SHIFT 16
#define DRV_MB_PARAM_DEVAD_OFFSET 16
#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
#define DRV_MB_PARAM_PORT_SHIFT 21
#define DRV_MB_PARAM_PORT_OFFSET 21
#define DRV_MB_PARAM_PORT_MASK 0x00600000
#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
@ -1307,44 +1307,47 @@ struct public_drv_mb {
/* LLDP / DCBX params*/
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3
#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_OFFSET 0
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
#define DRV_MB_PARAM_PHY_ADDR_OFFSET 0
#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
#define DRV_MB_PARAM_PHY_LANE_OFFSET 16
#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
#define DRV_MB_PARAM_PHY_SELECT_PORT_OFFSET 29
#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
#define DRV_MB_PARAM_PHY_PORT_OFFSET 30
#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0
#define DRV_MB_PARAM_PHYMOD_LANE_OFFSET 0
#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
#define DRV_MB_PARAM_PHYMOD_SIZE_OFFSET 8
#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
/* configure vf MSIX params*/
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
/* configure vf MSIX params BB */
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
/* configure vf MSIX for PF params AH*/
#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0
#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF
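
The new DRV_MSG_CODE_CFG_PF_VFS_MSIX mailbox command pairs with the AH-only param just above: on BB the driver configures MSI-X per VF, while on AH a single request carries the SB count for the PF's VFs. A sketch of the two param layouts, for illustration only.

#include <stdint.h>

/* BB: per-VF configuration (DRV_MSG_CODE_CFG_VF_MSIX) */
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00

/* AH: one request covering the PF's VFs (DRV_MSG_CODE_CFG_PF_VFS_MSIX) */
#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0
#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF

static uint32_t
cfg_vf_msix_param_bb(uint8_t vf_id, uint8_t num_sbs)
{
	return (((vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
	    DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK) |
	    ((num_sbs << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
	    DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK));
}

static uint32_t
cfg_vf_msix_param_ah(uint8_t num_sbs)
{
	return ((num_sbs << DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET) &
	    DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK);
}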
/* OneView configuration parameters */
#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
#define DRV_MB_PARAM_OV_CURR_CFG_OFFSET 0
#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
@ -1355,7 +1358,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OFFSET 0
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
@ -1368,17 +1371,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_OFFSET 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_OFFSET 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_OFFSET 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 /* Not Installed*/
@ -1386,7 +1389,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 /* installed but disabled by user/admin/OS */
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 /* installed and active */
#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
@ -1404,35 +1407,35 @@ struct public_drv_mb {
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
#define DRV_MB_PARAM_FCOE_CVID_MASK 0xFFF
#define DRV_MB_PARAM_FCOE_CVID_SHIFT 0
#define DRV_MB_PARAM_FCOE_CVID_OFFSET 0
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0
#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0
#define DRV_MB_PARAM_GPIO_NUMBER_OFFSET 0
#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
#define DRV_MB_PARAM_GPIO_VALUE_OFFSET 16
#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
#define DRV_MB_PARAM_GPIO_DIRECTION_SHIFT 16
#define DRV_MB_PARAM_GPIO_DIRECTION_OFFSET 16
#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
#define DRV_MB_PARAM_GPIO_CTRL_SHIFT 24
#define DRV_MB_PARAM_GPIO_CTRL_OFFSET 24
#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
/* Resource Allocation params - Driver version support*/
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
@ -1445,17 +1448,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_RC_FAILED 2
#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SHIFT 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 /* driver supports SmartLinQ parameter */
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 /* driver supports EEE parameter */
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_SHIFT 16
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@ -1582,15 +1585,19 @@ struct public_drv_mb {
#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000
#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 fw_mb_param;
/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
/* get pf rdma protocol command response */
#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0


@ -28,7 +28,6 @@
*
*/
/****************************************************************************
*
* Name: mfw_hsi.h


@ -36,20 +36,20 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
* Created: 2/4/2017
* Created: 3/15/2017
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
#define NVM_CFG_version 0x81812
#define NVM_CFG_version 0x83000
#define NVM_CFG_new_option_seq 20
#define NVM_CFG_new_option_seq 22
#define NVM_CFG_removed_option_seq 1
#define NVM_CFG_updated_value_seq 3
#define NVM_CFG_updated_value_seq 4
struct nvm_cfg_mac_address
{
@ -535,6 +535,10 @@ struct nvm_cfg1_glob
#define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
#define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
#define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_MASK 0x30000000
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0
#define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@ -1323,6 +1327,8 @@ struct nvm_cfg1_port
#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
#define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK 0x01FE0000
#define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET 17
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
@ -1945,3 +1951,4 @@ struct nvm_cfg
};
#endif /* NVM_CFG_H */


@ -28,6 +28,7 @@
*
*/
/****************************************************************************
* Name: nvm_map.h
*


@ -33,7 +33,9 @@
#define _PCICS_REG_DRIVER_H
/* offset of configuration space in the pci core register */
#ifndef __EXTRACT__LINUX__
#define PCICFG_OFFSET 0x2000
#endif
#define PCICFG_VENDOR_ID_OFFSET 0x00
#define PCICFG_DEVICE_ID_OFFSET 0x02
#define PCICFG_COMMAND_OFFSET 0x04
@ -243,6 +245,7 @@
* define in version.v
*/
#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK 0x3F
#ifndef __EXTRACT__LINUX__
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
/* First VF_NUM for PF is encoded in this register.
@ -253,6 +256,7 @@
* have the same location for the same 4 bits
*/
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
#endif
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/


@ -688,8 +688,8 @@ extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
#endif /* #if __FreeBSD_version < 1100000 */
#define CQE_L3_PACKET(flags) \
((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv4) || \
(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv6))
((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv4) || \
(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3_type_ipv6))
#define CQE_IP_HDR_ERR(flags) \
((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \


@ -210,6 +210,11 @@ char qlnx_name_str[NAME_SIZE];
#define QLOGIC_PCI_DEVICE_ID_1654 0x1654
#endif
/* 10G/25G/40G Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070 0x8070
#endif
static int
qlnx_valid_device(device_t dev)
{
@ -220,7 +225,8 @@ qlnx_valid_device(device_t dev)
if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
(device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
(device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_1654))
(device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070))
return 0;
return -1;
@ -279,6 +285,16 @@ qlnx_pci_probe(device_t dev)
break;
case QLOGIC_PCI_DEVICE_ID_8070:
snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) "
"Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
device_set_desc_copy(dev, qlnx_dev_str);
break;
default:
return (ENXIO);
}
@ -381,7 +397,7 @@ qlnx_fp_taskqueue(void *context, int pending)
struct ifnet *ifp;
struct mbuf *mp;
int ret;
int lro_enable, tc;
int lro_enable;
int rx_int = 0, total_rx_count = 0;
struct thread *cthread;
@ -461,9 +477,9 @@ qlnx_fp_taskqueue(void *context, int pending)
goto qlnx_fp_taskqueue_exit;
}
for (tc = 0; tc < ha->num_tc; tc++) {
(void)qlnx_tx_int(ha, fp, fp->txq[tc]);
}
// for (tc = 0; tc < ha->num_tc; tc++) {
// (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
// }
mp = drbr_peek(ifp, fp->tx_br);
@ -500,9 +516,9 @@ qlnx_fp_taskqueue(void *context, int pending)
mp = drbr_peek(ifp, fp->tx_br);
}
for (tc = 0; tc < ha->num_tc; tc++) {
(void)qlnx_tx_int(ha, fp, fp->txq[tc]);
}
// for (tc = 0; tc < ha->num_tc; tc++) {
// (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
// }
mtx_unlock(&fp->tx_mtx);
@ -1911,7 +1927,8 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
ifp->if_baudrate = IF_Gbps(40);
else if (device_id == QLOGIC_PCI_DEVICE_ID_1656)
else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070))
ifp->if_baudrate = IF_Gbps(25);
else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
ifp->if_baudrate = IF_Gbps(50);
@ -1974,7 +1991,8 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) {
} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
@ -3216,6 +3234,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
ifm_type = IFM_40G_SR4;
else if (if_link->speed == (25 * 1000))
ifm_type = QLNX_IFM_25G_SR;
else if (if_link->speed == (10 * 1000))
ifm_type = (IFM_10G_LR | IFM_10G_SR);
else if (if_link->speed == (1 * 1000))
ifm_type = (IFM_1000_SX | IFM_1000_LX);
break;
case MEDIA_DA_TWINAX:
@ -3225,6 +3248,9 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
ifm_type = IFM_40G_CR4;
else if (if_link->speed == (25 * 1000))
ifm_type = QLNX_IFM_25G_CR;
else if (if_link->speed == (10 * 1000))
ifm_type = IFM_10G_TWINAX;
break;
default :
@ -4748,6 +4774,19 @@ qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value)
return;
}
void
qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value)
{
uint32_t offset;
struct ecore_dev *cdev;
cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
return;
}
void *
qlnx_zalloc(uint32_t size)
{
@ -5201,7 +5240,7 @@ qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info,
static int
qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
{
struct status_block *sb_virt;
struct status_block_e4 *sb_virt;
bus_addr_t sb_phys;
int rc;
uint32_t size;


@ -38,6 +38,6 @@
*/
#define QLNX_VERSION_MAJOR 1
#define QLNX_VERSION_MINOR 3
#define QLNX_VERSION_BUILD 0
#define QLNX_VERSION_MINOR 4
#define QLNX_VERSION_BUILD 5


@ -28,7 +28,6 @@
*
*/
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************/

File diff suppressed because it is too large

View File

@ -28,7 +28,6 @@
*
*/
/****************************************************************************
* Name: spad_layout.h
*
@ -98,8 +97,8 @@ extern struct spad_layout g_spad;
#endif /* MDUMP_PARSE_TOOL */
#define TO_OFFSIZE(_offset, _size) \
(u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
(((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
(u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
(((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))
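
TO_OFFSIZE is the encode-side counterpart of SECTION_OFFSET/SECTION_SIZE in mcp_public.h: a byte offset and size are stored as dword counts in the low and high halves of one u32. A small compilable illustration with made-up values.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

#define OFFSIZE_OFFSET_OFFSET 0
#define OFFSIZE_SIZE_OFFSET 16

#define TO_OFFSIZE(_offset, _size) \
	(u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
	    (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))

int
main(void)
{
	/* a 64-byte element starting 0x100 bytes into the scratchpad */
	u32 offsize = TO_OFFSIZE(0x100, 64);

	printf("offsize = 0x%08x\n", offsize);	/* prints 0x00100040 */
	return (0);
}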
enum spad_sections {
SPAD_SECTION_TRACE,
@ -160,13 +159,13 @@ struct static_init {
u32 mim_start_addr; /* 0xe20848 */
u32 ah_pcie_link_params; /* 0xe20850 Stores the PCIe link configuration at start, so it can also be used later for Hot-Reset, without the need to re-read it from nvm cfg. */
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_OFFSET (0)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_OFFSET (8)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_OFFSET (16)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_OFFSET (24)
#define AH_PCIE_LINK_PARAMS *((u32*)(STRUCT_OFFSET(ah_pcie_link_params)))
u32 flags; /* 0xe20850 */
@ -183,6 +182,7 @@ struct static_init {
#define FLAGS_SMBUS_AUX_MODE (1 << 9)
#define FLAGS_PEND_SMBUS_VMAIN_TO_AUX (1 << 10)
#define FLAGS_NVM_CFG_EFUSE_FAILURE (1 << 11)
#define FLAGS_POWER_TRANSITION (1 << 12)
#define FLAGS_OS_DRV_LOADED (1 << 29)
#define FLAGS_OVER_TEMP_OCCUR (1 << 30)
#define FLAGS_FAN_FAIL_OCCUR (1 << 31)


@ -28,7 +28,6 @@
*
*/
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
/*********************/
@ -40,17 +39,44 @@
// Each Resource ID is mapped one-to-one by the driver to a BDQ Resource ID (for instance per port)
#define BDQ_NUM_RESOURCES (4)
// ID 0 : RQ, ID 1 : IMMEDIATE_DATA:
// ID 0 : RQ, ID 1 : IMMEDIATE_DATA, ID 2 : TQ
#define BDQ_ID_RQ (0)
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
#define BDQ_ID_TQ (2)
#define BDQ_NUM_IDS (3)
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
#define BDQ_MAX_EXTERNAL_RING_SIZE (1<<15)
/* SCSI op codes */
#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
#define SCSI_OPCODE_WRITE_6 (0x0A)
#define SCSI_OPCODE_WRITE_10 (0x2A)
#define SCSI_OPCODE_WRITE_12 (0xAA)
#define SCSI_OPCODE_WRITE_16 (0x8A)
#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)
/*
* iSCSI Drv opaque
*/
struct iscsi_drv_opaque
{
__le16 reserved_zero[3];
__le16 opaque;
};
/*
* Scsi 2B/8B opaque union
*/
union scsi_opaque
{
struct regpair fcoe_opaque /* 8 Bytes opaque */;
struct iscsi_drv_opaque iscsi_opaque /* 2 Bytes opaque */;
};
/*
* SCSI buffer descriptor
@ -58,7 +84,7 @@
struct scsi_bd
{
struct regpair address /* Physical Address of buffer */;
struct regpair opaque /* Driver Metadata (preferably Virtual Address of buffer) */;
union scsi_opaque opaque /* Driver Metadata (preferably Virtual Address of buffer) */;
};
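
With the new scsi_opaque union, a buffer descriptor carries either an 8-byte FCoE opaque or a 2-byte iSCSI opaque. A sketch of filling one for iSCSI; the regpair/__le16 stand-ins below are simplified, host-endian versions of the real HSI types, with byte swapping elided.

#include <stdint.h>

/* Simplified stand-ins for the common HSI types (no byte swapping shown). */
typedef uint16_t __le16;
struct regpair { uint32_t lo; uint32_t hi; };

struct iscsi_drv_opaque {
	__le16 reserved_zero[3];
	__le16 opaque;
};

union scsi_opaque {
	struct regpair fcoe_opaque;		/* 8 bytes of opaque data */
	struct iscsi_drv_opaque iscsi_opaque;	/* 2 bytes of opaque data */
};

struct scsi_bd {
	struct regpair address;			/* physical address of buffer */
	union scsi_opaque opaque;		/* driver metadata */
};

static void
scsi_bd_fill_iscsi(struct scsi_bd *bd, uint64_t phys, uint16_t cookie)
{
	bd->address.lo = (uint32_t)phys;
	bd->address.hi = (uint32_t)(phys >> 32);
	bd->opaque.iscsi_opaque.reserved_zero[0] = 0;
	bd->opaque.iscsi_opaque.reserved_zero[1] = 0;
	bd->opaque.iscsi_opaque.reserved_zero[2] = 0;
	bd->opaque.iscsi_opaque.opaque = cookie;	/* driver cookie, e.g. buffer id */
}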
@ -131,24 +157,28 @@ struct scsi_init_func_queues
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_TMWO_EN_MASK 0x1 /* This bit is valid if TQ is enabled for this function, tmwo option enabled/disabled */
#define SCSI_INIT_FUNC_QUEUES_TMWO_EN_SHIFT 4
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x7
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 5
__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS] /* CQ/CMDQ status block number array */;
u8 num_queues /* Number of continuous global queues used */;
u8 queue_relative_offset /* offset of continuous global queues used */;
u8 cq_sb_pi /* Protocol Index of CQ in status block (CQ consumer) */;
u8 cmdq_sb_pi /* Protocol Index of CMDQ in status block (CMDQ consumer) */;
__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS] /* CQ/CMDQ status block number array */;
__le16 reserved0 /* reserved */;
u8 bdq_pbl_num_entries[BDQ_NUM_IDS] /* Per BDQ ID, the PBL page size (number of entries in PBL) */;
u8 reserved1 /* reserved */;
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS] /* Per BDQ ID, the PBL page Base Address */;
__le16 bdq_xoff_threshold[BDQ_NUM_IDS] /* BDQ XOFF threshold - when number of entries will be below that TH, it will send XOFF */;
__le16 bdq_xon_threshold[BDQ_NUM_IDS] /* BDQ XON threshold - when number of entries will be above that TH, it will send XON */;
__le16 cmdq_xoff_threshold /* CMDQ XOFF threshold - when number of entries will be below that TH, it will send XOFF */;
__le16 bdq_xon_threshold[BDQ_NUM_IDS] /* BDQ XON threshold - when number of entries will be above that TH, it will send XON */;
__le16 cmdq_xon_threshold /* CMDQ XON threshold - when number of entries will be above that TH, it will send XON */;
__le32 reserved1 /* reserved */;
};
/*
* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data)
*/
@ -194,4 +224,13 @@ struct scsi_terminate_extra_params
u8 reserved[4];
};
/*
* SCSI Task Queue Element
*/
struct scsi_tqe
{
__le16 itid /* Task ITID */;
};
#endif /* __STORAGE_COMMON__ */


@ -28,6 +28,7 @@
*
*/
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
/********************/