ice(4): Update to 0.26.16

Summary of changes:

- Assorted bug fixes
- Support for newer versions of the device firmware
- Suspend/resume support
- Support for Lenient Link Mode on E82X devices (e.g., the driver can attempt
  to link with SFP/QSFP modules that have bad EEPROMs)
- Adds a port-level rx_discards sysctl, similar to ixl(4)'s (see the usage
  sketch below)
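
A minimal sketch of reading the new counter from userspace follows. The exact
sysctl OID is an assumption (the commit only says the counter is port-level
and modeled on ixl(4)'s), so adjust the name to match the driver's tree:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t discards;
        size_t len = sizeof(discards);

        /* OID name is hypothetical; check "sysctl -a | grep rx_discards"
         * for the real location under dev.ice.N. */
        if (sysctlbyname("dev.ice.0.hw.rx_discards", &discards, &len,
            NULL, 0) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("rx_discards: %ju\n", (uintmax_t)discards);
        return (0);
}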

This version of the driver is intended to be used with DDP package 1.3.16.0,
which has already been updated in a previous commit.

Tested by:	Jeffrey Pieper <jeffrey.e.pieper@intel.com>
MFC after:	3 days
MFC with:	r365332, r365550
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D26322


@ -156,12 +156,13 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
#define ICE_AQC_CAPS_OROM_VER 0x004A
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_CEM 0x00F2
#define ICE_AQC_CAPS_IWARP 0x0051
#define ICE_AQC_CAPS_LED 0x0061
#define ICE_AQC_CAPS_SDP 0x0062
#define ICE_AQC_CAPS_WR_CSR_PROT 0x0064
#define ICE_AQC_CAPS_NO_DROP_POLICY 0x0065
#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
#define ICE_AQC_CAPS_SKU 0x0074
#define ICE_AQC_CAPS_PORT_MAP 0x0075
@ -281,13 +282,6 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
/* The response buffer is as follows. Note that the length of the
* elements array varies with the length of the command response.
*/
struct ice_aqc_get_sw_cfg_resp {
struct ice_aqc_get_sw_cfg_resp_elem elements[1];
};
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@ -338,8 +332,6 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49
#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50
#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51
#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
/* Resource types 0x62-67 are reserved for Hash profile builder */
@ -372,15 +364,6 @@ struct ice_aqc_get_res_resp_elem {
__le16 total_free; /* Resources un-allocated/not reserved by any PF */
};
/* Buffer for Get Resource command */
struct ice_aqc_get_res_resp {
/* Number of resource entries to be calculated using
* datalen/sizeof(struct ice_aqc_cmd_resp)).
* Value of 'datalen' gets updated as part of response.
*/
struct ice_aqc_get_res_resp_elem elem[1];
};
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
*/
@ -406,7 +389,7 @@ struct ice_aqc_alloc_free_res_elem {
#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
(0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
__le16 num_elems;
struct ice_aqc_res_elem elem[1];
struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN];
};
/* Get Allocated Resource Descriptors Command (indirect 0x020A) */
@ -428,10 +411,6 @@ struct ice_aqc_get_allocd_res_desc {
__le32 addr_low;
};
struct ice_aqc_get_allocd_res_desc_resp {
struct ice_aqc_res_elem elem[1];
};
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@ -758,7 +737,6 @@ struct ice_aqc_sw_rules {
__le32 addr_low;
};
#pragma pack(1)
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
@ -841,9 +819,8 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
u8 hdr[1];
u8 hdr[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
@ -852,7 +829,6 @@ struct ice_sw_rule_lkup_rx_tx {
struct ice_sw_rule_lg_act {
__le16 index; /* Index in large action table */
__le16 size;
__le32 act[1]; /* array of size for actions */
/* Max number of large actions */
#define ICE_MAX_LG_ACT 4
/* Bit 0:1 - Action type */
@ -903,6 +879,7 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT 0x7
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
__le32 act[STRUCT_HACK_VAR_LEN]; /* array of size for actions */
};
/* Add/Update/Remove VSI list command/response entry
@ -912,7 +889,7 @@ struct ice_sw_rule_lg_act {
struct ice_sw_rule_vsi_list {
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
__le16 vsi[1]; /* Array of number_vsi VSI numbers */
__le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */
};
#pragma pack(1)
@ -977,8 +954,10 @@ struct ice_aqc_set_query_pfc_mode {
struct ice_aqc_set_dcb_params {
u8 cmd_flags; /* unused in response */
#define ICE_AQC_LINK_UP_DCB_CFG BIT(0)
#define ICE_AQC_PERSIST_DCB_CFG BIT(1)
u8 valid_flags; /* unused in response */
#define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0)
#define ICE_AQC_PERSIST_DCB_CFG_VALID BIT(1)
u8 rsvd[14];
};
@ -1008,14 +987,6 @@ struct ice_aqc_sched_elem_cmd {
__le32 addr_low;
};
/* This is the buffer for:
* Suspend Nodes (indirect 0x0409)
* Resume Nodes (indirect 0x040A)
*/
struct ice_aqc_suspend_resume_elem {
__le32 teid[1];
};
struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
@ -1025,7 +996,7 @@ struct ice_aqc_txsched_move_grp_info_hdr {
struct ice_aqc_move_elem {
struct ice_aqc_txsched_move_grp_info_hdr hdr;
__le32 teid[1];
__le32 teid[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_elem_info_bw {
@ -1078,15 +1049,7 @@ struct ice_aqc_txsched_topo_grp_info_hdr {
struct ice_aqc_add_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
struct ice_aqc_txsched_elem_data generic[1];
};
struct ice_aqc_conf_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
struct ice_aqc_get_elem {
struct ice_aqc_txsched_elem_data generic[1];
struct ice_aqc_txsched_elem_data generic[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_get_topo_elem {
@ -1097,7 +1060,7 @@ struct ice_aqc_get_topo_elem {
struct ice_aqc_delete_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
__le32 teid[1];
__le32 teid[STRUCT_HACK_VAR_LEN];
};
/* Query Port ETS (indirect 0x040E)
@ -1160,10 +1123,6 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
struct ice_aqc_rl_profile_generic_elem {
struct ice_aqc_rl_profile_elem generic[1];
};
/* Configure L2 Node CGD (indirect 0x0414)
* This indirect command allows configuring a congestion domain for given L2
* node TEIDs in the scheduler topology.
@ -1181,10 +1140,6 @@ struct ice_aqc_cfg_l2_node_cgd_elem {
u8 reserved[3];
};
struct ice_aqc_cfg_l2_node_cgd_data {
struct ice_aqc_cfg_l2_node_cgd_elem elem[1];
};
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@ -1330,7 +1285,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@ -1381,6 +1336,7 @@ struct ice_aqc_get_phy_caps_data {
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
#define ICE_AQC_MOD_TYPE_IDENT 1
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
@ -1490,6 +1446,9 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 link_cfg_err;
#define ICE_AQ_LINK_CFG_ERR BIT(0)
#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2)
#define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
#define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@ -1607,7 +1566,7 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
/* DNL Get Status command (indirect 0x680)
/* DNL Get Status command (indirect 0x0680)
* Structure used for the response, the command uses the generic
* ice_aqc_generic struct to pass a buffer address to the FW.
*/
@ -1667,7 +1626,7 @@ struct ice_aqc_dnl_get_status_data {
u32 sb_iosf_clk_cntr;
};
/* DNL run command (direct 0x681) */
/* DNL run command (direct 0x0681) */
struct ice_aqc_dnl_run_command {
u8 reserved0;
u8 command;
@ -1686,7 +1645,7 @@ struct ice_aqc_dnl_run_command {
u8 reserved1[12];
};
/* DNL call command (indirect 0x682)
/* DNL call command (indirect 0x0682)
* Struct is used for both command and response
*/
struct ice_aqc_dnl_call_command {
@ -1698,14 +1657,14 @@ struct ice_aqc_dnl_call_command {
__le32 addr_low;
};
/* DNL call command/response buffer (indirect 0x682) */
/* DNL call command/response buffer (indirect 0x0682) */
struct ice_aqc_dnl_call {
__le32 stores[4];
};
/* Used for both commands:
* DNL read sto command (indirect 0x683)
* DNL write sto command (indirect 0x684)
* DNL read sto command (indirect 0x0683)
* DNL write sto command (indirect 0x0684)
*/
struct ice_aqc_dnl_read_write_command {
u8 ctx;
@ -1720,8 +1679,8 @@ struct ice_aqc_dnl_read_write_command {
};
/* Used for both command responses:
* DNL read sto response (indirect 0x683)
* DNL write sto response (indirect 0x684)
* DNL read sto response (indirect 0x0683)
* DNL write sto response (indirect 0x0684)
*/
struct ice_aqc_dnl_read_write_response {
u8 reserved;
@ -1732,14 +1691,14 @@ struct ice_aqc_dnl_read_write_response {
__le32 addr_low; /* Reserved for write command */
};
/* DNL set breakpoints command (indirect 0x686) */
/* DNL set breakpoints command (indirect 0x0686) */
struct ice_aqc_dnl_set_breakpoints_command {
__le32 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
/* DNL set breakpoints data buffer structure (indirect 0x686) */
/* DNL set breakpoints data buffer structure (indirect 0x0686) */
struct ice_aqc_dnl_set_breakpoints {
u8 ctx;
u8 ena; /* 0- disabled, 1- enabled */
@ -1747,7 +1706,7 @@ struct ice_aqc_dnl_set_breakpoints {
__le16 activity_id;
};
/* DNL read log data command(indirect 0x687) */
/* DNL read log data command(indirect 0x0687) */
struct ice_aqc_dnl_read_log_command {
__le16 reserved0;
__le16 offset;
@ -1757,7 +1716,7 @@ struct ice_aqc_dnl_read_log_command {
};
/* DNL read log data response(indirect 0x687) */
/* DNL read log data response(indirect 0x0687) */
struct ice_aqc_dnl_read_log_response {
__le16 reserved;
__le16 size;
@ -1976,6 +1935,7 @@ struct ice_aqc_get_port_options {
struct ice_aqc_get_port_options_elem {
u8 pmd;
#define ICE_AQC_PORT_INV_PORT_OPT 4
#define ICE_AQC_PORT_OPT_PMD_COUNT_S 0
#define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S)
#define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4
@ -1995,13 +1955,6 @@ struct ice_aqc_get_port_options_elem {
u8 phy_scid[2];
};
/* The buffer for command 0x06EA contains port_options_count of options
* in the option array.
*/
struct ice_aqc_get_port_options_data {
struct ice_aqc_get_port_options_elem option[1];
};
/* Set Port Option (direct, 0x06EB) */
struct ice_aqc_set_port_option {
u8 lport_num;
@ -2114,6 +2067,7 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M MAKEMASK(0x3FF, 0)
#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
@ -2353,6 +2307,18 @@ struct ice_aqc_lldp_stop_start_specific_agent {
u8 reserved[15];
};
/* LLDP Filter Control (direct 0x0A0A) */
struct ice_aqc_lldp_filter_ctrl {
u8 cmd_flags;
#define ICE_AQC_LLDP_FILTER_ACTION_M MAKEMASK(3, 0)
#define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0
#define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1
#define ICE_AQC_LLDP_FILTER_ACTION_UPDATE 0x2
u8 reserved1;
__le16 vsi_num;
u8 reserved2[12];
};
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@ -2389,7 +2355,7 @@ struct ice_aqc_get_set_rss_keys {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@ -2450,7 +2416,7 @@ struct ice_aqc_add_tx_qgrp {
__le32 parent_teid;
u8 num_txqs;
u8 rsvd[3];
struct ice_aqc_add_txqs_perq txqs[1];
struct ice_aqc_add_txqs_perq txqs[STRUCT_HACK_VAR_LEN];
};
/* Disable Tx LAN Queues (indirect 0x0C31) */
@ -2483,23 +2449,20 @@ struct ice_aqc_dis_txqs {
* added before the start of the next group, to allow correct
* alignment of the parent_teid field.
*/
#pragma pack(1)
struct ice_aqc_dis_txq_item {
__le32 parent_teid;
u8 num_qs;
u8 rsvd;
/* The length of the q_id array varies according to num_qs */
__le16 q_id[1];
/* This only applies from F8 onward */
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
__le16 q_id[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_dis_txq {
struct ice_aqc_dis_txq_item qgrps[1];
};
#pragma pack()
/* Tx LAN Queues Cleanup Event (0x0C31) */
struct ice_aqc_txqs_cleanup {
@ -2540,11 +2503,11 @@ struct ice_aqc_move_txqs_elem {
struct ice_aqc_move_txqs_data {
__le32 src_teid;
__le32 dest_teid;
struct ice_aqc_move_txqs_elem txqs[1];
struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN];
};
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C42) */
/* Also used for Update Package (indirect 0x0C42 and 0x0C41) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@ -2593,7 +2556,7 @@ struct ice_aqc_get_pkg_info {
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
struct ice_aqc_get_pkg_info pkg_info[STRUCT_HACK_VAR_LEN];
};
/* Driver Shared Parameters (direct, 0x0C90) */
@ -2617,6 +2580,50 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
/* Set Health Status (direct 0xFF20) */
struct ice_aqc_set_health_status_config {
u8 event_source;
#define ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
#define ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
#define ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
u8 reserved[15];
};
/* Get Health Status codes (indirect 0xFF21) */
struct ice_aqc_get_supported_health_status_codes {
__le16 health_code_count;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
/* Get Health Status (indirect 0xFF22) */
struct ice_aqc_get_health_status {
__le16 health_status_count;
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
/* Get Health Status event buffer entry, (0xFF22)
* repeated per reported health status
*/
struct ice_aqc_health_status_elem {
__le16 health_status_code;
__le16 event_source;
#define ICE_AQC_HEALTH_STATUS_PF (0x1)
#define ICE_AQC_HEALTH_STATUS_PORT (0x2)
#define ICE_AQC_HEALTH_STATUS_GLOBAL (0x3)
__le32 internal_data1;
#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
__le32 internal_data2;
};
/* Clear Health Status (direct 0xFF23) */
struct ice_aqc_clear_health_status {
__le32 reserved[4];
};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@ -2706,6 +2713,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@ -2727,6 +2735,12 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
struct ice_aqc_set_health_status_config
set_health_status_config;
struct ice_aqc_get_supported_health_status_codes
get_supported_health_status_codes;
struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_clear_health_status clear_health_status;
} params;
};
@ -2918,6 +2932,8 @@ enum ice_adminq_opc {
ice_aqc_opc_nvm_sr_dump = 0x0707,
ice_aqc_opc_nvm_save_factory_settings = 0x0708,
ice_aqc_opc_nvm_update_empr = 0x0709,
ice_aqc_opc_nvm_pkg_data = 0x070A,
ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@ -2940,6 +2956,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@ -2963,6 +2980,12 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
/* SystemDiagnostic commands */
ice_aqc_opc_set_health_status_config = 0xFF20,
ice_aqc_opc_get_supported_health_status_codes = 0xFF21,
ice_aqc_opc_get_health_status = 0xFF22,
ice_aqc_opc_clear_health_status = 0xFF23
};
#endif /* _ICE_ADMINQ_CMD_H_ */
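
The dominant change in this header is converting one-element arrays (elem[1],
hdr[1], teid[1], ...) into STRUCT_HACK_VAR_LEN variable-length members. A
minimal sketch of the allocation pattern this implies, using the driver's own
ice_struct_size()/ice_malloc() helpers; the function name is illustrative and
error handling is trimmed:

/* Sketch: sizing a variable-length AQ buffer. With STRUCT_HACK_VAR_LEN,
 * sizeof() no longer accounts for the trailing elements, so callers
 * compute the buffer length with ice_struct_size() instead. */
static enum ice_status
example_alloc_res(struct ice_hw *hw, u16 num_entries)
{
        struct ice_aqc_alloc_free_res_elem *buf;
        u16 buf_len;

        /* header plus num_entries trailing elements */
        buf_len = ice_struct_size(buf, elem, num_entries);
        buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!buf)
                return ICE_ERR_NO_MEMORY;

        buf->num_elems = CPU_TO_LE16(num_entries);
        /* ... fill buf->elem[0..num_entries-1], then send the
         * 0x0208/0x0209 command with (buf, buf_len) ... */
        ice_free(hw, buf);
        return ICE_SUCCESS;
}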


@ -242,7 +242,7 @@ ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
ice_bitmap_t mask;
u16 i;
/* Handle all but last chunk*/
/* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] | bmp2[i];
@ -273,7 +273,7 @@ ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
ice_bitmap_t mask;
u16 i;
/* Handle all but last chunk*/
/* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] ^ bmp2[i];
@ -286,6 +286,37 @@ ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
}
/**
* ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap of ANDNOT operation
* @bmp2: The second bitmap to ANDNOT operation
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise ANDNOT on two "source" bitmaps of the same
* size, and stores the result to "dst" bitmap. The "dst" bitmap must be of the
* same size as the "source" bitmaps to avoid buffer overflows.
*/
static inline void
ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
const ice_bitmap_t *bmp2, u16 size)
{
ice_bitmap_t mask;
u16 i;
/* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] & ~bmp2[i];
/* We want to only clear bits within the size. Furthermore, we also do
* not want to modify destination bits which are beyond the specified
* size. Use a bitmask to ensure that we only modify the bits that are
* within the specified size.
*/
mask = LAST_CHUNK_MASK(size);
dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask);
}
/**
* ice_find_next_bit - Find the index of the next set bit of a bitmap
* @bitmap: the bitmap to scan
@ -343,6 +374,11 @@ static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size)
return ice_find_next_bit(bitmap, size, 0);
}
#define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
(_bitpos) < (_maxlen); \
(_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
/**
* ice_is_any_bit_set - Return true if any bit in the bitmap is set
* @bitmap: the bitmap to check
@ -372,6 +408,48 @@ static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size)
ICE_NONDMA_TO_NONDMA);
}
/**
* ice_bitmap_set - set a number of bits in bitmap from a starting position
* @dst: bitmap destination
* @pos: first bit position to set
* @num_bits: number of bits to set
*
* This function sets bits in a bitmap from pos to (pos + num_bits) - 1.
* Note that this function assumes it is operating on a bitmap declared using
* ice_declare_bitmap.
*/
static inline void
ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
{
u16 i;
for (i = pos; i < pos + num_bits; i++)
ice_set_bit(i, dst);
}
/**
* ice_bitmap_hweight - hamming weight of bitmap
* @bm: bitmap pointer
* @size: size of bitmap (in bits)
*
* This function determines the number of set bits in a bitmap.
* Note that this function assumes it is operating on a bitmap declared using
* ice_declare_bitmap.
*/
static inline int
ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
{
int count = 0;
u16 bit = 0;
while (size > (bit = ice_find_next_bit(bm, size, bit))) {
count++;
bit++;
}
return count;
}
/**
* ice_cmp_bitmap - compares two bitmaps.
* @bmp1: the bitmap to compare
@ -386,12 +464,12 @@ ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
ice_bitmap_t mask;
u16 i;
/* Handle all but last chunk*/
/* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
if (bmp1[i] != bmp2[i])
return false;
/* We want to only compare bits within the size.*/
/* We want to only compare bits within the size */
mask = LAST_CHUNK_MASK(size);
if ((bmp1[i] & mask) != (bmp2[i] & mask))
return false;
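
A short sketch exercising the helpers added above (ice_bitmap_set(),
ice_for_each_set_bit(), ice_bitmap_hweight()). The function name and bitmap
width are illustrative, and ice_zero_bitmap() is assumed to be the header's
usual clearing helper alongside ice_declare_bitmap():

#define EX_NBITS 16     /* illustrative bitmap width */

static void
example_walk_bits(void)
{
        ice_declare_bitmap(map, EX_NBITS);
        u16 pos;

        ice_zero_bitmap(map, EX_NBITS);
        ice_bitmap_set(map, 2, 3);      /* sets bits 2..4 */

        /* visit each set bit without open-coding the find loop */
        ice_for_each_set_bit(pos, map, EX_NBITS)
                printf("bit %u set\n", pos);

        printf("%d bits set\n", ice_bitmap_hweight(map, EX_NBITS));
}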

File diff suppressed because it is too large.


@ -46,12 +46,18 @@ enum ice_fw_modes {
ICE_FW_MODE_ROLLBACK
};
/* prototype for functions used for SW locks */
void ice_free_list(struct LIST_HEAD_TYPE *list);
void ice_init_lock(struct ice_lock *lock);
void ice_acquire_lock(struct ice_lock *lock);
void ice_release_lock(struct ice_lock *lock);
void ice_destroy_lock(struct ice_lock *lock);
void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@ -147,7 +153,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
@ -165,9 +172,6 @@ enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@ -186,6 +190,7 @@ bool ice_fw_supports_link_override(struct ice_hw *hw);
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@ -248,6 +253,7 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
struct ice_q_ctx *
@ -263,7 +269,6 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_generic_mac(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
@ -276,10 +281,16 @@ ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
struct ice_aqc_txsched_elem_data *buf);
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
#endif /* _ICE_COMMON_H_ */
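
A sketch of how the new LLDP filter-control prototypes above fit together:
gate the 0x0A0A AQ command on firmware support before directing LLDP frames
to a VSI. The function name and the choice of debug mask are assumptions:

static void
example_lldp_fltr_add(struct ice_hw *hw, u16 vsi_num)
{
        enum ice_status status;

        if (!ice_fw_supports_lldp_fltr_ctrl(hw))
                return; /* older firmware: command not available */

        status = ice_lldp_fltr_add_remove(hw, vsi_num, true /* add */);
        if (status)
                ice_debug(hw, ICE_DBG_AQ_MSG,
                    "LLDP filter add failed for VSI %u\n", vsi_num);
}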


@ -41,6 +41,7 @@ do { \
(qinfo)->sq.bal = prefix##_ATQBAL; \
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
(qinfo)->rq.head = prefix##_ARQH; \
(qinfo)->rq.tail = prefix##_ARQT; \
@ -49,6 +50,7 @@ do { \
(qinfo)->rq.bal = prefix##_ARQBAL; \
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
@ -208,7 +210,9 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
i--;
for (; i >= 0; i--)
ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
cq->rq.r.rq_bi = NULL;
ice_free(hw, cq->rq.dma_head);
cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@ -246,7 +250,9 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
i--;
for (; i >= 0; i--)
ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
cq->sq.r.sq_bi = NULL;
ice_free(hw, cq->sq.dma_head);
cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@ -305,6 +311,24 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return ICE_SUCCESS;
}
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
/* free descriptors */ \
if ((qi)->ring.r.ring##_bi) { \
int i; \
\
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) \
ice_free_dma_mem((hw), \
&(qi)->ring.r.ring##_bi[i]); \
} \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
ice_free(hw, (qi)->ring.cmd_buf); \
/* free DMA head */ \
ice_free(hw, (qi)->ring.dma_head); \
} while (0)
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@ -360,6 +384,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@ -421,27 +446,13 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
int i; \
/* free descriptors */ \
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) \
ice_free_dma_mem((hw), \
&(qi)->ring.r.ring##_bi[i]); \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
ice_free(hw, (qi)->ring.cmd_buf); \
/* free DMA head */ \
ice_free(hw, (qi)->ring.dma_head); \
} while (0)
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@ -650,73 +661,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return ret_code;
}
/**
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
*
* NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
enum ice_status status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Init FW admin queue */
status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
if (status)
return status;
status = ice_init_check_adminq(hw);
if (status)
return status;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
* ice_init_ctrlq_locks - Initialize locks for a control queue
* @cq: pointer to the control queue
*
* Initializes the send and receive queue locks for a given control queue.
*/
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
ice_init_lock(&cq->sq_lock);
ice_init_lock(&cq->rq_lock);
}
/**
* ice_create_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
*
* This function creates all the control queue locks and then calls
* ice_init_all_ctrlq. It should be called once during driver load. If the
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
}
/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
@ -764,14 +708,90 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
*
* NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
enum ice_status status;
u32 retry = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Init FW admin queue */
do {
status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
if (status)
return status;
status = ice_init_check_adminq(hw);
if (status != ICE_ERR_AQ_FW_CRITICAL)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
if (status)
return status;
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
/**
* ice_init_ctrlq_locks - Initialize locks for a control queue
* @cq: pointer to the control queue
*
* Initializes the send and receive queue locks for a given control queue.
*/
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
ice_init_lock(&cq->sq_lock);
ice_init_lock(&cq->rq_lock);
}
/**
* ice_create_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
* Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
*
* This function creates all the control queue locks and then calls
* ice_init_all_ctrlq. It should be called once during driver load. If the
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
}
/**
* ice_destroy_ctrlq_locks - Destroy locks for a control queue
* @cq: pointer to the control queue
*
* Destroys the send and receive queue locks for a given control queue.
*/
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
ice_destroy_lock(&cq->sq_lock);
ice_destroy_lock(&cq->rq_lock);
@ -813,8 +833,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
ntc++;
@ -852,8 +871,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
datalen = LE16_TO_CPU(cq_desc->datalen);
flags = LE16_TO_CPU(cq_desc->flags);
ice_debug(hw, ICE_DBG_AQ_DESC,
"CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(cq_desc->opcode), flags, datalen,
LE16_TO_CPU(cq_desc->retval));
ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
@ -926,8 +944,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Send queue not initialized.\n");
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
}
@ -939,8 +956,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf) {
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Invalid buffer size for Control Send queue: %d.\n",
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
status = ICE_ERR_INVAL_SIZE;
goto sq_send_command_error;
@ -953,8 +969,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
val = rd32(hw, cq->sq.head);
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"head overrun at %d in the Control Send Queue ring\n",
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
@ -972,8 +987,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* called in a separate thread in case of asynchronous completions.
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Error: Control Send Queue is full.\n");
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
status = ICE_ERR_AQ_FULL;
goto sq_send_command_error;
}
@ -1002,8 +1016,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_DESC,
"ATQ: Control Send queue desc and buffer:\n");
ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
@ -1029,8 +1042,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
u16 copy_size = LE16_TO_CPU(desc->datalen);
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Return len %d > than buf len %d\n",
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
status = ICE_ERR_AQ_ERROR;
} else {
@ -1040,8 +1052,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
retval = LE16_TO_CPU(desc->retval);
if (retval) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Send Queue command 0x%04X completed with error 0x%X\n",
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
LE16_TO_CPU(desc->opcode),
retval);
@ -1054,8 +1065,7 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq->sq_last_status = (enum ice_aq_err)retval;
}
ice_debug(hw, ICE_DBG_AQ_MSG,
"ATQ: desc and buffer writeback:\n");
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
ice_debug_cq(hw, (void *)desc, buf, buf_size);
@ -1066,9 +1076,14 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
/* update the error if time out occurred */
if (!cmd_completed) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT;
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
status = ICE_ERR_AQ_FW_CRITICAL;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
status = ICE_ERR_AQ_TIMEOUT;
}
}
sq_send_command_error:
@ -1151,8 +1166,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_acquire_lock(&cq->rq_lock);
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Receive queue not initialized.\n");
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
ret_code = ICE_ERR_AQ_EMPTY;
goto clean_rq_elem_err;
}
@ -1174,8 +1188,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
flags = LE16_TO_CPU(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Receive Queue Event 0x%04X received with error 0x%X\n",
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
LE16_TO_CPU(desc->opcode),
cq->rq_last_status);
}
@ -1188,8 +1201,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
ice_debug_cq(hw, (void *)desc, e->msg_buf,
cq->rq_buf_size);
ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size


@ -63,6 +63,8 @@ enum ice_ctl_q {
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
@ -88,6 +90,7 @@ struct ice_ctl_q_ring {
u32 bal;
u32 len_mask;
u32 len_ena_mask;
u32 len_crit_mask;
u32 head_mask;
};
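
The new len_crit_mask mirrors the ATQCRIT/ARQCRIT register bits; a sketch of
the check the send path now performs on timeout (compare the ice_controlq.c
hunk above). The helper name is illustrative:

static bool
example_cq_fw_critical(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* a set critical bit in either queue's LEN register means the
         * firmware hit a critical error, which is reported as
         * ICE_ERR_AQ_FW_CRITICAL instead of a plain timeout */
        return ((rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) ||
            (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask));
}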


@ -265,39 +265,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
* @buf: pointer to the caller-supplied buffer to store the MIB block
* @buf_size: size of the buffer (in bytes)
* @cd: pointer to command details structure or NULL
*
* Set the LLDP MIB. (0x0A08)
*/
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
desc.datalen = CPU_TO_LE16(buf_size);
cmd->type = mib_type;
cmd->length = CPU_TO_LE16(buf_size);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
@ -761,8 +728,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
enum ice_status
ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
enum ice_status ret = ICE_SUCCESS;
@ -1140,9 +1106,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
return ICE_ERR_PARAM;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->local_dcbx_cfg;
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
else if (dcbx_mode == ICE_DCBX_MODE_CEE)
dcbx_cfg = &pi->desired_dcbx_cfg;
dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
* or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
@ -1153,7 +1119,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
goto out;
/* Get Remote DCB Config */
dcbx_cfg = &pi->remote_dcbx_cfg;
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
@ -1182,14 +1148,14 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (ret == ICE_SUCCESS) {
/* CEE mode */
dcbx_cfg = &pi->local_dcbx_cfg;
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status);
ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->local_dcbx_cfg;
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
}
@ -1206,26 +1172,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*/
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_port_info *pi = hw->port_info;
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret = ICE_SUCCESS;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
pi->is_sw_lldp = true;
qos_cfg->is_sw_lldp = true;
/* Get DCBX status */
pi->dcbx_status = ice_get_dcbx_status(hw);
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
ret = ice_get_dcb_cfg(hw->port_info);
if (ret)
return ret;
pi->is_sw_lldp = false;
} else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@ -1233,7 +1199,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
if (ret)
pi->is_sw_lldp = true;
qos_cfg->is_sw_lldp = true;
}
return ret;
@ -1248,21 +1214,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*/
enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_port_info *pi = hw->port_info;
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
/* Get DCBX status */
pi->dcbx_status = ice_get_dcbx_status(hw);
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
pi->is_sw_lldp = !ena_mib;
qos_cfg->is_sw_lldp = !ena_mib;
return ret;
}
@ -1559,7 +1525,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
hw = pi->hw;
/* update the HW local config */
dcbcfg = &pi->local_dcbx_cfg;
dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
/* Allocate the LLDPDU */
lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
if (!lldpmib)
@ -1618,7 +1584,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_get_elem elem;
struct ice_aqc_txsched_elem_data elem;
enum ice_status status = ICE_SUCCESS;
u32 teid1, teid2;
u8 i, j;
@ -1660,7 +1626,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
status = ice_sched_add_node(pi, 1, &elem.generic[0]);
status = ice_sched_add_node(pi, 1, &elem);
if (status)
break;
/* update the TC number */
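
The recurring change in this file moves DCBX state from loose ice_port_info
fields into a dedicated ice_qos_cfg sub-structure. A sketch of the access
pattern after the refactor; the helper name is illustrative and the field
names follow the diff:

static bool
example_dcb_ieee_sw_lldp(struct ice_port_info *pi)
{
        struct ice_qos_cfg *qos_cfg = &pi->qos_cfg;

        /* previously pi->is_sw_lldp and pi->local_dcbx_cfg */
        return (qos_cfg->is_sw_lldp &&
            qos_cfg->local_dcbx_cfg.dcbx_mode == ICE_DCBX_MODE_IEEE);
}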


@ -131,17 +131,11 @@
#define ICE_IEEE_APP_TLV_LEN 11
#pragma pack(1)
/* IEEE 802.1AB LLDP TLV structure */
struct ice_lldp_generic_tlv {
__be16 typelen;
u8 tlvinfo[1];
};
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
u8 tlvinfo[1];
u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
@ -164,7 +158,7 @@ struct ice_cee_feat_tlv {
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
u8 tlvinfo[1];
u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack(1)
@ -219,9 +213,6 @@ ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd);
enum ice_status


@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "0.26.0-k";
const char ice_driver_version[] = "0.26.16-k";
const uint8_t ice_major_version = 0;
const uint8_t ice_minor_version = 26;
const uint8_t ice_patch_version = 0;
const uint8_t ice_patch_version = 16;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 0.26.0-k")
PVID(vendor, devid, name " - 0.26.16-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.0-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.16-k")
/**
* @var ice_vendor_info_array

File diff suppressed because it is too large.


@ -50,9 +50,6 @@
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
enum ice_status
@ -103,9 +100,9 @@ enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
struct ice_prof_map *
enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
struct ice_prof_map *
enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
@ -124,8 +121,4 @@ ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len);
#endif /* _ICE_FLEX_PIPE_H_ */


@ -55,7 +55,7 @@ struct ice_fv {
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[1];
__le32 seg_offset[STRUCT_HACK_VAR_LEN];
};
/* generic segment */
@ -86,12 +86,12 @@ struct ice_device_id_entry {
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[1];
struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[1];
__le32 vers[STRUCT_HACK_VAR_LEN];
};
struct ice_buf {
@ -101,7 +101,7 @@ struct ice_buf {
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[1];
struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
/* global metadata specific segment */
@ -134,11 +134,12 @@ struct ice_section_entry {
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[1];
struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
@ -365,17 +366,17 @@ struct ice_label {
struct ice_label_section {
__le16 count;
struct ice_label label[1];
struct ice_label label[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_label_section) - sizeof(struct ice_label), \
sizeof(struct ice_label))
ice_struct_size((struct ice_label_section *)0, label, 1) - \
sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[1];
struct ice_fv fv[STRUCT_HACK_VAR_LEN];
};
struct ice_sw_fv_list_entry {
@ -420,43 +421,32 @@ struct ice_boost_tcam_entry {
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[1];
struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
sizeof(struct ice_boost_tcam_section) - \
ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
#pragma pack(1)
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[1];
u8 value[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \
(sizeof(u8) * ((n) - 1)))
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[1];
__le16 value[STRUCT_HACK_VAR_LEN];
};
#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \
(sizeof(u16) * ((n) - 1)))
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[1];
u8 redir_value[STRUCT_HACK_VAR_LEN];
};
#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \
(sizeof(u8) * ((n) - 1)))
/* package buffer building */
struct ice_buf_build {
@ -513,7 +503,7 @@ struct ice_tunnel_table {
struct ice_pkg_es {
__le16 count;
__le16 offset;
struct ice_fv_word es[1];
struct ice_fv_word es[STRUCT_HACK_VAR_LEN];
};
struct ice_es {
@ -664,12 +654,12 @@ struct ice_prof_tcam_entry {
u8 key[ICE_TCAM_KEY_SZ];
u8 prof_id;
};
#pragma pack()
struct ice_prof_id_section {
__le16 count;
struct ice_prof_tcam_entry entry[1];
struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
struct ice_prof_tcam {
u32 sid;

File diff suppressed because it is too large.


@ -34,6 +34,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS 0
@ -85,6 +86,10 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
* ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
*/
ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};
enum ice_flow_field {
@ -180,6 +185,19 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
enum ice_rss_hash_func {
ICE_RSS_HASH_TOEPLITZ = 0,
ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1,
ICE_RSS_HASH_XOR = 2,
ICE_RSS_HASH_JHASH = 3,
};
struct ice_rss_hash_cfg {
u32 addl_hdrs;
u64 hash_flds;
enum ice_rss_hash_func hash_func;
};
enum ice_flow_dir {
ICE_FLOW_DIR_UNDEFINED = 0,
ICE_FLOW_TX = 0x01,
@ -194,9 +212,7 @@ enum ice_flow_priority {
};
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_PROFILE_MAX 1024
#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
#define ICE_FLOW_FV_EXTRACT_SZ 2
@ -234,40 +250,15 @@ struct ice_flow_fld_info {
struct ice_flow_seg_xtrct xtrct;
};
struct ice_flow_seg_fld_raw {
struct ice_flow_fld_info info;
u16 off; /* Offset from the start of the segment */
};
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
u8 raws_cnt; /* Number of raw fields to be matched */
struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
};
/* This structure describes a flow entry, and is tracked only in this file */
struct ice_flow_entry {
struct LIST_ENTRY_TYPE l_entry;
u64 id;
struct ice_flow_prof *prof;
/* Action list */
struct ice_flow_action *acts;
/* Flow entry's content */
void *entry;
enum ice_flow_priority priority;
u16 vsi_handle;
u16 entry_sz;
u8 acts_cnt;
};
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
struct ice_flow_prof {
struct LIST_ENTRY_TYPE l_entry;
@ -275,11 +266,6 @@ struct ice_flow_prof {
u64 id;
enum ice_flow_dir dir;
u8 segs_cnt;
u8 acts_cnt;
/* Keep track of flow entries associated with this flow profile */
struct ice_lock entries_lock;
struct LIST_HEAD_TYPE entries;
struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
@ -288,12 +274,7 @@ struct ice_flow_prof {
union {
/* struct sw_recipe */
/* struct fd */
u32 data;
} cfg;
/* Default actions */
struct ice_flow_action *acts;
};
struct ice_rss_cfg {
@ -338,36 +319,14 @@ u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt);
enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof);
enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof);
u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
enum ice_status
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, struct ice_flow_action *acts, u8 acts_cnt,
u64 *entry_h);
enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 prefix_loc, u8 prefix_sz);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status


@ -5260,8 +5260,8 @@
#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)


@ -190,7 +190,9 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
#define ICE_FXD_FLTR_QW1_FDID_PRI_ZERO 0x0ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL
#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
@ -1049,7 +1051,7 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
u8 int_q_state; /* width not needed - internal do not write */
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
/* LAN Tx Completion Queue data */


@ -143,8 +143,8 @@ static void
ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
u64 *phy_type_high);
static int
ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
u64 *phy_type_high);
ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds,
u64 *phy_type_low, u64 *phy_type_high);
static int
ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low,
u64 *phy_type_high);
@ -1388,44 +1388,54 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
int
ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
{
struct ice_aqc_add_tx_qgrp qg = { 0 };
struct ice_aqc_add_tx_qgrp *qg;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
enum ice_status status;
int i, err;
u16 pf_q;
int i;
int err = 0;
u16 qg_size, pf_q;
qg.num_txqs = 1;
qg_size = ice_struct_size(qg, txqs, 1);
qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO);
if (!qg)
return (ENOMEM);
qg->num_txqs = 1;
for (i = 0; i < vsi->num_tx_queues; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_tx_queue *txq = &vsi->tx_queues[i];
pf_q = vsi->tx_qmap[txq->me];
qg.txqs[0].txq_id = htole16(pf_q);
qg->txqs[0].txq_id = htole16(pf_q);
err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q);
if (err)
return err;
goto free_txqg;
ice_set_ctx((u8 *)&tlan_ctx, qg.txqs[0].txq_ctx,
ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx,
ice_tlan_ctx_info);
status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0,
i, 1, &qg, sizeof(qg), NULL);
i, 1, qg, qg_size, NULL);
if (status) {
device_printf(dev,
"Failed to set LAN Tx queue context, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
return (ENODEV);
err = ENODEV;
goto free_txqg;
}
/* Keep track of the Tx queue TEID */
if (pf_q == le16toh(qg.txqs[0].txq_id))
txq->q_teid = le32toh(qg.txqs[0].q_teid);
if (pf_q == le16toh(qg->txqs[0].txq_id))
txq->q_teid = le32toh(qg->txqs[0].q_teid);
}
return (0);
free_txqg:
free(qg, M_ICE);
return (err);
}
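The conversion above from a stack struct to a heap allocation follows the tree-wide move from one-element trailing arrays to flexible array members (STRUCT_HACK_VAR_LEN). A minimal sketch of the sizing pattern, with illustrative names rather than the driver's real types:

#include <stdlib.h>

/* Illustrative stand-in for the driver's variable-length AQ buffers;
 * demo_struct_size() plays the role of ice_struct_size(). */
struct demo_qgrp {
	unsigned short num_txqs;
	struct { unsigned short txq_id; } txqs[]; /* flexible array member */
};

#define demo_struct_size(ptr, member, n) \
	(sizeof(*(ptr)) + (n) * sizeof(*(ptr)->member))

static struct demo_qgrp *
demo_alloc_qgrp(unsigned short nqueues)
{
	struct demo_qgrp *qg;

	/* One allocation covers the header plus nqueues entries. */
	qg = calloc(1, demo_struct_size(qg, txqs, nqueues));
	if (qg == NULL)
		return (NULL);
	qg->num_txqs = nqueues;
	return (qg);
}

Because sizeof on a flexible-array struct excludes the trailing array, the count passed to the size helper is the real element count, with no "- 1" adjustment.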
/**
@ -2343,6 +2353,10 @@ ice_update_pf_stats(struct ice_softc *sc)
ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast);
ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast);
ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast);
/* This stat register doesn't have an lport */
ice_stat_update32(hw, PRTRPB_RDPC,
sc->stats.offsets_loaded,
&prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards);
ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down);
ICE_PF_STAT40(GLPRT_PRC64, rx_size_64);
@ -2808,6 +2822,7 @@ ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
/**
* ice_intersect_media_types_with_caps - Restrict input AQ PHY flags
* @sc: driver private structure
* @sysctl_speeds: current SW configuration of PHY types
* @phy_type_low: input/output flag set for low PHY types
* @phy_type_high: input/output flag set for high PHY types
*
@ -2819,34 +2834,101 @@ ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
* mode
*/
static int
ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
u64 *phy_type_high)
ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds,
u64 *phy_type_low, u64 *phy_type_high)
{
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
struct ice_port_info *pi = sc->hw.port_info;
device_t dev = sc->dev;
enum ice_status status;
u64 temp_phy_low, temp_phy_high;
u64 final_phy_low, final_phy_high;
u16 topo_speeds;
u64 new_phy_low, new_phy_high;
status = ice_get_phy_types(sc, &new_phy_low, &new_phy_high);
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
&pcaps, NULL);
if (status != ICE_SUCCESS) {
/* Function already prints appropriate error message */
device_printf(dev,
"%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
ice_aq_str(sc->hw.adminq.sq_last_status));
return (EIO);
}
ice_apply_supported_speed_filter(&new_phy_low, &new_phy_high);
final_phy_low = le64toh(pcaps.phy_type_low);
final_phy_high = le64toh(pcaps.phy_type_high);
new_phy_low &= *phy_type_low;
new_phy_high &= *phy_type_high;
topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low,
final_phy_high);
if (new_phy_low == 0 && new_phy_high == 0) {
/*
* If the user specifies a subset of speeds the media is already
* capable of supporting, then we're good to go.
*/
if ((sysctl_speeds & topo_speeds) == sysctl_speeds)
goto intersect_final;
temp_phy_low = final_phy_low;
temp_phy_high = final_phy_high;
/*
* Otherwise, we'll have to use the superset if Lenient Mode is
* supported.
*/
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) {
/*
* Start with masks that _don't_ include the PHY types
* discovered by the TOPO_CAP.
*/
ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low,
&final_phy_high);
final_phy_low = ~final_phy_low;
final_phy_high = ~final_phy_high;
/* Get the PHY types the NVM says we can support */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP,
&pcaps, NULL);
if (status != ICE_SUCCESS) {
device_printf(dev,
"%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
ice_aq_str(sc->hw.adminq.sq_last_status));
return (EIO);
}
/*
* Clear out the unsupported PHY types, including those
* from TOPO_CAP.
*/
final_phy_low &= le64toh(pcaps.phy_type_low);
final_phy_high &= le64toh(pcaps.phy_type_high);
/*
* Include PHY types from TOPO_CAP (which may be a subset
* of the types the NVM specifies).
*/
final_phy_low |= temp_phy_low;
final_phy_high |= temp_phy_high;
}
intersect_final:
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE))
ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high);
ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low,
&temp_phy_high);
final_phy_low &= temp_phy_low;
final_phy_high &= temp_phy_high;
if (final_phy_low == 0 && final_phy_high == 0) {
device_printf(dev,
"The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
return (EINVAL);
}
/* Overwrite input phy_type values and return */
*phy_type_low = new_phy_low;
*phy_type_high = new_phy_high;
*phy_type_low = final_phy_low;
*phy_type_high = final_phy_high;
return (0);
}
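In toy form, the lenient-mode composition above reduces to three mask operations (collapsing the speed-to-PHY-type translation into a direct complement; the 8-bit values are hypothetical stand-ins for the 64-bit PHY type words):

static unsigned char
demo_lenient_masks(void)
{
	unsigned char topo = 0x0F;	/* media-reported PHY types (TOPO_CAP) */
	unsigned char nvm = 0x3F;	/* NVM-permitted PHY types (NVM_CAP) */
	unsigned char final;

	final = (unsigned char)~topo;	/* start without the TOPO_CAP types */
	final &= nvm;			/* drop anything the NVM disallows */
	final |= topo;			/* re-include the media's own types */

	/* final == 0x3F: the NVM superset including the media's types,
	 * which then gets ANDed with the requested sysctl speeds. */
	return (final);
}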
@ -2960,8 +3042,8 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
/* Function already prints appropriate error message */
return (error);
} else {
ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &phy_low, &phy_high);
error = ice_intersect_media_types_with_caps(sc, &phy_low, &phy_high);
error = ice_intersect_media_types_with_caps(sc, sysctl_speeds,
&phy_low, &phy_high);
if (error)
/* Function already prints appropriate error message */
return (error);
@ -2976,7 +3058,7 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
cfg.phy_type_low = phy_low;
cfg.phy_type_high = phy_high;
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
if (status != ICE_SUCCESS) {
@ -3752,9 +3834,10 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
return (EIO);
}
ice_aq_set_dcb_parameters(hw, true, NULL);
hw->port_info->is_sw_lldp = true;
hw->port_info->qos_cfg.is_sw_lldp = true;
ice_add_rx_lldp_filter(sc);
} else {
ice_del_rx_lldp_filter(sc);
retry_start_lldp:
status = ice_aq_start_lldp(hw, true, NULL);
if (status) {
@ -3778,8 +3861,7 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
return (EIO);
}
}
hw->port_info->is_sw_lldp = false;
ice_del_rx_lldp_filter(sc);
hw->port_info->qos_cfg.is_sw_lldp = false;
}
return (error);
@ -3977,8 +4059,9 @@ struct ice_sysctl_info {
* Adds statistics sysctls for the ethernet statistics of the MAC or a VSI.
* Will add them under the parent node specified.
*
* Note that rx_discards and tx_errors are only meaningful for VSIs and not
* the global MAC/PF statistics, so they are not included here.
* Note that tx_errors is only meaningful for VSIs and not the global MAC/PF
* statistics, so it is not included here. Similarly, rx_discards has different
* descriptions for VSIs and MAC/PF stats, so it is also not included here.
*/
void
ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
@ -4195,7 +4278,7 @@ ice_add_vsi_sysctls(struct ice_vsi *vsi)
SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards",
CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards,
0, "Discarded Rx Packets");
0, "Discarded Rx Packets (see rx_errors or rx_no_desc)");
SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors",
CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors,
@ -4256,6 +4339,8 @@ ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
{&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"},
{&stats->eth.rx_discards, "rx_discards",
"Discarded Rx Packets by Port (shortage of storage space)"},
/* Packet Transmission Stats */
{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
@ -6652,7 +6737,7 @@ ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status)
break;
}
reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x14;
reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13;
switch (reg) {
case ice_pcie_speed_2_5GT:
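The constant change above is an off-by-one fix: the PCIe link-status speed field is 1-based (1 = 2.5GT/s), while the ice_pcie_speed_* enum presumably mirrors the 0x14-based PCI bus-speed numbering, so biasing by 0x13 maps speed value 1 onto ice_pcie_speed_2_5GT; the old +0x14 reported every link one speed class too high.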
@ -6845,9 +6930,9 @@ ice_init_dcb_setup(struct ice_softc *sc)
return;
}
hw->port_info->dcbx_status = ice_get_dcbx_status(hw);
if (hw->port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
hw->port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE &&
hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
/*
* Start DCBX agent, but not LLDP. The return value isn't
* checked here because a more detailed dcbx agent status is
@ -6856,7 +6941,7 @@ ice_init_dcb_setup(struct ice_softc *sc)
ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
}
/* This sets hw->port_info->is_sw_lldp */
/* This sets hw->port_info->qos_cfg.is_sw_lldp */
status = ice_init_dcb(hw, true);
/* If there is an error, then FW LLDP is not in a usable state */
@ -6871,10 +6956,10 @@ ice_init_dcb_setup(struct ice_softc *sc)
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
}
switch (hw->port_info->dcbx_status) {
switch (hw->port_info->qos_cfg.dcbx_status) {
case ICE_DCBX_STATUS_DIS:
ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n");
break;
@ -6889,11 +6974,9 @@ ice_init_dcb_setup(struct ice_softc *sc)
}
/* LLDP disabled in FW */
if (hw->port_info->is_sw_lldp) {
if (hw->port_info->qos_cfg.is_sw_lldp) {
ice_add_rx_lldp_filter(sc);
device_printf(dev, "Firmware LLDP agent disabled\n");
} else {
ice_del_rx_lldp_filter(sc);
}
}
@ -7117,6 +7200,25 @@ ice_add_rx_lldp_filter(struct ice_softc *sc)
device_t dev = sc->dev;
enum ice_status status;
int err;
u16 vsi_num;
/*
* If FW is new enough, use a direct AQ command to perform the filter
* addition.
*/
if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
status = ice_lldp_fltr_add_remove(hw, vsi_num, true);
if (status) {
device_printf(dev,
"Failed to add Rx LLDP filter, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
} else
ice_set_state(&sc->state,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
return;
}
INIT_LIST_HEAD(&ethertype_list);
@ -7132,13 +7234,17 @@ ice_add_rx_lldp_filter(struct ice_softc *sc)
}
status = ice_add_eth_mac(hw, &ethertype_list);
if (status == ICE_ERR_ALREADY_EXISTS) {
; /* Don't complain if we try to add a filter that already exists */
} else if (status) {
if (status && status != ICE_ERR_ALREADY_EXISTS) {
device_printf(dev,
"Failed to add Rx LLDP filter, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
} else {
/*
* If status == ICE_ERR_ALREADY_EXISTS, we won't treat an
* already existing filter as an error case.
*/
ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
}
free_ethertype_list:
@ -7162,6 +7268,31 @@ ice_del_rx_lldp_filter(struct ice_softc *sc)
device_t dev = sc->dev;
enum ice_status status;
int err;
u16 vsi_num;
/*
* Only in the scenario where the driver added the filter during
* this session (while the driver was loaded) would we be able to
* delete this filter.
*/
if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER))
return;
/*
* If FW is new enough, use a direct AQ command to perform the filter
* removal.
*/
if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
status = ice_lldp_fltr_add_remove(hw, vsi_num, false);
if (status) {
device_printf(dev,
"Failed to remove Rx LLDP filter, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
return;
}
INIT_LIST_HEAD(&ethertype_list);
@ -7693,7 +7824,6 @@ ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u1
struct ice_hw *hw = &sc->hw;
int error = 0, retries = 0;
enum ice_status status;
u16 lport;
if (length > 16)
return (EINVAL);
@ -7704,11 +7834,8 @@ ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u1
if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
return (ENXIO);
/* Set bit to indicate lport value is valid */
lport = hw->port_info->lport | (0x1 << 8);
do {
status = ice_aq_sff_eeprom(hw, lport, dev_addr,
status = ice_aq_sff_eeprom(hw, 0, dev_addr,
offset, 0, 0, data, length,
false, NULL);
if (!status) {
@ -7998,3 +8125,50 @@ ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high)
return (ICE_SUCCESS);
}
/**
* ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings
* @sc: device softc structure
*
* This function needs to be called after link up; it makes sure the FW
* has certain PFC/DCB settings. This is intended to work around a FW behavior
* where these settings seem to be cleared on link up.
*/
void
ice_set_default_local_lldp_mib(struct ice_softc *sc)
{
struct ice_dcbx_cfg *dcbcfg;
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
enum ice_status status;
pi = hw->port_info;
dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
/* This value is only 3 bits wide; a value of 8 TCs wraps to 0 */
u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M;
/*
* Set up the default settings used by the driver for the Set Local
* LLDP MIB Admin Queue command (0x0A08): 1 TC w/ 100% BW, ETS, no
* PFC.
*/
memset(dcbcfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = 1;
dcbcfg->etscfg.tcbwtable[0] = 100;
dcbcfg->etscfg.maxtcs = maxtcs;
dcbcfg->etsrec.willing = 1;
dcbcfg->etsrec.tcbwtable[0] = 100;
dcbcfg->etsrec.maxtcs = maxtcs;
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = maxtcs;
status = ice_set_dcb_cfg(pi);
if (status)
device_printf(dev,
"Error setting Local LLDP MIB: %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
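Per the comment above, the expected call site is the link-up path; a hypothetical fragment (the handler name and link_up flag are illustrative, not taken from this commit):

/* Hypothetical link-state handler fragment. */
static void
demo_link_status_change(struct ice_softc *sc, bool link_up)
{
	if (link_up) {
		/* FW may have cleared the PFC/DCB defaults on link up;
		 * push the driver's default Local LLDP MIB back down. */
		ice_set_default_local_lldp_mib(sc);
	}
}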


@ -527,6 +527,7 @@ enum ice_state {
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
/* This entry must be last */
ICE_STATE_LAST,
};
@ -807,5 +808,6 @@ int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
#endif /* _ICE_LIB_H_ */


@ -86,7 +86,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
*
* Reads a portion of the NVM, as a flat memory space. This function correctly
* breaks read requests across Shadow RAM sectors and ensures that no single
* read request exceeds the maximum 4Kb read for a single AdminQ command.
* read request exceeds the maximum 4KB read for a single AdminQ command.
*
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
@ -106,18 +106,17 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: requested data is beyond Shadow RAM limit\n");
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
do {
u32 read_size, sector_offset;
/* ice_aq_read_nvm cannot read more than 4Kb at a time.
/* ice_aq_read_nvm cannot read more than 4KB at a time.
* Additionally, a read from the Shadow RAM may not cross over
* a sector boundary. Conveniently, the sector size is also
* 4Kb.
* 4KB.
*/
sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
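A worked trace of the sector split above, assuming ICE_AQ_MAX_BUF_LEN is 4096 and using made-up offsets:

#include <sys/param.h>	/* MIN() */

typedef unsigned int u32;	/* stand-in for the driver typedef */

static u32
demo_first_read_size(void)
{
	u32 offset = 5000, inlen = 6000, bytes_read = 0;
	u32 sector_offset, read_size;

	sector_offset = offset % 4096;		/* 904 bytes into the sector */
	read_size = MIN(4096 - sector_offset,	/* 3192: stop at the sector */
	    inlen - bytes_read);		/* boundary, not at 4096 */

	/* This read ends at offset 8192, exactly on a sector boundary; the
	 * remaining 2808 bytes then arrive in one aligned, 4KB-capped read. */
	return (read_size);
}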
@ -157,7 +156,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
static enum ice_status
enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@ -295,15 +294,13 @@ static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->nvm.sr_words) {
ice_debug(hw, ICE_DBG_NVM,
"NVM error: offset beyond SR lmt.\n");
ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n");
return ICE_ERR_PARAM;
}
if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
/* We can access only up to 4KB (one sector), in one AQ write */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: tried to access %d words, limit is %d.\n",
ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n",
words, ICE_SR_SECTOR_SIZE_IN_WORDS);
return ICE_ERR_PARAM;
}
@ -311,8 +308,7 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
(offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
/* A single access cannot spread over two sectors */
ice_debug(hw, ICE_DBG_NVM,
"NVM error: cannot spread over two sectors.\n");
ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n");
return ICE_ERR_PARAM;
}
@ -327,8 +323,7 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
enum ice_status
ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
enum ice_status status;
@ -341,7 +336,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
* boundary
*/
status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
(u8 *)&data_local, true);
(_FORCE_ u8 *)&data_local, true);
if (status)
return status;
@ -393,7 +388,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM
/* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
* sector restrictions necessary when reading from the NVM.
*/
status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
@ -569,16 +564,14 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT,
"Buffer too small for PBA data.\n");
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM;
}
for (i = 0; i < pba_size; i++) {
status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
if (status != ICE_SUCCESS) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read PBA Block word %d.\n", i);
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
return status;
}
@ -607,8 +600,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read Boot Configuration Block TLV.\n");
ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
return status;
}
@ -616,8 +608,7 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
* (Combo Image Version High and Combo Image Version Low)
*/
if (boot_cfg_tlv_len < 2) {
ice_debug(hw, ICE_DBG_INIT,
"Invalid Boot Configuration Block TLV size.\n");
ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
}
@ -646,6 +637,87 @@ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
return ICE_SUCCESS;
}
/**
* ice_get_netlist_ver_info
* @hw: pointer to the HW struct
*
* Get the netlist version information
*/
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
{
struct ice_netlist_ver_info *ver = &hw->netlist_ver;
enum ice_status ret;
u32 id_blk_start;
__le16 raw_data;
u16 data, i;
u16 *buff;
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret)
return ret;
buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
sizeof(*buff));
if (!buff) {
ret = ICE_ERR_NO_MEMORY;
goto exit_no_mem;
}
/* read module length */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
false, false, NULL);
if (ret)
goto exit_error;
data = LE16_TO_CPU(raw_data);
/* exit if length is 0 */
if (!data)
goto exit_error;
/* read node count */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
false, false, NULL);
if (ret)
goto exit_error;
data = LE16_TO_CPU(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
/* netlist ID block starts from offset 4 + node count * 2 */
id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
/* read the entire netlist ID block */
ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
id_blk_start * 2,
ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
false, NULL);
if (ret)
goto exit_error;
for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
/* Read the leftmost 4 bytes of the SHA */
ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
exit_error:
ice_free(hw, buff);
exit_no_mem:
ice_release_nvm(hw);
return ret;
}
/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
@ -673,14 +745,12 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM,
"%s: New upper bound of %u bytes\n",
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = ICE_SUCCESS;
max_size = offset;
} else if (!status) {
ice_debug(hw, ICE_DBG_NVM,
"%s: New lower bound of %u bytes\n",
ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
__func__, offset);
min_size = offset;
} else {
@ -689,8 +759,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
}
}
ice_debug(hw, ICE_DBG_NVM,
"Predicted flash size is %u bytes\n", max_size);
ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
hw->nvm.flash_size = max_size;
@ -733,15 +802,13 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
} else {
/* Blank programming mode */
nvm->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: unsupported blank mode.\n");
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
}
status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
ice_debug(hw, ICE_DBG_INIT,
"Failed to read DEV starter version.\n");
ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
return status;
}
nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
@ -762,32 +829,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
status = ice_discover_flash_size(hw);
if (status) {
ice_debug(hw, ICE_DBG_NVM,
"NVM init error: failed to discover flash size.\n");
ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
return status;
}
switch (hw->device_id) {
/* the following devices do not have boot_cfg_tlv yet */
case ICE_DEV_ID_E822C_BACKPLANE:
case ICE_DEV_ID_E822C_QSFP:
case ICE_DEV_ID_E822C_10G_BASE_T:
case ICE_DEV_ID_E822C_SGMII:
case ICE_DEV_ID_E822C_SFP:
case ICE_DEV_ID_E822L_BACKPLANE:
case ICE_DEV_ID_E822L_SFP:
case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823L_QSFP:
return status;
default:
break;
}
status = ice_get_orom_ver_info(hw);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
@ -943,12 +988,12 @@ static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
if (i == ICE_SR_SW_CHECKSUM_WORD)
continue;
/* Skip VPD module (convert byte size to word count) */
if ((i >= (u32)vpd_module) &&
(i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS)))
if (i >= (u32)vpd_module &&
i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
continue;
/* Skip PCIe ALT module (convert byte size to word count) */
if ((i >= (u32)pcie_alt_module) &&
(i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS)))
if (i >= (u32)pcie_alt_module &&
i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
continue;
checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
@ -1072,7 +1117,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
{
/* The provided data_size must be at least as large as our NVM
* features structure. A larger size should not be treated as an
* error, to allow future extensions to to the features structure to
* error, to allow future extensions to the features structure to
* work on older drivers.
*/
if (cmd->data_size < sizeof(struct ice_nvm_features))
@ -1239,8 +1284,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
break;
}
ice_debug(hw, ICE_DBG_NVM,
"NVM access: writing register %08x with value %08x\n",
ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
cmd->offset, data->regval);
/* Write the data field to the specified register */


@ -135,9 +135,16 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd);
enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
#endif /* _ICE_NVM_H_ */


@ -79,8 +79,10 @@ enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN_GPE,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE, /* GENEVE matches only non-VLAN pkts */
ICE_SW_TUN_GENEVE_VLAN, /* GENEVE matches both VLAN and non-VLAN pkts */
ICE_SW_TUN_VXLAN, /* VXLAN matches only non-VLAN pkts */
ICE_SW_TUN_VXLAN_VLAN, /* VXLAN matches both VLAN and non-VLAN pkts */
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
* and GENEVE
@ -160,7 +162,9 @@ enum ice_prot_id {
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
#define ICE_PROTOCOL_MAX_ENTRIES 16


@ -158,7 +158,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*/
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
@ -178,8 +178,8 @@ enum ice_status
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
enum ice_status status;
struct ice_hw *hw;
@ -193,13 +193,12 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
parent = ice_sched_find_node_by_teid(pi->root,
LE32_TO_CPU(info->parent_teid));
if (!parent) {
ice_debug(hw, ICE_DBG_SCHED,
"Parent Node not found for parent_teid=0x%x\n",
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
LE32_TO_CPU(info->parent_teid));
return ICE_ERR_PARAM;
}
/* query the current node information from FW before additing it
/* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
@ -222,7 +221,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
node->info = elem.generic[0];
node->info = elem;
return ICE_SUCCESS;
}
@ -265,7 +264,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
enum ice_status status;
u16 buf_size;
buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
@ -450,7 +449,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*/
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_conf_elem *buf, u16 buf_size,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
@ -491,8 +490,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
* Suspend scheduling elements (0x0409)
*/
static enum ice_status
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
@ -512,8 +510,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
* resume scheduling elements (0x040A)
*/
static enum ice_status
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_suspend_resume_elem *buf,
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
@ -554,18 +551,17 @@ static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
struct ice_aqc_suspend_resume_elem *buf;
u16 i, buf_size, num_elem_ret = 0;
enum ice_status status;
__le32 *buf;
buf_size = sizeof(*buf) * num_nodes;
buf = (struct ice_aqc_suspend_resume_elem *)
ice_malloc(hw, buf_size);
buf = (__le32 *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_nodes; i++)
buf->teid[i] = CPU_TO_LE32(node_teids[i]);
buf[i] = CPU_TO_LE32(node_teids[i]);
if (suspend)
status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
@ -627,18 +623,18 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
* @opcode:opcode for add, query, or remove profile(s)
* @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @num_processed: number of processed add or remove profile(s) to return
* @cd: pointer to command details structure
*
* Rl profile function to add, query, or remove profile(s)
* RL profile function to add, query, or remove profile(s)
*/
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
@ -669,13 +665,11 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*/
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_generic_elem *buf,
u16 buf_size, u16 *num_profiles_added,
struct ice_sq_cd *cd)
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
num_profiles, buf,
buf_size, num_profiles_added, cd);
return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
buf, buf_size, num_profiles_added, cd);
}
/**
@ -690,8 +684,8 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*/
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_generic_elem *buf,
u16 buf_size, struct ice_sq_cd *cd)
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
num_profiles, buf, buf_size, NULL, cd);
@ -710,13 +704,12 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
*/
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_generic_elem *buf,
u16 buf_size, u16 *num_profiles_removed,
struct ice_sq_cd *cd)
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
num_profiles, buf,
buf_size, num_profiles_removed, cd);
num_profiles, buf, buf_size,
num_profiles_removed, cd);
}
/**
@ -732,7 +725,7 @@ static enum ice_status
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_generic_elem *buf;
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
enum ice_status status;
u16 num_profiles = 1;
@ -741,8 +734,7 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
return ICE_ERR_IN_USE;
/* Safe to remove profile ID */
buf = (struct ice_aqc_rl_profile_generic_elem *)
&rl_info->profile;
buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
@ -777,8 +769,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
"Remove rl profile failed\n");
ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
/* On error, free mem required */
LIST_DEL(&rl_prof_elem->list_entry);
ice_free(hw, rl_prof_elem);
@ -817,7 +808,7 @@ void ice_sched_clear_agg(struct ice_hw *hw)
}
/**
* ice_sched_clear_tx_topo - clears the schduler tree nodes
* ice_sched_clear_tx_topo - clears the scheduler tree nodes
* @pi: port information structure
*
* This function removes all the nodes from HW as well as from SW DB.
@ -888,7 +879,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*/
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
struct ice_aqc_cfg_l2_node_cgd_data *buf,
struct ice_aqc_cfg_l2_node_cgd_elem *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_l2_node_cgd *cmd;
@ -927,7 +918,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 buf_size;
u32 teid;
buf_size = ice_struct_size(buf, generic, num_nodes - 1);
buf_size = ice_struct_size(buf, generic, num_nodes);
buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
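The count change here is the off-by-one that falls out of the flexible-array conversion; a quick check with hypothetical sizes:

/* Hypothetical sizes: 8-byte header, 32-byte generic elements, 3 nodes.
 *
 * Old one-element-array layout: sizeof(*buf) == 8 + 32, so
 *	old buf_size = (8 + 32) + 32 * (3 - 1) = 104
 * New flexible-array layout: sizeof(*buf) == 8, so
 *	new buf_size = 8 + 32 * 3 = 104
 *
 * Same total, but the element count now matches the node count. */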
@ -965,8 +956,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
for (i = 0; i < num_nodes; i++) {
status = ice_sched_add_node(pi, layer, &buf->generic[i]);
if (status != ICE_SUCCESS) {
ice_debug(hw, ICE_DBG_SCHED,
"add nodes in SW DB failed status =%d\n",
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
}
@ -974,8 +964,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
teid = LE32_TO_CPU(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
if (!new_node) {
ice_debug(hw, ICE_DBG_SCHED,
"Node is missing for teid =%d\n", teid);
ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
break;
}
@ -1332,8 +1321,7 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
ice_release_lock(&pi->sched_lock);
if (!node)
ice_debug(pi->hw, ICE_DBG_SCHED,
"Node not found for teid=0x%x\n", teid);
ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
return node;
}
@ -1469,6 +1457,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
return false;
}
/**
* ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
* @pi: port information structure
* @vsi_node: software VSI scheduler node
* @qgrp_node: first queue group node identified for scanning
* @owner: LAN or RDMA
*
* This function retrieves a free LAN or RDMA queue group node by scanning
* qgrp_node and its siblings for the queue group with the fewest queues
* currently assigned.
*/
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
struct ice_sched_node *vsi_node,
struct ice_sched_node *qgrp_node, u8 owner)
{
struct ice_sched_node *min_qgrp;
u8 min_children;
if (!qgrp_node)
return qgrp_node;
min_children = qgrp_node->num_children;
if (!min_children)
return qgrp_node;
min_qgrp = qgrp_node;
/* Scan all queue groups until we find a node that has fewer than the
* current minimum number of children. This way all queue group nodes
* get an equal number of shares, and bandwidth is distributed equally
* across all active queues.
*/
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < min_children &&
qgrp_node->owner == owner) {
/* remember the new minimum queue group node */
min_qgrp = qgrp_node;
min_children = min_qgrp->num_children;
/* break if it has no children */
if (!min_children)
break;
}
qgrp_node = qgrp_node->sibling;
}
return min_qgrp;
}
/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
@ -1482,7 +1517,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
struct ice_sched_node *vsi_node, *qgrp_node = NULL;
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@ -1496,7 +1531,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
goto lan_q_exit;
return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@ -1509,8 +1544,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
lan_q_exit:
return qgrp_node;
/* Select the best queue group */
return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@ -1583,7 +1618,7 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*/
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
struct ice_aqc_get_elem buf;
struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u32 node_teid;
@ -1592,7 +1627,7 @@ static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
if (status != ICE_SUCCESS)
return false;
if (memcmp(buf.generic, &node->info, sizeof(*buf.generic))) {
if (memcmp(&buf, &node->info, sizeof(buf))) {
ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
node_teid);
return false;
@ -1960,8 +1995,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
static void
ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
@ -2037,8 +2071,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
continue;
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED,
"VSI has leaf nodes in TC %d\n", i);
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE;
goto exit_sched_rm_vsi_cfg;
}
@ -2122,7 +2155,7 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
*/
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf, u16 buf_size,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_node_to_root *cmd;
@ -2142,7 +2175,7 @@ ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
* This function validates the aggregator ID. It returns the aggregator
* info if the ID is present in the list; otherwise it returns NULL.
*/
static struct ice_sched_agg_info*
static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
@ -2249,6 +2282,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
u16 buf_len;
hw = pi->hw;
@ -2256,11 +2290,12 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
return ICE_ERR_PARAM;
/* Does parent have enough space */
if (parent->num_children + num_items >=
if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
return ICE_ERR_AQ_FULL;
buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
buf_len = ice_struct_size(buf, teid, 1);
buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@ -2275,7 +2310,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = CPU_TO_LE16(1);
status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
status = ICE_ERR_CFG;
@ -2324,6 +2359,10 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
if (!vsi_node)
return ICE_ERR_DOES_NOT_EXIST;
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
return ICE_SUCCESS;
aggl = ice_sched_get_agg_layer(pi->hw);
vsil = ice_sched_get_vsi_layer(pi->hw);
@ -2725,7 +2764,7 @@ ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
* The function returns aggregator VSI info based on VSI handle. This function
* needs to be called with scheduler lock held.
*/
static struct ice_sched_agg_vsi_info*
static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
@ -2747,7 +2786,7 @@ ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
* VSI in this case has a different aggregator than the default one. This
* function needs to be called with scheduler lock held.
*/
static struct ice_sched_agg_info*
static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
@ -2874,8 +2913,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
&pi->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
ice_debug(pi->hw, ICE_DBG_SCHED,
"Removed rl profile\n");
ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
}
}
}
@ -2886,7 +2924,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* @node: pointer to node
* @info: node info to update
*
* It updates the HW DB, and local SW DB of node. It updates the scheduling
* Update the HW DB, and local SW DB of node. Update the scheduling
* parameters of node from argument info data buffer (Info->data buf) and
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
@ -2895,18 +2933,18 @@ static enum ice_status
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_conf_elem buf;
struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
buf.generic[0] = *info;
buf = *info;
/* Parent TEID is reserved field in this aq call */
buf.generic[0].parent_teid = 0;
buf.parent_teid = 0;
/* Element type is reserved field in this aq call */
buf.generic[0].data.elem_type = 0;
buf.data.elem_type = 0;
/* Flags is reserved field in this aq call */
buf.generic[0].data.flags = 0;
buf.data.flags = 0;
/* Update HW DB */
/* Configure element node */
@ -3107,8 +3145,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
*/
static void
ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@ -3127,8 +3164,7 @@ ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
*/
static void
ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@ -3153,8 +3189,7 @@ ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
*/
static void
ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
@ -3216,8 +3251,7 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* Save or clear priority (prio) in the passed param bw_t_info.
*/
static void
ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
{
bw_t_info->generic = prio;
if (bw_t_info->generic)
@ -3499,7 +3533,6 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
* @pi: port information structure
* @num_qs: number of VSI queues
* @q_ids: queue IDs array
* @q_ids: queue IDs array
* @q_prio: queue priority array
*
* This function configures the queue node priority (Sibling Priority) of the
@ -3862,9 +3895,9 @@ static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_generic_elem *buf;
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
@ -3890,8 +3923,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if (rl_prof_elem->profile.flags == profile_type &&
rl_prof_elem->bw == bw)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@ -3913,8 +3946,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);
/* Create new entry in HW DB */
buf = (struct ice_aqc_rl_profile_generic_elem *)
&rl_prof_elem->profile;
buf = &rl_prof_elem->profile;
status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&profiles_added, NULL);
if (status || profiles_added != num_profiles)
@ -4122,7 +4154,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if (rl_prof_elem->profile.flags == profile_type &&
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type &&
LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@ -4131,8 +4164,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE)
ice_debug(pi->hw, ICE_DBG_SCHED,
"Remove rl profile failed\n");
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
if (status == ICE_ERR_IN_USE)
@ -4284,8 +4316,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return ICE_SUCCESS;
return ice_sched_rm_rl_profile(pi, layer_num,
rl_prof_info->profile.flags,
old_id);
rl_prof_info->profile.flags &
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
@ -5156,7 +5188,7 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
return ICE_SUCCESS;
}
/*
/**
* ice_sched_replay_node_prio - re-configure node priority
* @hw: pointer to the HW struct
* @node: sched node to configure
@ -5320,7 +5352,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
ice_acquire_lock(&pi->sched_lock);
LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
list_entry) {
list_entry)
/* replay aggregator (re-create aggregator node) */
if (!ice_cmp_bitmap(agg_info->tc_bitmap,
agg_info->replay_tc_bitmap,
@ -5349,7 +5381,6 @@ void ice_sched_replay_agg(struct ice_hw *hw)
ice_info(hw, "Replay agg bw [id=%d] failed\n",
agg_info->agg_id);
}
}
ice_release_lock(&pi->sched_lock);
}
@ -5378,14 +5409,33 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
ice_release_lock(&pi->sched_lock);
}
/**
* ice_sched_replay_root_node_bw - replay root node BW
* @pi: port information structure
*
* Replay root node BW settings.
*/
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
{
enum ice_status status = ICE_SUCCESS;
if (!pi->hw)
return ICE_ERR_PARAM;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_replay_node_bw(pi->hw, pi->root,
&pi->root_node_bw_t_info);
ice_release_lock(&pi->sched_lock);
return status;
}
/**
* ice_sched_replay_tc_node_bw - replay TC node(s) BW
* @pi: port information structure
*
* This function replays TC node BW.
*/
enum ice_status
ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
{
enum ice_status status = ICE_SUCCESS;
u8 tc;
@ -5508,8 +5558,7 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
enum ice_status
ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
enum ice_status status;


@ -103,15 +103,15 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_generic_elem *buf,
u16 buf_size, struct ice_sq_cd *cd);
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_data *buf, u16 buf_size,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
@ -145,7 +145,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf, u16 buf_size,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd);
/* Tx scheduler rate limiter functions */


@ -70,6 +70,7 @@ enum ice_status {
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */


@ -252,6 +252,9 @@ _ice_status_str(enum ice_status status)
case ICE_ERR_HW_TABLE:
str = "ICE_ERR_HW_TABLE";
break;
case ICE_ERR_FW_DDP_MISMATCH:
str = "ICE_ERR_FW_DDP_MISMATCH";
break;
case ICE_ERR_DOES_NOT_EXIST:
str = "ICE_ERR_DOES_NOT_EXIST";
break;
@ -270,8 +273,8 @@ _ice_status_str(enum ice_status status)
case ICE_ERR_AQ_EMPTY:
str = "ICE_ERR_AQ_EMPTY";
break;
case ICE_ERR_FW_DDP_MISMATCH:
str = "ICE_ERR_FW_DDP_MISMATCH";
case ICE_ERR_AQ_FW_CRITICAL:
str = "ICE_ERR_AQ_FW_CRITICAL";
break;
}
@ -987,6 +990,8 @@ ice_state_to_str(enum ice_state state)
return "DETACHING";
case ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING:
return "LINK_DEFAULT_OVERRIDE_PENDING";
case ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER:
return "LLDP_RX_FLTR_FROM_DRIVER";
case ICE_STATE_LAST:
return NULL;
}


@ -54,32 +54,10 @@
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(sizeof(struct ice_aqc_sw_rules_elem) - \
sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(sizeof(struct ice_aqc_sw_rules_elem) - \
sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
(sizeof(struct ice_aqc_sw_rules_elem) - \
sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
sizeof(struct ice_sw_rule_lg_act) - \
sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
(sizeof(struct ice_aqc_sw_rules_elem) - \
sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
sizeof(struct ice_sw_rule_vsi_list) - \
sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@ -121,7 +99,7 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
* @num_elems: pointer to number of elements
* @cd: pointer to command details structure or NULL
*
* Get switch configuration (0x0200) to be placed in 'buf'.
* Get switch configuration (0x0200) to be placed in buf.
* This admin command returns information such as initial VSI/port number
* and switch ID it belongs to.
*
@ -138,13 +116,13 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
* parsing the response buffer.
*/
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
enum ice_status status;
struct ice_aq_desc desc;
enum ice_status status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@ -178,9 +156,8 @@ ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
enum ice_status status;
u16 buf_len;
buf_len = sizeof(*sw_buf);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@ -260,9 +237,8 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
enum ice_status status, ret_status;
u16 buf_len;
buf_len = sizeof(*sw_buf);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@ -300,8 +276,7 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status) {
ice_debug(hw, ICE_DBG_SW,
"VEB counter resource could not be freed\n");
ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
ret_status = status;
}
@ -697,8 +672,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
return ICE_ERR_PARAM;
break;
default:
ice_debug(hw, ICE_DBG_SW,
"Error due to unsupported rule_type %u\n", rule_type);
ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
return ICE_ERR_OUT_OF_RANGE;
}
@ -720,8 +694,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
* than ICE_MAX_VSI, if not return with error.
*/
if (id >= ICE_MAX_VSI) {
ice_debug(hw, ICE_DBG_SW,
"Error VSI index (%u) out-of-range\n",
ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
id);
ice_free(hw, mr_list);
return ICE_ERR_OUT_OF_RANGE;
@ -805,9 +778,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_status status;
u16 buf_len;
buf_len = sizeof(*sw_buf);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(1);
@ -850,7 +822,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
* @ctl_bitmask: storm control control knobs
* @ctl_bitmask: storm control knobs
*
* Sets the storm control configuration (0x0280)
*/
@ -877,7 +849,7 @@ ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
* @ctl_bitmask: storm control control knobs
* @ctl_bitmask: storm control knobs
*
* Gets the storm control configuration (0x0281)
*/
@ -923,6 +895,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
enum ice_status status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@ -936,7 +909,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
CPU_TO_LE16(num_rules);
return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = ICE_ERR_DOES_NOT_EXIST;
return status;
}
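With this change, firmware ENOENT on a remove or update request surfaces to callers as ICE_ERR_DOES_NOT_EXIST. A hedged caller sketch (the call matches the signature above; the error handling shown is illustrative, not taken from this commit):

	/* Illustrative: remove one rule and treat "already gone" as success. */
	status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1,
				 ice_aqc_opc_remove_sw_rules, NULL);
	if (status == ICE_ERR_DOES_NOT_EXIST)
		status = ICE_SUCCESS;	/* firmware reported ENOENT for the rule */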
/* ice_init_port_info - Initialize port_info with switch configuration data
@ -961,8 +939,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW,
"incorrect VSI/port type received\n");
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
break;
}
}
@ -972,7 +949,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
*/
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp *rbuf;
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
enum ice_status status;
u8 num_total_ports;
u16 req_desc = 0;
@ -982,7 +959,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
num_total_ports = 1;
rbuf = (struct ice_aqc_get_sw_cfg_resp *)
rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
if (!rbuf)
@ -994,19 +971,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
* writing a non-zero value in req_desc
*/
do {
struct ice_aqc_get_sw_cfg_resp_elem *ele;
status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
&req_desc, &num_elems, NULL);
if (status)
break;
for (i = 0; i < num_elems; i++) {
struct ice_aqc_get_sw_cfg_resp_elem *ele;
for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
u8 res_type;
ele = rbuf[i].elements;
vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
@ -1026,8 +1003,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
if (j == num_total_ports) {
ice_debug(hw, ICE_DBG_SW,
"more ports than expected\n");
ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
status = ICE_ERR_CFG;
goto out;
}
@ -1043,7 +1019,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
} while (req_desc && !status);
out:
ice_free(hw, (void *)rbuf);
ice_free(hw, rbuf);
return status;
}
@ -1294,8 +1270,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
ICE_LG_ACT_VSI_LIST_ID_M;
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
@ -1376,13 +1351,11 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
rules_size);
lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
if (!lg_act)
return ICE_ERR_NO_MEMORY;
rx_tx = (struct ice_aqc_sw_rules_elem *)
((u8 *)lg_act + lg_act_size);
rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
@ -1950,26 +1923,11 @@ static enum ice_status
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
struct ice_aqc_sw_rules_elem *s_rule;
enum ice_status status;
u16 s_rule_size;
s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
/* Free the vsi_list resource that we allocated. It is assumed that the
* list is empty at this point.
*/
status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
ice_aqc_opc_free_res);
ice_free(hw, s_rule);
return status;
}
/**
@ -2031,8 +1989,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
tmp_fltr_info.vsi_handle = rem_vsi_handle;
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
if (status) {
ice_debug(hw, ICE_DBG_SW,
"Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr_info.fwd_id.hw_vsi_id, status);
return status;
}
@ -2048,8 +2005,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
if (status) {
ice_debug(hw, ICE_DBG_SW,
"Failed to remove VSI list %d, error %d\n",
ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
@ -2157,17 +2113,18 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
* ice_aq_get_res_alloc - get allocated resources
* @hw: pointer to the HW struct
* @num_entries: pointer to u16 to store the number of resource entries returned
* @buf: pointer to user-supplied buffer
* @buf_size: size of buff
* @buf: pointer to buffer
* @buf_size: size of buf
* @cd: pointer to command details structure or NULL
*
* The user-supplied buffer must be large enough to store the resource
* The caller-supplied buffer must be large enough to store the resource
* information for all resource types. Each resource type is an
* ice_aqc_get_res_resp_data_elem structure.
* ice_aqc_get_res_resp_elem structure.
*/
enum ice_status
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_res_alloc *resp;
enum ice_status status;
@ -2194,8 +2151,8 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
* ice_aq_get_res_descs - get allocated resource descriptors
* @hw: pointer to the hardware structure
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
* @buf: structure to hold response data buffer
* @buf_size: size of buffer
* @res_type: resource type
* @res_shared: is resource shared
* @desc_id: input - first desc ID to start; output - next desc ID
@ -2203,9 +2160,8 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
*/
enum ice_status
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_get_allocd_res_desc_resp *buf,
u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
struct ice_sq_cd *cd)
struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
{
struct ice_aqc_get_allocd_res_desc *cmd;
struct ice_aq_desc desc;
@ -2398,8 +2354,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
*
* Function adds a MAC rule for the logical port from the HW struct
*/
enum ice_status
ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
if (!m_list || !hw)
return ICE_ERR_PARAM;
@ -2511,8 +2466,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
*/
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW,
"Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
status = ICE_ERR_CFG;
goto exit;
}
@ -2597,8 +2551,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
*
* Function adds a VLAN rule for the logical port from the HW struct
*/
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
{
if (!v_list || !hw)
return ICE_ERR_PARAM;
@ -2644,7 +2597,6 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
return ICE_SUCCESS;
}
enum ice_status
/**
* ice_add_eth_mac - Add an ethertype-based filter rule
* @hw: pointer to the hardware structure
@ -2652,6 +2604,7 @@ enum ice_status
*
* Function adds an ethertype rule for the logical port from the HW struct
*/
enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
{
if (!em_list || !hw)
@ -2773,7 +2726,8 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
@ -2930,8 +2884,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
* @m_list: list of MAC addresses and forwarding information
*
*/
enum ice_status
ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
struct ice_sw_recipe *recp_list;
@ -3116,17 +3069,17 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
}
/**
* ice_get_vsi_promisc - get promiscuous mode of given VSI
* _ice_get_vsi_promisc - get promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
* @sw: pointer to the switch info struct on which the function operates
*/
enum ice_status
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid, struct ice_switch_info *sw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
@ -3156,17 +3109,32 @@ ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
}
/**
* ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
* ice_get_vsi_promisc - get promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
*/
enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
}
/**
* ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
* @sw: pointer to the switch info struct on which the function operates
*/
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid, struct ice_switch_info *sw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
@ -3195,6 +3163,21 @@ ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
return ICE_SUCCESS;
}
/**
* ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
*/
enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
}
/**
* ice_remove_promisc - Remove promisc based filter rules
* @hw: pointer to the hardware structure
@ -3220,17 +3203,17 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
}
/**
* ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
* _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
* @sw: pointer to the switch info struct on which the function operates
*/
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
static enum ice_status
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid, struct ice_switch_info *sw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry, *tmp;
struct LIST_HEAD_TYPE remove_list_head;
struct ice_fltr_mgmt_list_entry *itr;
@ -3295,14 +3278,32 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
}
/**
* ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
enum ice_status
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
u8 promisc_mask, u16 vid)
{
return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
}
/**
* _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
* @lport: logical port number to configure promisc mode
* @sw: pointer to the switch info struct on which the function operates
*/
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
static enum ice_status
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid, u8 lport, struct ice_switch_info *sw)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
@ -3393,17 +3394,16 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
new_fltr.src = hw_vsi_id;
} else {
new_fltr.flag |= ICE_FLTR_RX;
new_fltr.src = hw->port_info->lport;
new_fltr.src = lport;
}
new_fltr.fltr_act = ICE_FWD_TO_VSI;
new_fltr.vsi_handle = vsi_handle;
new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
f_list_entry.fltr_info = new_fltr;
recp_list = &hw->switch_info->recp_list[recipe_id];
recp_list = &sw->recp_list[recipe_id];
status = ice_add_rule_internal(hw, recp_list,
hw->port_info->lport,
status = ice_add_rule_internal(hw, recp_list, lport,
&f_list_entry);
if (status != ICE_SUCCESS)
goto set_promisc_exit;
@ -3414,19 +3414,37 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
}
/**
* ice_set_vlan_vsi_promisc
* ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
hw->port_info->lport,
hw->switch_info);
}
/**
* _ice_set_vlan_vsi_promisc
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @rm_vlan_promisc: Clear VLANs VSI promisc mode
* @lport: logical port number to configure promisc mode
* @sw: pointer to the switch info struct on which the function operates
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
static enum ice_status
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc, u8 lport,
struct ice_switch_info *sw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *list_itr, *tmp;
struct LIST_HEAD_TYPE vsi_list_head;
struct LIST_HEAD_TYPE *vlan_head;
@ -3448,11 +3466,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
list_entry) {
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
status = _ice_clear_vsi_promisc(hw, vsi_handle,
promisc_mask,
vlan_id, sw);
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
status = _ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id,
lport, sw);
if (status)
break;
}
@ -3466,6 +3486,24 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
return status;
}
/**
* ice_set_vlan_vsi_promisc
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @rm_vlan_promisc: Clear VLANs VSI promisc mode
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
rm_vlan_promisc, hw->port_info->lport,
hw->switch_info);
}
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
@ -3514,8 +3552,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
ice_remove_eth_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_DFLT:
ice_debug(hw, ICE_DBG_SW,
"Remove filters for this lookup type hasn't been implemented yet\n");
ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
break;
case ICE_SW_LKUP_LAST:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
@ -3586,9 +3623,8 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Allocate resource */
buf_len = sizeof(*buf);
buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(buf, elem, 1);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@ -3625,9 +3661,8 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 buf_len;
/* Free resource */
buf_len = sizeof(*buf);
buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(buf, elem, 1);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@ -3639,8 +3674,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status)
ice_debug(hw, ICE_DBG_SW,
"counter resource could not be freed\n");
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
ice_free(hw, buf);
return status;
@ -3687,9 +3721,8 @@ ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
return ICE_ERR_PARAM;
/* Allocate resource for large action */
buf_len = sizeof(*sw_buf);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)
ice_malloc(hw, buf_len);
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@ -3944,6 +3977,7 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
list_entry) {
struct ice_fltr_list_entry f_entry;
u16 vsi_handle;
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
@ -3955,12 +3989,8 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
}
/* Add a filter per VSI separately */
while (1) {
u16 vsi_handle;
vsi_handle =
ice_find_first_bit(itr->vsi_list_info->vsi_map,
ICE_MAX_VSI);
ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
ICE_MAX_VSI) {
if (!ice_is_vsi_valid(hw, vsi_handle))
break;
@ -4012,6 +4042,8 @@ enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @pi: pointer to port information structure
* @sw: pointer to switch info struct for which the function replays filters
* @vsi_handle: driver VSI handle
* @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
@ -4020,7 +4052,8 @@ enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
* It is required to pass valid VSI handle.
*/
static enum ice_status
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
struct LIST_HEAD_TYPE *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
@ -4030,7 +4063,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
if (LIST_EMPTY(list_head))
return status;
recp_list = &hw->switch_info->recp_list[recp_id];
recp_list = &sw->recp_list[recp_id];
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
@ -4044,7 +4077,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_list,
hw->port_info->lport,
pi->lport,
&f_entry);
if (status != ICE_SUCCESS)
goto end;
@ -4064,7 +4097,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
status = ice_add_vlan_internal(hw, recp_list, &f_entry);
else
status = ice_add_rule_internal(hw, recp_list,
hw->port_info->lport,
pi->lport,
&f_entry);
if (status != ICE_SUCCESS)
goto end;
@ -4076,11 +4109,14 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
* @pi: pointer to port information structure
* @vsi_handle: driver VSI handle
*
* Replays filters for requested VSI via vsi_handle.
*/
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
enum ice_status status = ICE_SUCCESS;
@ -4092,7 +4128,8 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
head = &sw->recp_list[i].filt_replay_rules;
if (!sw->recp_list[i].adv_rule)
status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
head);
if (status != ICE_SUCCESS)
return status;
}
@ -4101,14 +4138,14 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
}
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
* ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
* @hw: pointer to the HW struct
* @sw: pointer to switch info struct for which the function removes filters
*
* Deletes the filter replay rules.
* Deletes the filter replay rules for the given switch
*/
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
{
struct ice_switch_info *sw = hw->switch_info;
u8 i;
if (!sw)
@ -4124,3 +4161,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
}
}
}
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
* @hw: pointer to the HW struct
*
* Deletes the filter replay rules.
*/
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
{
ice_rm_sw_replay_rule_info(hw, hw->switch_info);
}

View File

@ -43,6 +43,20 @@
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define DUMMY_ETH_HDR_LEN 16
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
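The macros above size variable-length rule buffers with offsetof() on the trailing flexible member, replacing the sizeof() arithmetic removed earlier in this diff, which had to subtract the one-element placeholder array. A self-contained sketch of the same pattern with a toy struct (names are illustrative, not from the driver):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Toy mirror of ice_sw_rule_vsi_list: fixed header, trailing array. */
	struct toy_vsi_list {
		unsigned short index;
		unsigned short number;
		unsigned short vsi[];	/* C99 flexible array member */
	};

	/* Same shape as ICE_SW_RULE_VSI_LIST_SIZE(n): header bytes + n entries. */
	#define TOY_VSI_LIST_SIZE(n) \
		(offsetof(struct toy_vsi_list, vsi) + (n) * sizeof(unsigned short))

	int main(void)
	{
		struct toy_vsi_list *l = malloc(TOY_VSI_LIST_SIZE(4));

		if (l == NULL)
			return 1;
		l->number = 4;
		printf("4-VSI rule buffer: %zu bytes\n", TOY_VSI_LIST_SIZE(4));
		free(l);
		return 0;
	}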
/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
#define ICE_MAX_RES_TYPES 0x80
#define ICE_AQ_GET_RES_ALLOC_BUF_LEN \
@ -254,8 +268,7 @@ struct ice_sw_recipe {
/* Profiles this recipe is associated with */
u8 num_profs, *prof_ids;
/* Possible result indexes are 44, 45, 46 and 47 */
#define ICE_POSSIBLE_RES_IDX 0x0000F00000000000ULL
/* Bit map for possible result indexes */
ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
/* This allows user to specify the recipe priority.
@ -407,13 +420,13 @@ ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
enum ice_status
ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id);
enum ice_status
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
u16 buf_size, struct ice_sq_cd *cd);
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_get_allocd_res_desc_resp *buf,
u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
struct ice_sq_cd *cd);
struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
enum ice_status
@ -462,7 +475,9 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle);
void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */

View File

@ -56,6 +56,13 @@
#define IS_ASCII(_ch) ((_ch) < 0x80)
#define STRUCT_HACK_VAR_LEN
/**
* ice_struct_size - size of struct with C99 flexible array member
* @ptr: pointer to structure
* @field: flexible array member (last member of the structure)
* @num: number of elements of that flexible array member
*/
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
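A self-contained sketch of this macro in action with the STRUCT_HACK_VAR_LEN flexible-array convention (toy types; only the two macros mirror the driver):

	#include <stddef.h>
	#include <stdlib.h>

	#define STRUCT_HACK_VAR_LEN
	#define ice_struct_size(ptr, field, num) \
		(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

	struct toy_elem { unsigned short e_handle; unsigned short flags; };
	struct toy_buf {
		unsigned short num_elems;
		struct toy_elem elem[STRUCT_HACK_VAR_LEN];	/* expands to elem[] */
	};

	int main(void)
	{
		struct toy_buf *buf = NULL;
		/* sizeof operands are unevaluated, so using buf here is safe. */
		size_t buf_len = ice_struct_size(buf, elem, 1);

		buf = malloc(buf_len);	/* header plus one element */
		free(buf);
		return 0;
	}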
@ -231,6 +238,7 @@ enum ice_media_type {
ICE_MEDIA_BASET,
ICE_MEDIA_BACKPLANE,
ICE_MEDIA_DA,
ICE_MEDIA_AUI,
};
/* Software VSI types. */
@ -476,7 +484,7 @@ struct ice_nvm_info {
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of dev starter */
u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present)*/
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
struct ice_link_default_override_tlv {
@ -587,7 +595,7 @@ enum ice_rl_type {
#define ICE_SCHED_NO_BW_WT 0
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@ -720,6 +728,14 @@ struct ice_dcbx_cfg {
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
struct ice_qos_cfg {
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
u8 dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */
u8 is_sw_lldp : 1;
};
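The new ice_qos_cfg consolidates DCBX state that previously sat directly in ice_port_info (see the port_info hunk below). A minimal access sketch, assuming pi is a valid struct ice_port_info pointer:

	/* Before this change: pi->local_dcbx_cfg and pi->is_sw_lldp.
	 * After it, the same state lives under pi->qos_cfg.
	 */
	struct ice_dcbx_cfg *local_cfg = &pi->qos_cfg.local_dcbx_cfg;
	bool sw_lldp = pi->qos_cfg.is_sw_lldp;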
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to HW instance */
@ -743,14 +759,9 @@ struct ice_port_info {
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* List contain profile ID(s) and other params per layer */
struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_bw_type_info root_node_bw_t_info;
struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
/* LLDP/DCBX Status */
u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
u8 is_sw_lldp:1;
struct ice_qos_cfg qos_cfg;
u8 is_vf:1;
};
@ -758,6 +769,7 @@ struct ice_switch_info {
struct LIST_HEAD_TYPE vsi_list_map_head;
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
u16 max_used_prof_index;
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
@ -1022,7 +1034,7 @@ enum ice_sw_fwd_act_type {
#define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
#define ICE_SR_1ND_OROM_BANK_PTR 0x44
#define ICE_SR_1ST_OROM_BANK_PTR 0x44
#define ICE_SR_OROM_BANK_SIZE 0x45
#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_NETLIST_BANK_SIZE 0x47
@ -1069,4 +1081,8 @@ enum ice_sw_fwd_act_type {
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2
/* AQ API version for LLDP_FILTER_CONTROL */
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
#define ICE_FW_API_LLDP_FLTR_PATCH 1
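These constants define the minimum AQ API version that supports LLDP filter control. A minimal version-gate sketch, assuming struct ice_hw exposes api_maj_ver, api_min_ver and api_patch (the helper name is hypothetical):

	/* Hypothetical helper: true if the firmware AQ API version is >= 1.7.1. */
	static inline bool fw_has_lldp_fltr_ctrl(struct ice_hw *hw)
	{
		if (hw->api_maj_ver != ICE_FW_API_LLDP_FLTR_MAJ)
			return hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ;
		if (hw->api_min_ver != ICE_FW_API_LLDP_FLTR_MIN)
			return hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN;
		return hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH;
	}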
#endif /* _ICE_TYPE_H_ */

View File

@ -82,6 +82,8 @@ static void ice_if_stop(if_ctx_t ctx);
static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter);
static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ice_if_suspend(if_ctx_t ctx);
static int ice_if_resume(if_ctx_t ctx);
static int ice_msix_que(void *arg);
static int ice_msix_admin(void *arg);
@ -167,6 +169,8 @@ static device_method_t ice_iflib_methods[] = {
DEVMETHOD(ifdi_get_counter, ice_if_get_counter),
DEVMETHOD(ifdi_priv_ioctl, ice_if_priv_ioctl),
DEVMETHOD(ifdi_i2c_req, ice_if_i2c_req),
DEVMETHOD(ifdi_suspend, ice_if_suspend),
DEVMETHOD(ifdi_resume, ice_if_resume),
DEVMETHOD_END
};
@ -278,7 +282,6 @@ MODULE_VERSION(ice, 1);
MODULE_DEPEND(ice, pci, 1, 1, 1);
MODULE_DEPEND(ice, ether, 1, 1, 1);
MODULE_DEPEND(ice, iflib, 1, 1, 1);
MODULE_DEPEND(ice, firmware, 1, 1, 1);
IFLIB_PNP_INFO(pci, ice, ice_vendor_info_array);
@ -663,6 +666,7 @@ static void
ice_update_link_status(struct ice_softc *sc, bool update_media)
{
struct ice_hw *hw = &sc->hw;
enum ice_status status;
/* Never report link up when in recovery mode */
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
@ -673,6 +677,8 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
if (sc->link_up) { /* link is up */
uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
ice_set_default_local_lldp_mib(sc);
iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
ice_link_up_msg(sc);
@ -687,7 +693,7 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
/* Update the supported media types */
if (update_media) {
enum ice_status status = ice_add_media_types(sc, sc->media);
status = ice_add_media_types(sc, sc->media);
if (status)
device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
ice_status_str(status),
@ -1827,6 +1833,16 @@ ice_if_init(if_ctx_t ctx)
ASSERT_CTX_LOCKED(sc);
/*
* We've seen an issue with 11.3/12.1 where sideband routines are
* called after detach is called. This would call routines after
* if_stop, causing issues with the teardown process. This has
* seemingly been fixed in STABLE snapshots, but it seems like a
* good idea to have this guard here regardless.
*/
if (ice_driver_is_detaching(sc))
return;
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
return;
@ -2573,7 +2589,7 @@ ice_init_device_features(struct ice_softc *sc)
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE))
return;
/* Set capabilities that the driver supports */
/* Set capabilities that all devices support */
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
@ -2872,3 +2888,46 @@ ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
return ice_handle_i2c_req(sc, req);
}
/**
* ice_if_suspend - PCI device suspend handler for iflib
* @ctx: iflib context pointer
*
* Deinitializes the driver and clears HW resources in preparation for
* suspend or an FLR.
*
* @returns 0; this return value is ignored
*/
static int
ice_if_suspend(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
/* At least a PFR is always going to happen after this;
* either via FLR or during the D3->D0 transition.
*/
ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
ice_prepare_for_reset(sc);
return (0);
}
/**
* ice_if_resume - PCI device resume handler for iflib
* @ctx: iflib context pointer
*
* Reinitializes the driver and the HW after PCI resume or after
* an FLR. An init is performed by iflib after this function is finished.
*
* @returns 0; this return value is ignored
*/
static int
ice_if_resume(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
ice_rebuild(sc);
return (0);
}

View File

@ -156,9 +156,11 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcodes 34, 35, 36, 37 and 38 are reserved */
/* opcodes 39, 40, 41 and 42 are reserved */
/* opcode 42 is reserved */
/* opcode 34 is reserved */
/* opcodes 39, 40, 41, 42 and 43 are reserved */
/* opcode 44 is reserved */
/* opcodes 45, 46, 47, 48 and 49 are reserved */
VIRTCHNL_OP_MAX,
};
/* These macros are used to generate compilation errors if a structure/union
@ -273,6 +275,9 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
/* 0x04000000 is reserved */
/* 0X08000000 and 0X10000000 are reserved */
/* 0X20000000 is reserved */
/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
@ -449,9 +454,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
* PF removes the filters and returns status.
*/
/* VIRTCHNL_ETHER_ADDR_LEGACY
* Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
* bytes. Moving forward all VF drivers should not set type to
* VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
* behavior. The control plane function (i.e. PF) can use a best effort method
* of tracking the primary/device unicast in this case, but there is no
* guarantee and functionality depends on the implementation of the PF.
*/
/* VIRTCHNL_ETHER_ADDR_PRIMARY
* All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
* primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
* VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
* function (i.e. PF) to accurately track and use this MAC address for
* displaying on the host and for VM/function reset.
*/
/* VIRTCHNL_ETHER_ADDR_EXTRA
* All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
* unicast and/or multicast filters that are being added/deleted via
* VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
*/
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
u8 type;
#define VIRTCHNL_ETHER_ADDR_LEGACY 0
#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
#define VIRTCHNL_ETHER_ADDR_EXTRA 2
#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
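A short sketch of populating the new @type member for the primary-unicast case described above (the helper name and mac parameter are illustrative; only the struct and constants come from this header):

	/* Illustrative helper: mark an address as the primary/device unicast.
	 * Assumes memcpy is available in the surrounding environment.
	 */
	static void fill_primary_addr(struct virtchnl_ether_addr *ea,
				      const u8 mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS])
	{
		memcpy(ea->addr, mac, sizeof(ea->addr));
		ea->type = VIRTCHNL_ETHER_ADDR_PRIMARY;
	}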
@ -630,6 +662,11 @@ enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
VIRTCHNL_ACTION_PASSTHRU,
VIRTCHNL_ACTION_QUEUE,
VIRTCHNL_ACTION_Q_REGION,
VIRTCHNL_ACTION_MARK,
VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
@ -752,7 +789,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
int valid_len = 0;
u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {

View File

@ -39,8 +39,8 @@
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SELECTED_SA_DESTROY 0
#define VIRTCHNL_IPSEC_ALL_SA_DESTROY 1
#define VIRTCHNL_IPSEC_SA_DESTROY 0
#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
@ -49,7 +49,7 @@
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
@ -65,13 +65,14 @@
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
#define VIRTCHNL_3DES_CBC 15 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 16 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 17 /* AES algorithm in Counter mode */
#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
#define VIRTCHNL_AES_CCM 18 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 19 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 20 /* algorithm of ChaCha20-Poly1305 */
#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
@ -97,6 +98,21 @@
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* Detailed opcodes for DPDK and IPsec use */
enum inline_ipsec_ops {
INLINE_IPSEC_OP_GET_CAP = 0,
INLINE_IPSEC_OP_GET_STATUS = 1,
INLINE_IPSEC_OP_SA_CREATE = 2,
INLINE_IPSEC_OP_SA_UPDATE = 3,
INLINE_IPSEC_OP_SA_DESTROY = 4,
INLINE_IPSEC_OP_SP_CREATE = 5,
INLINE_IPSEC_OP_SP_DESTROY = 6,
INLINE_IPSEC_OP_SA_READ = 7,
INLINE_IPSEC_OP_EVENT = 8,
INLINE_IPSEC_OP_RESP = 9,
};
#pragma pack(1)
/* Not all fields are valid; if a certain field is invalid, set all of its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
@ -119,6 +135,7 @@ struct virtchnl_algo_cap {
u16 max_aad_size;
u16 inc_aad_size;
};
#pragma pack()
/* vf record the capability of crypto from the virtchnl */
struct virtchnl_sym_crypto_cap {
@ -178,17 +195,7 @@ struct virtchnl_ipsec_cap {
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
/* using desc_id to record the format of rx descriptor */
struct virtchnl_rx_desc_fmt {
u16 desc_id;
};
/* using desc_id to record the format of tx descriptor */
struct virtchnl_tx_desc_fmt {
u8 desc_num;
u16 desc_ids[VIRTCHNL_IPSEC_MAX_TX_DESC_NUM];
};
#pragma pack(1)
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
@ -201,18 +208,23 @@ struct virtchnl_ipsec_crypto_cfg_item {
/* Length of digest */
u16 digest_len;
/* SA salt */
u32 salt;
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
#pragma pack()
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_CREATE
* VF send this SA configuration to PF using virtchnl;
* PF create SA as configuration and PF driver will return
@ -249,9 +261,6 @@ struct virtchnl_ipsec_sa_cfg {
/* outer dst ip address */
u8 dst_addr[16];
/* SA salt */
u32 salt;
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
@ -311,6 +320,7 @@ struct virtchnl_ipsec_sa_cfg {
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
* VF send configuration of index of SA to PF
@ -322,6 +332,7 @@ struct virtchnl_ipsec_sa_update {
u32 esn_low; /* low 32 bits of esn */
};
#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
* VF send configuration of index of SA to PF
* PF will destroy SA according to configuration
@ -329,14 +340,12 @@ struct virtchnl_ipsec_sa_update {
* be destroyed
*/
struct virtchnl_ipsec_sa_destroy {
/* VIRTCHNL_SELECTED_SA_DESTROY: selected SA will be destroyed.
* VIRTCHNL_ALL_SA_DESTROY: all SA will be destroyed.
/* All zero bitmap indicates all SA will be destroyed.
* Non-zero bitmap indicates the selected SA in
* array sa_index will be destroyed.
*/
u8 flag;
u8 pad1; /* padding */
u16 pad2; /* padding */
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
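To make the bitmap semantics concrete, a brief sketch, assuming bit i of the flag bitmap selects sa_index[i] (sa_handle_a and sa_handle_b are hypothetical SA handles):

	/* Destroy only two selected SAs: set bits 0 and 2 of flag and fill the
	 * matching sa_index slots. An all-zero flag would instead request
	 * destruction of every SA.
	 */
	struct virtchnl_ipsec_sa_destroy req = { 0 };

	req.flag = (1u << 0) | (1u << 2);
	req.sa_index[0] = sa_handle_a;
	req.sa_index[2] = sa_handle_b;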
@ -445,5 +454,127 @@ struct virtchnl_ipsec_sa_read {
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
#pragma pack()
/* Add whitelist entry in IES */
struct virtchnl_ipsec_sp_cfg {
u32 spi;
u32 dip[4];
/* Drop frame if true or redirect to QAT if false. */
u8 drop;
/* Congestion domain. For future use. */
u8 cgd;
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
};
#pragma pack(1)
/* Delete whitelist entry in IES */
struct virtchnl_ipsec_sp_destroy {
/* 0 for IPv4 table, 1 for IPv6 table. */
u8 table_id;
u32 rule_id;
};
#pragma pack()
/* Response from IES to whitelist operations */
struct virtchnl_ipsec_sp_cfg_resp {
u32 rule_id;
};
struct virtchnl_ipsec_sa_cfg_resp {
u32 sa_handle;
};
#define INLINE_IPSEC_EVENT_RESET 0x1
#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
struct virtchnl_ipsec_event {
u32 ipsec_event_data;
};
#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
struct virtchnl_ipsec_status {
u32 status;
};
struct virtchnl_ipsec_resp {
u32 resp;
};
/* Internal message descriptor for VF <-> IPsec communication */
struct inline_ipsec_msg {
u16 ipsec_opcode;
u16 req_id;
union {
/* IPsec request */
struct virtchnl_ipsec_sa_cfg sa_cfg[0];
struct virtchnl_ipsec_sp_cfg sp_cfg[0];
struct virtchnl_ipsec_sa_update sa_update[0];
struct virtchnl_ipsec_sa_destroy sa_destroy[0];
struct virtchnl_ipsec_sp_destroy sp_destroy[0];
/* IPsec response */
struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
struct virtchnl_ipsec_cap ipsec_cap[0];
struct virtchnl_ipsec_status ipsec_status[0];
/* response to del_sa, del_sp, update_sa */
struct virtchnl_ipsec_resp ipsec_resp[0];
/* IPsec event (no req_id is required) */
struct virtchnl_ipsec_event event[0];
/* Reserved */
struct virtchnl_ipsec_sa_read sa_read[0];
} ipsec_data;
};
static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
{
u16 valid_len = sizeof(struct inline_ipsec_msg);
switch (opcode) {
case INLINE_IPSEC_OP_GET_CAP:
case INLINE_IPSEC_OP_GET_STATUS:
break;
case INLINE_IPSEC_OP_SA_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
break;
case INLINE_IPSEC_OP_SP_CREATE:
valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
break;
case INLINE_IPSEC_OP_SA_UPDATE:
valid_len += sizeof(struct virtchnl_ipsec_sa_update);
break;
case INLINE_IPSEC_OP_SA_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
break;
case INLINE_IPSEC_OP_SP_DESTROY:
valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
break;
/* Only for msg length calculation of response to VF in case of
* inline IPsec failure.
*/
case INLINE_IPSEC_OP_RESP:
valid_len += sizeof(struct virtchnl_ipsec_resp);
break;
default:
valid_len = 0;
break;
}
return valid_len;
}
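A hedged sketch of using the helper above on the receive side (the function and parameter names are illustrative, not part of this header):

	/* Illustrative: reject messages with unknown opcodes or short payloads. */
	static int validate_ipsec_msg(struct inline_ipsec_msg *msg, u16 msglen)
	{
		u16 want = virtchnl_inline_ipsec_val_msg_len(msg->ipsec_opcode);

		if (want == 0 || msglen < want)
			return -1;
		return 0;
	}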
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */