ice(4): Update to 1.34.2-k

- Adds FW logging support
  - Once enabled, this lets the firmware print event and error messages
    to the log, increasing the visibility into what the hardware is
    doing; this is useful for debugging
- General bug fixes
- Adds initial DCB support to the driver
  - Notably, this adds support for DCBX to the driver; now with the
    fw_lldp sysctl set to 1, the driver and adapter will adopt a DCBX
    configuration sent from a link partner
  - Adds statistics sysctls for priority flow control frames
  - Adds new configuration sysctls for DCB-related features: (VLAN) user
    priority to TC mapping; ETS bandwidth allocation; priority flow
    control
- Remove unused SR-IOV files (until support gets added)

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Tested by:	jeffrey.e.pieper@intel.com
MFC after:	3 days
MFC with:	213e91399b, e438f0a975
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D34024
This commit is contained in:
Eric Joyner 2022-03-04 10:25:25 -08:00
parent b126b79b27
commit 56429daea2
No known key found for this signature in database
GPG Key ID: 96F0C6FD61E05DE3
43 changed files with 5077 additions and 1431 deletions

View File

@ -165,12 +165,14 @@ dev/ice/ice_nvm.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \

View File

@ -222,11 +222,13 @@ dev/ice/ice_nvm.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci \
dev/ice/ice_vlan_mode.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \

View File

@ -68,12 +68,14 @@ dev/ice/ice_nvm.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sriov.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c optional ice pci powerpc64 \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp powerpc64 \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \

View File

@ -169,6 +169,10 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
u8 major_ver;
u8 minor_ver;
@ -1240,10 +1244,11 @@ struct ice_aqc_get_phy_caps {
/* 18.0 - Report qualified modules */
#define ICE_AQC_GET_PHY_RQM BIT(0)
/* 18.1 - 18.3 : Report mode
* 000b - Report NVM capabilities
* 001b - Report topology capabilities
* 010b - Report SW configured
* 100b - Report default capabilities
* 000b - Report topology capabilities, without media
* 001b - Report topology capabilities, with media
* 010b - Report Active configuration
* 011b - Report PHY Type and FEC mode capabilities
* 100b - Report Default capabilities
*/
#define ICE_AQC_REPORT_MODE_S 1
#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S)
@ -1586,6 +1591,7 @@ struct ice_aqc_set_event_mask {
#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
#define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10)
#define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11)
#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
u8 reserved1[6];
};
@ -1774,7 +1780,7 @@ struct ice_aqc_dnl_read_log_response {
};
struct ice_aqc_link_topo_addr {
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
@ -1800,6 +1806,10 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
u8 index;
};
struct ice_aqc_link_topo_addr {
struct ice_aqc_link_topo_params topo_params;
__le16 handle;
#define ICE_AQC_LINK_TOPO_HANDLE_S 0
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
@ -1822,59 +1832,10 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
/* Get Link Topology Pin (direct, 0x06E1) */
struct ice_aqc_get_link_topo_pin {
struct ice_aqc_link_topo_addr addr;
u8 input_io_params;
#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S 0
#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_M \
(0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S)
#define ICE_AQC_LINK_TOPO_IO_FUNC_GPIO 0
#define ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N 1
#define ICE_AQC_LINK_TOPO_IO_FUNC_INT_N 2
#define ICE_AQC_LINK_TOPO_IO_FUNC_PRESENT_N 3
#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_DIS 4
#define ICE_AQC_LINK_TOPO_IO_FUNC_MODSEL_N 5
#define ICE_AQC_LINK_TOPO_IO_FUNC_LPMODE 6
#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_FAULT 7
#define ICE_AQC_LINK_TOPO_IO_FUNC_RX_LOSS 8
#define ICE_AQC_LINK_TOPO_IO_FUNC_RS0 9
#define ICE_AQC_LINK_TOPO_IO_FUNC_RS1 10
#define ICE_AQC_LINK_TOPO_IO_FUNC_EEPROM_WP 11
/* 12 repeats intentionally due to two different uses depending on context */
#define ICE_AQC_LINK_TOPO_IO_FUNC_LED 12
#define ICE_AQC_LINK_TOPO_IO_FUNC_RED_LED 12
#define ICE_AQC_LINK_TOPO_IO_FUNC_GREEN_LED 13
#define ICE_AQC_LINK_TOPO_IO_FUNC_BLUE_LED 14
#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S 5
#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_M \
(0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
u8 output_io_params;
#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S 0
#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_M \
(0x1F << \ ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_NUM_S)
/* Use ICE_AQC_LINK_TOPO_IO_FUNC_* for the non-numerical options */
#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_S 5
#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_M \
(0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
u8 output_io_flags;
#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S 0
#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_M \
(0x7 << ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S)
#define ICE_AQC_LINK_TOPO_OUTPUT_INT_S 3
#define ICE_AQC_LINK_TOPO_OUTPUT_INT_M \
(0x3 << ICE_AQC_LINK_TOPO_OUTPUT_INT_S)
#define ICE_AQC_LINK_TOPO_OUTPUT_POLARITY BIT(5)
#define ICE_AQC_LINK_TOPO_OUTPUT_VALUE BIT(6)
#define ICE_AQC_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
u8 rsvd[7];
};
/* Read/Write I2C (direct, 0x06E2/0x06E3) */
struct ice_aqc_i2c {
struct ice_aqc_link_topo_addr topo_addr;
@ -2072,23 +2033,18 @@ struct ice_aqc_sw_gpio {
u8 rsvd[12];
};
/* Program topology device NVM (direct, 0x06F2) */
struct ice_aqc_program_topology_device_nvm {
u8 lport_num;
u8 lport_num_valid;
u8 node_type_ctx;
u8 index;
/* Program Topology Device NVM (direct, 0x06F2) */
struct ice_aqc_prog_topo_dev_nvm {
struct ice_aqc_link_topo_params topo_params;
u8 rsvd[12];
};
/* Read topology device NVM (indirect, 0x06F3) */
struct ice_aqc_read_topology_device_nvm {
u8 lport_num;
u8 lport_num_valid;
u8 node_type_ctx;
u8 index;
/* Read Topology Device NVM (direct, 0x06F3) */
struct ice_aqc_read_topo_dev_nvm {
struct ice_aqc_link_topo_params topo_params;
__le32 start_address;
u8 data_read[8];
#define ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
u8 data_read[ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
};
/* NVM Read command (indirect 0x0701)
@ -2117,10 +2073,11 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
#define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
#define ICE_AQC_NVM_POR_FLAG 0 /* Used by NVM Write completion on ARQ */
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0)
#define ICE_AQC_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
#define ICE_AQC_NVM_POR_FLAG 0
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@ -2665,6 +2622,63 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
u8 cluster_id;
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
/* EMP_DRAM only dumpable in device debug mode */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
/* AUX_REGS only dumpable in device debug mode */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
u8 reserved;
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
__le32 addr_high;
__le32 addr_low;
};
enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_GENERAL = 0,
ICE_AQC_FW_LOG_ID_CTRL,
ICE_AQC_FW_LOG_ID_LINK,
ICE_AQC_FW_LOG_ID_LINK_TOPO,
ICE_AQC_FW_LOG_ID_DNL,
ICE_AQC_FW_LOG_ID_I2C,
ICE_AQC_FW_LOG_ID_SDP,
ICE_AQC_FW_LOG_ID_MDIO,
ICE_AQC_FW_LOG_ID_ADMINQ,
ICE_AQC_FW_LOG_ID_HDMA,
ICE_AQC_FW_LOG_ID_LLDP,
ICE_AQC_FW_LOG_ID_DCBX,
ICE_AQC_FW_LOG_ID_DCB,
ICE_AQC_FW_LOG_ID_XLR,
ICE_AQC_FW_LOG_ID_NVM,
ICE_AQC_FW_LOG_ID_AUTH,
ICE_AQC_FW_LOG_ID_VPD,
ICE_AQC_FW_LOG_ID_IOSF,
ICE_AQC_FW_LOG_ID_PARSER,
ICE_AQC_FW_LOG_ID_SW,
ICE_AQC_FW_LOG_ID_SCHEDULER,
ICE_AQC_FW_LOG_ID_TXQ,
ICE_AQC_FW_LOG_ID_RSVD,
ICE_AQC_FW_LOG_ID_POST,
ICE_AQC_FW_LOG_ID_WATCHDOG,
ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
ICE_AQC_FW_LOG_ID_MNG,
ICE_AQC_FW_LOG_ID_SYNCE,
ICE_AQC_FW_LOG_ID_HEALTH,
ICE_AQC_FW_LOG_ID_TSDRV,
ICE_AQC_FW_LOG_ID_PFREG,
ICE_AQC_FW_LOG_ID_MDLVER,
ICE_AQC_FW_LOG_ID_MAX,
};
/* Set Health Status (direct 0xFF20) */
struct ice_aqc_set_health_status_config {
u8 event_source;
@ -2694,6 +2708,8 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
#define ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
#define ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT 0x117
#define ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
#define ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
#define ICE_AQC_HEALTH_STATUS_INFO_RECOVERY 0x500
#define ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH 0x502
@ -2745,11 +2761,11 @@ struct ice_aqc_clear_health_status {
* Get FW Log (indirect 0xFF34)
* Clear FW Log (indirect 0xFF35)
*/
struct ice_aqc_fw_log {
u8 cmd_flags;
#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
@ -2837,6 +2853,7 @@ struct ice_aq_desc {
struct ice_aqc_mdio read_write_mdio;
struct ice_aqc_gpio_by_func read_write_gpio_by_func;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sw_gpio sw_read_write_gpio;
struct ice_aqc_set_led set_led;
struct ice_aqc_mdio read_mdio;
struct ice_aqc_mdio write_mdio;
@ -2887,6 +2904,8 @@ struct ice_aq_desc {
struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_get_pkg_info_list get_pkg_info_list;
struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_fw_log fw_log;
struct ice_aqc_debug_dump_internals debug_dump;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_get_res_alloc get_res;
@ -2902,6 +2921,8 @@ struct ice_aq_desc {
get_supported_health_status_codes;
struct ice_aqc_get_health_status get_health_status;
struct ice_aqc_clear_health_status clear_health_status;
struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm;
struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm;
} params;
};
@ -3069,7 +3090,6 @@ enum ice_adminq_opc {
ice_aqc_opc_dnl_set_breakpoints = 0x0686,
ice_aqc_opc_dnl_read_log = 0x0687,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_get_link_topo_pin = 0x06E1,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_read_mdio = 0x06E4,
@ -3085,8 +3105,8 @@ enum ice_adminq_opc {
ice_aqc_opc_sff_eeprom = 0x06EE,
ice_aqc_opc_sw_set_gpio = 0x06EF,
ice_aqc_opc_sw_get_gpio = 0x06F0,
ice_aqc_opc_program_topology_device_nvm = 0x06F2,
ice_aqc_opc_read_topology_device_nvm = 0x06F3,
ice_aqc_opc_prog_topo_dev_nvm = 0x06F2,
ice_aqc_opc_read_topo_dev_nvm = 0x06F3,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,
@ -3148,6 +3168,9 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
/* debug commands */
ice_aqc_opc_debug_dump_internals = 0xFF08,
/* SystemDiagnostic commands */
ice_aqc_opc_set_health_status_config = 0xFF20,
ice_aqc_opc_get_supported_health_status_codes = 0xFF21,

View File

@ -39,6 +39,120 @@
#define ICE_PF_RESET_WAIT_COUNT 300
/**
* dump_phy_type - helper function that prints PHY type strings
* @hw: pointer to the HW structure
* @phy: 64 bit PHY type to decipher
* @i: bit index within phy
* @phy_string: string corresponding to bit i in phy
* @prefix: prefix string to differentiate multiple dumps
*/
static void
dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
const char *prefix)
{
if (phy & BIT_ULL(i))
ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
phy_string);
}
/**
* ice_dump_phy_type_low - helper function to dump phy_type_low
* @hw: pointer to the HW structure
* @low: 64 bit value for phy_type_low
* @prefix: prefix string to differentiate multiple dumps
*/
static void
ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
{
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
(unsigned long long)low);
dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
}
/**
* ice_dump_phy_type_high - helper function to dump phy_type_high
* @hw: pointer to the HW structure
* @high: 64 bit value for phy_type_high
* @prefix: prefix string to differentiate multiple dumps
*/
static void
ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
{
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
(unsigned long long)high);
dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
}
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@ -76,6 +190,11 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_QSFP:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823C_10G_BASE_T:
case ICE_DEV_ID_E823C_BACKPLANE:
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
hw->mac_type = ICE_MAC_GENERIC;
break;
default:
@ -87,6 +206,42 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw)
return ICE_SUCCESS;
}
/**
* ice_is_e810
* @hw: pointer to the hardware structure
*
* returns true if the device is E810 based, false if not.
*/
bool ice_is_e810(struct ice_hw *hw)
{
return hw->mac_type == ICE_MAC_E810;
}
/**
* ice_is_e810t
* @hw: pointer to the hardware structure
*
* returns true if the device is E810T based, false if not.
*/
bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
return true;
break;
case ICE_DEV_ID_E810C_QSFP:
if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
return true;
break;
default:
break;
}
return false;
}
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
@ -181,6 +336,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
enum ice_status status;
const char *prefix;
struct ice_hw *hw;
cmd = &desc.params.get_phy;
@ -201,29 +357,42 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= CPU_TO_LE16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
report_mode);
ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
prefix = "phy_caps_media";
else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
prefix = "phy_caps_no_media";
else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
prefix = "phy_caps_active";
else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
prefix = "phy_caps_default";
else
prefix = "phy_caps_invalid";
ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);
ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
prefix, report_mode);
ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
pcaps->eee_cap);
ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
pcaps->module_compliance_enforcement);
ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
pcaps->extended_compliance_code);
ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
prefix, pcaps->module_compliance_enforcement);
ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
prefix, pcaps->extended_compliance_code);
ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@ -238,35 +407,76 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
}
/**
* ice_aq_get_link_topo_handle - get link topology node return status
* @pi: port information structure
* @node_type: requested node type
* @cd: pointer to command details structure or NULL
*
* Get link topology node return status for specified node type (0x06E0)
*
* Node type cage can be used to determine if cage is present. If AQC
* returns error (ENOENT), then no cage present. If no cage present, then
* connection type is backplane or BASE-T.
* ice_aq_get_netlist_node
* @hw: pointer to the hw struct
* @cmd: get_link_topo AQ structure
* @node_part_number: output node part number if node found
* @node_handle: output node handle parameter if node found
*/
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
struct ice_aqc_get_link_topo *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.get_link_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
desc.params.get_link_topo = *cmd;
cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
ICE_AQC_LINK_TOPO_NODE_CTX_S);
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
return ICE_ERR_NOT_SUPPORTED;
/* set node type */
cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
if (node_handle)
*node_handle =
LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
if (node_part_number)
*node_part_number = desc.params.get_link_topo.node_part_num;
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
return ICE_SUCCESS;
}
#define MAX_NETLIST_SIZE 10
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
* @node_type_ctx: type of netlist node to look for
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
* Find and return the node handle for a given node type and part number in the
* netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
* otherwise. If @node_handle provided, it would be set to found node handle.
*/
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
u16 *node_handle)
{
struct ice_aqc_get_link_topo cmd;
u8 rec_node_part_number;
enum ice_status status;
u16 rec_node_handle;
u8 idx;
for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
memset(&cmd, 0, sizeof(cmd));
cmd.addr.topo_params.node_type_ctx =
(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
&rec_node_part_number,
&rec_node_handle);
if (status)
return status;
if (rec_node_part_number == node_part_number) {
if (node_handle)
*node_handle = rec_node_handle;
return ICE_SUCCESS;
}
}
return ICE_ERR_DOES_NOT_EXIST;
}
/**
@ -278,13 +488,27 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
*/
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
struct ice_aqc_get_link_topo *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.get_link_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
cmd->addr.topo_params.node_type_ctx =
(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
cmd->addr.topo_params.node_type_ctx |=
(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);
/* Node type cage can be used to determine if cage is present. If AQC
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
return !ice_aq_get_link_topo_handle(pi,
ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
NULL);
return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL);
}
/**
@ -489,6 +713,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
(unsigned long long)li->phy_type_high);
ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
@ -526,7 +751,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* LFC. Thus, we will use index =
* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
*
* Also, because we are opearating on transmit timer and fc
* Also, because we are operating on transmit timer and fc
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
@ -548,12 +773,14 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* ice_aq_set_mac_cfg
* @hw: pointer to the HW struct
* @max_frame_size: Maximum Frame Size to be supported
* @auto_drop: Tell HW to drop packets if TC queue is blocked
* @cd: pointer to command details structure or NULL
*
* Set MAC configuration (0x0603)
*/
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
struct ice_aq_desc desc;
@ -567,6 +794,8 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
ice_fill_tx_timer_and_fc_thresh(hw, cmd);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@ -667,7 +896,7 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
}
/**
* ice_cleanup_all_fltr_mgmt - cleanup filter management list and locks
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
* @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
@ -723,6 +952,17 @@ void ice_print_rollback_msg(struct ice_hw *hw)
nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}
/**
* ice_set_umac_shared
* @hw: pointer to the hw struct
*
* Set boolean flag to allow unicast MAC sharing
*/
void ice_set_umac_shared(struct ice_hw *hw)
{
hw->umac_shared = true;
}
/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
@ -754,6 +994,25 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
ice_fwlog_set_support_ena(hw);
status = ice_fwlog_set(hw, &hw->fwlog_cfg);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
status);
} else {
if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
status = ice_fwlog_register(hw);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
status);
} else {
status = ice_fwlog_unregister(hw);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
status);
}
}
status = ice_init_nvm(hw);
if (status)
goto err_unroll_cqinit;
@ -833,6 +1092,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_sched;
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = ice_calloc(hw, 2,
sizeof(struct ice_aqc_manage_mac_read_resp));
@ -848,10 +1108,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
/* enable jumbo frame support at MAC level */
status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
NULL);
if (status)
goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
@ -2132,12 +2395,64 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->num_wol_proxy_fltr);
ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
caps->wol_proxy_vsi_seid);
ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
prefix, caps->apm_wol_support);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
ice_debug(hw, ICE_DBG_INIT,
"%s: pcie_reset_avoidance = %d\n", prefix,
caps->pcie_reset_avoidance);
break;
case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
{
u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
caps->ext_topo_dev_img_ver_high[index] = number;
caps->ext_topo_dev_img_ver_low[index] = logical_id;
caps->ext_topo_dev_img_part_num[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
ICE_EXT_TOPO_DEV_IMG_PART_NUM_S;
caps->ext_topo_dev_img_load_en[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
caps->ext_topo_dev_img_prog_en[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_ver_high[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_ver_high[index]);
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_ver_low[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_ver_low[index]);
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_part_num[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_part_num[index]);
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_load_en[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_load_en[index]);
ice_debug(hw, ICE_DBG_INIT,
"%s: ext_topo_dev_img_prog_en[%d] = %d\n",
prefix, index,
caps->ext_topo_dev_img_prog_en[index]);
break;
}
default:
/* Not one of the recognized common capabilities */
found = false;
@ -2282,6 +2597,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->num_funcs = ice_hweight32(number);
ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
dev_p->num_funcs);
}
/**
@ -2658,6 +2974,28 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_is_100m_speed_supported - check for 100M speed support
 * @hw: pointer to the HW struct
 *
 * Returns true if the device ID matches one of the E822/E823 variants
 * known to support 100M link speeds, false otherwise.
 */
bool ice_is_100m_speed_supported(struct ice_hw *hw)
{
	bool supported;

	switch (hw->device_id) {
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
		supported = true;
		break;
	default:
		supported = false;
		break;
	}

	return supported;
}
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
@ -3205,7 +3543,7 @@ ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
/**
* ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
* @pi: port information structure
* @caps: PHY ability structure to copy date from
* @caps: PHY ability structure to copy data from
* @cfg: PHY configuration structure to copy data to
*
* Helper function to copy AQC PHY get ability data to PHY set configuration
@ -3500,6 +3838,76 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return status;
}
/**
 * ice_aq_prog_topo_dev_nvm - program a topology device NVM
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @cd: pointer to command details structure or NULL
 *
 * Program Topology Device NVM (0x06F2)
 */
enum ice_status
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	struct ice_aqc_prog_topo_dev_nvm *cmd = &desc.params.prog_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);

	/* The topology parameters are carried inline in the descriptor */
	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_aq_read_topo_dev_nvm - read from a topology device NVM
 * @hw: pointer to the hardware structure
 * @topo_params: pointer to structure storing topology parameters for a device
 * @start_address: byte offset in the topology device NVM
 * @data: pointer to data buffer
 * @data_size: number of bytes to be read from the topology device NVM
 * @cd: pointer to command details structure or NULL
 *
 * Read Topology Device NVM (0x06F3)
 */
enum ice_status
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
			 struct ice_aqc_link_topo_params *topo_params,
			 u32 start_address, u8 *data, u8 data_size,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_read_topo_dev_nvm *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || data_size == 0 ||
	    data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
		return ICE_ERR_PARAM;

	cmd = &desc.params.read_topo_dev_nvm;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);

	/* datalen is a little-endian field of the descriptor; convert from
	 * host order like every other descriptor field (previously assigned
	 * raw, which is wrong on big-endian hosts)
	 */
	desc.datalen = CPU_TO_LE16(data_size);
	ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
		   ICE_NONDMA_TO_NONDMA);
	cmd->start_address = CPU_TO_LE32(start_address);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	/* Read data is returned inline in the descriptor response */
	ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);

	return ICE_SUCCESS;
}
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@ -4160,6 +4568,56 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
return ICE_SUCCESS;
}
/**
 * ice_aq_get_internal_data - retrieve internal FW/HW data
 * @hw: pointer to the hardware structure
 * @cluster_id: specific cluster to dump
 * @table_id: table ID within cluster
 * @start: index of line in the block to read
 * @buf: dump buffer
 * @buf_size: dump buffer size
 * @ret_buf_size: return buffer size (returned by FW)
 * @ret_next_table: next block to read (returned by FW)
 * @ret_next_index: next index to read (returned by FW)
 * @cd: pointer to command details structure
 *
 * Get internal FW/HW data (0xFF08) for debug purposes.
 */
enum ice_status
ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
			 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
			 u16 *ret_next_table, u32 *ret_next_index,
			 struct ice_sq_cd *cd)
{
	struct ice_aqc_debug_dump_internals *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!buf || buf_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals);
	cmd = &desc.params.debug_dump;

	cmd->cluster_id = cluster_id;
	cmd->table_id = CPU_TO_LE16(table_id);
	cmd->idx = CPU_TO_LE32(start);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	/* On success, FW writes back how much data was returned and where
	 * the caller should continue reading from.
	 */
	if (ret_buf_size)
		*ret_buf_size = LE16_TO_CPU(desc.datalen);
	if (ret_next_table)
		*ret_next_table = LE16_TO_CPU(cmd->table_id);
	if (ret_next_index)
		*ret_next_index = LE32_TO_CPU(cmd->idx);

	return ICE_SUCCESS;
}
/**
* ice_read_byte - read context byte into struct
* @src_ctx: the context structure to read from
@ -4662,7 +5120,7 @@ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
static enum ice_status
enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
enum ice_status status;
@ -5021,7 +5479,6 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
/* check the current FW mode */
fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
if (fw_mode & ICE_FW_MODE_DBG_M)
return ICE_FW_MODE_DBG;
else if (fw_mode & ICE_FW_MODE_REC_M)
@ -5145,6 +5602,158 @@ ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
return ret;
}
/**
 * ice_aq_read_i2c - read from an I2C device behind the port topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
 *	    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_read_i2c_resp *resp;
	struct ice_aqc_i2c *cmd;
	enum ice_status status;
	u8 nbytes, i;

	if (!data)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	/* The requested read length is encoded in the params byte */
	nbytes = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	/* Read data is returned inline in the descriptor response */
	resp = &desc.params.read_i2c_resp;
	for (i = 0; i < nbytes; i++)
		data[i] = resp->i2c_data[i];

	return ICE_SUCCESS;
}
/**
 * ice_aq_write_i2c - write to an I2C device behind the port topology
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 nbytes, i;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	/* The requested write length is encoded in the params byte */
	nbytes = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	/* Only up to 4 bytes can be written per command */
	if (nbytes > 4)
		return ICE_ERR_PARAM;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	for (i = 0; i < nbytes; i++)
		cmd->i2c_data[i] = data[i];

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_aq_set_gpio - set a GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provide IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	/* The handle is a little-endian field in the descriptor; convert
	 * from host order (previously assigned raw, wrong on big-endian)
	 */
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
 * ice_aq_get_gpio - read a GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read, set from the LSB of the FW response
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	/* The handle is a little-endian field in the descriptor; convert
	 * from host order (previously assigned raw, wrong on big-endian)
	 */
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return ICE_SUCCESS;
}
/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
@ -5423,7 +6032,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
}
/**
* ice_fw_supports_lldp_fltr - check NVM version supports lldp_fltr_ctrl
* ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
* @hw: pointer to HW struct
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
@ -5488,3 +6097,17 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
}
return false;
}
/**
 * ice_is_fw_auto_drop_supported - check FW support for the auto drop feature
 * @hw: pointer to the hardware structure
 *
 * Returns true if the FW API version is at least
 * ICE_FW_API_AUTO_DROP_MAJ.ICE_FW_API_AUTO_DROP_MIN.
 *
 * Note: the previous check required both maj >= MAJ and min >= MIN, which
 * would wrongly reject a newer major version with a smaller minor (e.g.
 * (MAJ+1).0). This uses the same comparison pattern as
 * ice_fw_supports_link_override().
 */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver > ICE_FW_API_AUTO_DROP_MAJ)
		return true;
	if (hw->api_maj_ver == ICE_FW_API_AUTO_DROP_MAJ &&
	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
		return true;
	return false;
}

View File

@ -52,6 +52,7 @@ enum ice_fw_modes {
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
void ice_set_umac_shared(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@ -89,6 +90,12 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
enum ice_status
ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_table, u32 *ret_next_index,
struct ice_sq_cd *cd);
enum ice_status ice_set_mac_type(struct ice_hw *hw);
/* Define a macro that will align a pointer to point to the next memory address
@ -164,6 +171,12 @@ enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle);
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
u16 *node_handle);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@ -203,7 +216,8 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
@ -221,6 +235,16 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
enum ice_status
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
u32 start_address, u8 *buf, u8 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
@ -244,6 +268,8 @@ enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
@ -259,6 +285,8 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
@ -276,6 +304,13 @@ enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
@ -284,8 +319,18 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd);
bool ice_is_fw_health_report_supported(struct ice_hw *hw);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
/* AQ API version for FW auto drop reports */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */

View File

@ -932,36 +932,41 @@ ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
/**
* ice_aq_set_pfc_mode - Set PFC mode
* @hw: pointer to the HW struct
* @pfcmode_set: set-value of PFC mode
* @pfcmode_ret: return value of PFC mode, written by FW
* @pfc_mode: value of PFC mode to set
* @cd: pointer to command details structure or NULL
*
* This AQ call configures the PFC mode to DSCP-based PFC mode or VLAN
* -based PFC (0x0303)
*/
enum ice_status
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
struct ice_sq_cd *cd)
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
enum ice_status status;
if (pfcmode_set > ICE_AQC_PFC_DSCP_BASED_PFC)
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return ICE_ERR_PARAM;
cmd = &desc.params.set_query_pfc_mode;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
cmd->pfc_mode = pfcmode_set;
cmd->pfc_mode = pfc_mode;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
return status;
if (!status)
*pfcmode_ret = cmd->pfc_mode;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
* been executed, check if cmd->pfc_mode is what was requested. If not,
* return an error.
*/
if (cmd->pfc_mode != pfc_mode)
return ICE_ERR_NOT_SUPPORTED;
return status;
return ICE_SUCCESS;
}
/**
@ -1113,7 +1118,7 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
/**
* ice_get_ieee_dcb_cfg
* ice_get_ieee_or_cee_dcb_cfg
* @pi: port information structure
* @dcbx_mode: mode of DCBX (IEEE or CEE)
*
@ -1463,7 +1468,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
}
/**
 * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV
 * @tlv: location to build the TLV data
 * @dcbcfg: location of data to convert to TLV
 */
static void
ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
{
	u8 *buf = tlv->tlvinfo;
	u32 ouisubtype;
	u16 typelen;
	int i;

	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_DSCP_UP_TLV_LEN);
	tlv->typelen = HTONS(typelen);

	ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
			   ICE_DSCP_SUBTYPE_DSCP2UP);
	tlv->ouisubtype = HTONL(ouisubtype);

	/* The IPv4 DSCP2UP LUT starts at byte 0, the IPv6 LUT at
	 * ICE_DSCP_IPV6_OFFSET; both use the same DSCP-to-UP mapping
	 */
	for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
		buf[i] = dcbcfg->dscp_map[i];
		buf[ICE_DSCP_IPV6_OFFSET + i] = dcbcfg->dscp_map[i];
	}

	/* The byte after each LUT holds the UP for untagged traffic (0) */
	buf[ICE_DSCP_NUM_VAL] = 0;
	buf[ICE_DSCP_IPV6_OFFSET + ICE_DSCP_NUM_VAL] = 0;
}
#define ICE_BYTES_PER_TC 8
/**
 * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV
 * @tlv: location to build the TLV data
 */
static void
ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv)
{
	u32 ouisubtype;
	u16 typelen;

	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_DSCP_ENF_TLV_LEN);
	tlv->typelen = HTONS(typelen);

	ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
			   ICE_DSCP_SUBTYPE_ENFORCE);
	tlv->ouisubtype = HTONL(ouisubtype);

	/* Zero means every DSCP value is valid for every TC, for both the
	 * IPv4 and IPv6 tables
	 */
	memset(tlv->tlvinfo, 0,
	       2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC));
}
/**
 * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV
 * @tlv: location to build the TLV data
 * @dcbcfg: location of the data to convert to TLV
 */
static void
ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv,
		       struct ice_dcbx_cfg *dcbcfg)
{
	struct ice_dcb_ets_cfg *etscfg = &dcbcfg->etscfg;
	u8 *buf = tlv->tlvinfo;
	u32 ouisubtype;
	u16 typelen;
	int i;

	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_DSCP_TC_BW_TLV_LEN);
	tlv->typelen = HTONS(typelen);

	ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
			   ICE_DSCP_SUBTYPE_TCBW);
	tlv->ouisubtype = HTONL(ouisubtype);

	/* First octet after the subtype:
	 * ----------------------------
	 * | RSV | CBS | RSV | Max TCs |
	 * | 1b  | 1b  | 3b  | 3b      |
	 * ----------------------------
	 */
	buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;

	/* Bytes 1-4 are reserved. The TC BW table starts at byte 5 and the
	 * TSA assignment table follows it, one byte per TC each.
	 */
	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
		buf[5 + i] = etscfg->tcbwtable[i];
		buf[5 + ICE_MAX_TRAFFIC_CLASS + i] = etscfg->tsatable[i];
	}
}
/**
 * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV
 * @tlv: Fill PFC TLV in IEEE format
 * @dcbcfg: Local store which holds the PFC CFG data
 */
static void
ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
{
	u32 ouisubtype;
	u16 typelen;

	typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
		   ICE_DSCP_PFC_TLV_LEN);
	tlv->typelen = HTONS(typelen);

	ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
			   ICE_DSCP_SUBTYPE_PFC);
	tlv->ouisubtype = HTONL(ouisubtype);

	/* byte 0: PFC capability (low nibble); byte 1: PFC enable bitmap */
	tlv->tlvinfo[0] = dcbcfg->pfc.pfccap & 0xF;
	tlv->tlvinfo[1] = dcbcfg->pfc.pfcena & 0xF;
}
/**
* ice_add_dcb_tlv - Add all IEEE or DSCP TLVs
* @tlv: Fill TLV data in IEEE format
* @dcbcfg: Local store which holds the DCB Config
* @tlvid: Type of IEEE TLV
@ -1474,21 +1612,41 @@ static void
ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
u16 tlvid)
{
switch (tlvid) {
case ICE_IEEE_TLV_ID_ETS_CFG:
ice_add_ieee_ets_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_ETS_REC:
ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_PFC_CFG:
ice_add_ieee_pfc_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_APP_PRI:
ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
break;
default:
break;
if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) {
switch (tlvid) {
case ICE_IEEE_TLV_ID_ETS_CFG:
ice_add_ieee_ets_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_ETS_REC:
ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_PFC_CFG:
ice_add_ieee_pfc_tlv(tlv, dcbcfg);
break;
case ICE_IEEE_TLV_ID_APP_PRI:
ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
break;
default:
break;
}
} else {
/* pfc_mode == ICE_QOS_MODE_DSCP */
switch (tlvid) {
case ICE_TLV_ID_DSCP_UP:
ice_add_dscp_up_tlv(tlv, dcbcfg);
break;
case ICE_TLV_ID_DSCP_ENF:
ice_add_dscp_enf_tlv(tlv);
break;
case ICE_TLV_ID_DSCP_TC_BW:
ice_add_dscp_tc_bw_tlv(tlv, dcbcfg);
break;
case ICE_TLV_ID_DSCP_TO_PFC:
ice_add_dscp_pfc_tlv(tlv, dcbcfg);
break;
default:
break;
}
}
}
@ -1582,7 +1740,7 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aq_desc desc;
enum ice_status status;
if (!pi)
if (!pi || !pi->root)
return ICE_ERR_PARAM;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);

View File

@ -57,6 +57,13 @@
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
#define ICE_DSCP_OUI 0xFFFFFF
#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
#define ICE_DSCP_SUBTYPE_TCBW 0x43
#define ICE_DSCP_SUBTYPE_PFC 0x44
#define ICE_DSCP_IPV6_OFFSET 80
#define ICE_CEE_SUBTYPE_CTRL 1
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
@ -125,11 +132,20 @@
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
#define ICE_TLV_ID_DSCP_UP 3
#define ICE_TLV_ID_DSCP_ENF 4
#define ICE_TLV_ID_DSCP_TC_BW 5
#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
#define ICE_DSCP_UP_TLV_LEN 148
#define ICE_DSCP_ENF_TLV_LEN 132
#define ICE_DSCP_TC_BW_TLV_LEN 25
#define ICE_DSCP_PFC_TLV_LEN 6
#pragma pack(1)
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
@ -222,11 +238,10 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
enum ice_status
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
u8 ice_get_dcbx_status(struct ice_hw *hw);
enum ice_status

View File

@ -50,12 +50,24 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
#define ICE_DEV_ID_E823C_BACKPLANE 0x188A
/* Intel(R) Ethernet Connection E823-C for QSFP */
#define ICE_DEV_ID_E823C_QSFP 0x188B
/* Intel(R) Ethernet Connection E823-C for SFP */
#define ICE_DEV_ID_E823C_SFP 0x188C
/* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E823C_10G_BASE_T 0x188D
/* Intel(R) Ethernet Connection E823-C 1GbE */
#define ICE_DEV_ID_E823C_SGMII 0x188E
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */

View File

@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "0.29.4-k";
const uint8_t ice_major_version = 0;
const uint8_t ice_minor_version = 29;
const uint8_t ice_patch_version = 4;
const char ice_driver_version[] = "1.34.2-k";
const uint8_t ice_major_version = 1;
const uint8_t ice_minor_version = 34;
const uint8_t ice_patch_version = 2;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 0.29.4-k")
PVID(vendor, devid, name " - 1.34.2-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.29.4-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.34.2-k")
/**
* @var ice_vendor_info_array
@ -133,9 +133,6 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0009, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x000C, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"),
@ -169,6 +166,16 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
"Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE,
"Intel(R) Ethernet Connection E823-L 1GbE"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE,
"Intel(R) Ethernet Connection E823-C for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP,
"Intel(R) Ethernet Connection E823-C for QSFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP,
"Intel(R) Ethernet Connection E823-C for SFP"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T,
"Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII,
"Intel(R) Ethernet Connection E823-C 1GbE"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE,
"Intel(R) Ethernet Controller E810-XXV for backplane"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP,

View File

@ -66,6 +66,8 @@ enum feat_list {
ICE_FEATURE_LINK_MGMT_VER_1,
ICE_FEATURE_LINK_MGMT_VER_2,
ICE_FEATURE_HEALTH_STATUS,
ICE_FEATURE_FW_LOGGING,
ICE_FEATURE_HAS_PBA,
/* Must be last entry */
ICE_FEATURE_COUNT
};

View File

@ -365,7 +365,6 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
return NULL;
/* cppcheck-suppress nullPointer */
if (index > ICE_MAX_BST_TCAMS_IN_BUF)
return NULL;
@ -437,7 +436,6 @@ ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
if (!section)
return NULL;
/* cppcheck-suppress nullPointer */
if (index > ICE_MAX_LABELS_IN_BUF)
return NULL;
@ -1160,7 +1158,7 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
LE32_TO_CPU(ice_buf_tbl->buf_count));
ice_cache_vlan_mode(hw);
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
return status;
}
@ -1181,8 +1179,13 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
if (!pkg_hdr)
return ICE_ERR_PARAM;
hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810;
ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
hw->pkg_seg_id);
seg_hdr = (struct ice_generic_seg_hdr *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
if (seg_hdr) {
struct ice_meta_sect *meta;
struct ice_pkg_enum state;
@ -1449,7 +1452,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
}
/* find ICE segment in given package */
*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
*seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
@ -2811,7 +2814,6 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
count++;
LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
chk_count++;
/* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;

View File

@ -61,7 +61,7 @@ struct ice_pkg_hdr {
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
#define SEGMENT_TYPE_ICE_E810 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
@ -226,6 +226,8 @@ struct ice_buf_hdr {
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010

View File

@ -159,11 +159,11 @@ static const u32 ice_ptypes_macvlan_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
* include IPV4 other PTYPEs
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
* does NOT include IPV4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x1D800000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -173,11 +173,11 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header, includes
* IPV4 other PTYPEs
/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
* includes IPV4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x1D800000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -199,11 +199,11 @@ static const u32 ice_ptypes_ipv4_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
* include IPV6 other PTYPEs
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
* does NOT include IPV6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -213,11 +213,11 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header, includes
* IPV6 other PTYPEs
/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
* includes IPV6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -239,9 +239,11 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
/* Packet types for packets with an Outer/First/Single
* non-frag IPv4 header - no L4
*/
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
0x10C00000, 0x04000800, 0x00000000, 0x00000000,
0x10800000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -263,9 +265,11 @@ static const u32 ice_ptypes_ipv4_il_no_l4[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
/* Packet types for packets with an Outer/First/Single
* non-frag IPv6 header - no L4
*/
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x43000000, 0x10002000,
0x00000000, 0x00000000, 0x42000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -1298,9 +1302,19 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
/* set outer most header */
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_FRAG |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_GRE |
ICE_FLOW_SEG_HDR_IPV_OTHER;
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)

View File

@ -87,9 +87,10 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
* ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
* ICE_FLOW_SEG_HDR_IPV6.
*/
ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
};
enum ice_flow_field {
@ -193,6 +194,10 @@ enum ice_rss_cfg_hdr_type {
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
/* take inner as inputset for GTPoGRE with outer ipv4 + gre. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
/* take inner as inputset for GTPoGRE with outer ipv6 + gre. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};

View File

@ -0,0 +1,425 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
/**
* @file ice_fw_logging.c
* @brief firmware logging sysctls
*
* Contains sysctls to enable and configure firmware logging debug support.
*/
#include "ice_lib.h"
#include "ice_iflib.h"
#include <sys/queue.h>
#include <sys/sdt.h>
/*
 * SDT provider for DTrace probes related to firmware logging events
 */
SDT_PROVIDER_DEFINE(ice_fwlog);

/*
 * SDT DTrace probe fired when a firmware log message is received over the
 * AdminQ. It passes the buffer of the firmware log message along with its
 * length in bytes to the DTrace framework.
 */
SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int");

/*
 * Helper function prototypes
 */
static int ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg);

/*
 * dynamic sysctl handlers
 */
static int ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS);
/**
* ice_reconfig_fw_log - Re-program firmware logging configuration
* @sc: private softc structure
* @cfg: firmware log configuration to latch
*
* If the adminq is currently active, ask firmware to update the logging
* configuration. If the adminq is currently down, then do nothing. In this
* case, ice_init_hw() will re-configure firmware logging as soon as it brings
* up the adminq.
*/
static int
ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg)
{
enum ice_status status;
ice_fwlog_init(&sc->hw, cfg);
if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq))
return (0);
if (!ice_fwlog_supported(&sc->hw))
return (0);
status = ice_fwlog_set(&sc->hw, cfg);
if (status) {
device_printf(sc->dev,
"Failed to reconfigure firmware logging, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(sc->hw.adminq.sq_last_status));
return (ENODEV);
}
return (0);
}
#define ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION \
"\nControl firmware message limit to send per ARQ event" \
"\t\nMin: 1" \
"\t\nMax: 128"

/*
 * NOTE: the boolean fw_log sysctls treat a non-zero write as "enable"
 * (ice_sysctl_fwlog_set_cfg_options sets the option bit when the written
 * value is true, and ice_sysctl_fwlog_register registers on true), so the
 * help text below maps 0 to disable and 1 to enable. The original strings
 * had these two lines swapped and misspelled "reporting".
 */
#define ICE_SYSCTL_HELP_FWLOG_ARQ_ENA \
"\nControl whether to enable/disable reporting to admin Rx queue" \
"\n0 - Disable firmware reporting via ARQ" \
"\n1 - Enable firmware reporting via ARQ"

#define ICE_SYSCTL_HELP_FWLOG_UART_ENA \
"\nControl whether to enable/disable reporting to UART" \
"\n0 - Disable firmware reporting via UART" \
"\n1 - Enable firmware reporting via UART"

#define ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD \
"\nControl whether to enable logging during the attach phase" \
"\n0 - Disable firmware logging during attach phase" \
"\n1 - Enable firmware logging during attach phase"

#define ICE_SYSCTL_HELP_FWLOG_REGISTER \
"\nControl whether to enable/disable firmware logging" \
"\n0 - Disable firmware logging" \
"\n1 - Enable firmware logging"

#define ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY \
"\nControl the level of log output messages for this module" \
"\n\tverbose <4> - Verbose messages + (Error|Warning|Normal)" \
"\n\tnormal  <3> - Normal messages + (Error|Warning)" \
"\n\twarning <2> - Warning messages + (Error)" \
"\n\terror   <1> - Error messages" \
"\n\tnone    <0> - Disables all logging for this module"
/**
* ice_sysctl_fwlog_set_cfg_options - Sysctl for setting fwlog cfg options
* @oidp: sysctl oid structure
* @arg1: private softc structure
* @arg2: option to adjust
* @req: sysctl request pointer
*
* On read: displays whether firmware logging was reported during attachment
* On write: enables/disables firmware logging during attach phase
*
* This has no effect on the legacy (V1) version of firmware logging.
*/
static int
ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
int error;
u16 option = (u16)arg2;
bool enabled;
enabled = !!(cfg->options & option);
error = sysctl_handle_bool(oidp, &enabled, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (enabled)
cfg->options |= option;
else
cfg->options &= ~option;
return ice_reconfig_fw_log(sc, cfg);
}
/**
* ice_sysctl_fwlog_log_resolution - Sysctl for setting log message resolution
* @oidp: sysctl oid structure
* @arg1: private softc structure
* @arg2: __unused__
* @req: sysctl request pointer
*
* On read: displays message queue limit before posting
* On write: sets message queue limit before posting
*
* This has no effect on the legacy (V1) version of firmware logging.
*/
static int
ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
int error;
u8 resolution;
UNREFERENCED_PARAMETER(arg2);
resolution = cfg->log_resolution;
error = sysctl_handle_8(oidp, &resolution, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if ((resolution < ICE_AQC_FW_LOG_MIN_RESOLUTION) ||
(resolution > ICE_AQC_FW_LOG_MAX_RESOLUTION)) {
device_printf(sc->dev, "Log resolution out-of-bounds\n");
return (EINVAL);
}
cfg->log_resolution = resolution;
return ice_reconfig_fw_log(sc, cfg);
}
/**
* ice_sysctl_fwlog_register - Sysctl for (de)registering firmware logs
* @oidp: sysctl oid structure
* @arg1: private softc structure
* @arg2: __unused__
* @req: sysctl request pointer
*
* On read: displays whether firmware logging is registered
* On write: (de)registers firmware logging.
*/
static int
ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
enum ice_status status;
int error;
u8 enabled;
UNREFERENCED_PARAMETER(arg2);
if (ice_test_state(&sc->state, ICE_STATE_ATTACHING)) {
device_printf(sc->dev, "Registering FW Logging via kenv is supported with the on_load option\n");
return (EIO);
}
if (cfg->options & ICE_FWLOG_OPTION_IS_REGISTERED)
enabled = true;
else
enabled = false;
error = sysctl_handle_bool(oidp, &enabled, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq))
return (0);
if (enabled) {
status = ice_fwlog_register(&sc->hw);
if (!status)
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
} else {
status = ice_fwlog_unregister(&sc->hw);
if (!status)
ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
}
if (status)
return (EIO);
return (0);
}
/**
* ice_sysctl_fwlog_module_log_severity - Add tunables for a FW logging module
* @oidp: sysctl oid structure
* @arg1: private softc structure
* @arg2: index to logging module
* @req: sysctl request pointer
*/
static int
ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
struct sbuf *sbuf;
char *sev_str_end;
enum ice_aqc_fw_logging_mod module = (enum ice_aqc_fw_logging_mod)arg2;
int error, ll_num;
u8 log_level;
char sev_str[16];
bool sev_set = false;
log_level = cfg->module_entries[module].log_level;
sbuf = sbuf_new(NULL, sev_str, sizeof(sev_str), SBUF_FIXEDLEN);
sbuf_printf(sbuf, "%d<%s>", log_level, ice_log_sev_str(log_level));
sbuf_finish(sbuf);
sbuf_delete(sbuf);
error = sysctl_handle_string(oidp, sev_str, sizeof(sev_str), req);
if ((error) || (req->newptr == NULL))
return (error);
if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_VERBOSE), sev_str) == 0) {
log_level = ICE_FWLOG_LEVEL_VERBOSE;
sev_set = true;
} else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NORMAL), sev_str) == 0) {
log_level = ICE_FWLOG_LEVEL_NORMAL;
sev_set = true;
} else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_WARNING), sev_str) == 0) {
log_level = ICE_FWLOG_LEVEL_WARNING;
sev_set = true;
} else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_ERROR), sev_str) == 0) {
log_level = ICE_FWLOG_LEVEL_ERROR;
sev_set = true;
} else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NONE), sev_str) == 0) {
log_level = ICE_FWLOG_LEVEL_NONE;
sev_set = true;
}
if (!sev_set) {
ll_num = strtol(sev_str, &sev_str_end, 0);
if (sev_str_end == sev_str)
ll_num = -1;
if ((ll_num >= ICE_FWLOG_LEVEL_NONE) &&
(ll_num < ICE_FWLOG_LEVEL_INVALID))
log_level = ll_num;
else {
device_printf(sc->dev,
"%s: \"%s\" is not a valid log level\n",
__func__, sev_str);
return (EINVAL);
}
}
cfg->module_entries[module].log_level = log_level;
return ice_reconfig_fw_log(sc, cfg);
}
/**
 * ice_add_fw_logging_tunables - Add tunables to configure FW logging events
 * @sc: private softc structure
 * @parent: parent node to add the tunables under
 *
 * Creates a "fw_log" sysctl node under @parent with controls for the
 * firmware logging configuration (resolution, ARQ/UART reporting, on-load
 * registration, runtime register/unregister) and a "severity" sub-node with
 * one per-module log level control. Also seeds the cached configuration
 * defaults: log_resolution 10, ARQ reporting on, all modules at level none.
 *
 * All OIDs are CTLFLAG_RWTUN so they may be set via kenv as well as at
 * runtime.
 */
void
ice_add_fw_logging_tunables(struct ice_softc *sc, struct sysctl_oid *parent)
{
	struct sysctl_oid_list *parent_list, *fwlog_list, *module_list;
	struct sysctl_oid *fwlog_node, *module_node;
	struct sysctl_ctx_list *ctx;
	struct ice_hw *hw = &sc->hw;
	struct ice_fwlog_cfg *cfg;
	device_t dev = sc->dev;
	enum ice_aqc_fw_logging_mod module;
	u16 i;

	cfg = &hw->fwlog_cfg;
	ctx = device_get_sysctl_ctx(dev);
	parent_list = SYSCTL_CHILDREN(parent);

	fwlog_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "fw_log",
				     ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
				     "Firmware Logging");
	fwlog_list = SYSCTL_CHILDREN(fwlog_node);

	/* Default to 10 messages queued per ARQ event */
	cfg->log_resolution = 10;
	SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
	    0, ice_sysctl_fwlog_log_resolution,
	    "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION);

	/* Reporting over the ARQ is enabled by default */
	cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
	SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
	    ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options,
	    "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA);

	SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
	    ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options,
	    "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA);

	SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "on_load",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
	    ICE_FWLOG_OPTION_REGISTER_ON_INIT, ice_sysctl_fwlog_set_cfg_options,
	    "CU", ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD);

	SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "register",
	    ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
	    0, ice_sysctl_fwlog_register,
	    "CU", ICE_SYSCTL_HELP_FWLOG_REGISTER);

	module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity",
				      ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
				      "Level of log output");

	module_list = SYSCTL_CHILDREN(module_node);

	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
		/* Setup some defaults */
		cfg->module_entries[i].module_id = i;
		cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE;
		module = (enum ice_aqc_fw_logging_mod)i;

		/* One string-valued OID per firmware module */
		SYSCTL_ADD_PROC(ctx, module_list,
		    OID_AUTO, ice_fw_module_str(module),
		    ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc,
		    module, ice_sysctl_fwlog_module_log_severity,
		    "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY);
	}
}
/**
 * ice_handle_fw_log_event - Handle a firmware logging event from the AdminQ
 * @sc: pointer to private softc structure
 * @desc: the AdminQ descriptor for this firmware event
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Fires the ice_fwlog:::message DTrace probe with the raw message, then
 * hands the event to ice_fwlog_event_dump() which may print it to the
 * console.
 */
void
ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc,
			void *buf)
{
	/* Trigger a DTrace probe event for this firmware message */
	SDT_PROBE2(ice_fwlog, , , message, (const u8 *)buf, desc->datalen);

	/* Possibly dump the firmware message to the console, if enabled */
	ice_fwlog_event_dump(&sc->hw, desc, buf);
}

505
sys/dev/ice/ice_fwlog.c Normal file
View File

@ -0,0 +1,505 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "ice_common.h"
#include "ice_fwlog.h"
/**
 * cache_cfg - Cache FW logging config
 * @hw: pointer to the HW structure
 * @cfg: config to cache
 *
 * Keeps a driver-side copy of the last-known firmware logging configuration
 * in hw->fwlog_cfg so it can be re-applied (e.g. across resets) without
 * querying firmware.
 */
static void cache_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	/* struct assignment copies the whole config, module entries included */
	hw->fwlog_cfg = *cfg;
}
/**
 * valid_module_entries - validate all the module entry IDs and log levels
 * @hw: pointer to the HW structure
 * @entries: entries to validate
 * @num_entries: number of entries to validate
 *
 * @returns true only when @entries is non-NULL, @num_entries is non-zero,
 * and every entry has an in-range module ID and log level.
 */
static bool
valid_module_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
		     u16 num_entries)
{
	u16 i;

	if (!entries) {
		ice_debug(hw, ICE_DBG_FW_LOG, "Null ice_fwlog_module_entry array\n");
		return false;
	}

	if (!num_entries) {
		ice_debug(hw, ICE_DBG_FW_LOG, "num_entries must be non-zero\n");
		return false;
	}

	for (i = 0; i < num_entries; i++) {
		struct ice_fwlog_module_entry *entry = &entries[i];

		if (entry->module_id >= ICE_AQC_FW_LOG_ID_MAX) {
			ice_debug(hw, ICE_DBG_FW_LOG, "Invalid module_id %u, max valid module_id is %u\n",
				  entry->module_id, ICE_AQC_FW_LOG_ID_MAX - 1);
			return false;
		}

		if (entry->log_level >= ICE_FWLOG_LEVEL_INVALID) {
			/* Fix vs. original: the message printed
			 * ICE_AQC_FW_LOG_ID_MAX - 1 (the module-ID bound)
			 * here; the correct maximum log level is
			 * ICE_FWLOG_LEVEL_INVALID - 1.
			 */
			ice_debug(hw, ICE_DBG_FW_LOG, "Invalid log_level %u, max valid log_level is %u\n",
				  entry->log_level,
				  ICE_FWLOG_LEVEL_INVALID - 1);
			return false;
		}
	}

	return true;
}
/**
 * valid_cfg - validate entire configuration
 * @hw: pointer to the HW structure
 * @cfg: config to validate
 *
 * @returns true when @cfg is non-NULL, its log resolution is within the AQ
 * bounds, and all of its module entries pass valid_module_entries().
 */
static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	if (!cfg) {
		ice_debug(hw, ICE_DBG_FW_LOG, "Null ice_fwlog_cfg\n");
		return false;
	}

	if (cfg->log_resolution < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
	    cfg->log_resolution > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
		ice_debug(hw, ICE_DBG_FW_LOG, "Unsupported log_resolution %u, must be between %u and %u\n",
			  cfg->log_resolution, ICE_AQC_FW_LOG_MIN_RESOLUTION,
			  ICE_AQC_FW_LOG_MAX_RESOLUTION);
		return false;
	}

	/* The config is valid iff every module entry is */
	return valid_module_entries(hw, cfg->module_entries,
				    ICE_AQC_FW_LOG_ID_MAX);
}
/**
 * ice_fwlog_init - Initialize cached structures for tracking FW logging
 * @hw: pointer to the HW structure
 * @cfg: config used to initialize the cached structures
 *
 * This function should be called on driver initialization and before calling
 * ice_init_hw(). Firmware logging will be configured based on these settings
 * and also the PF will be registered on init.
 *
 * @returns ICE_SUCCESS when @cfg validates and is cached, ICE_ERR_PARAM
 * otherwise.
 */
enum ice_status
ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	if (valid_cfg(hw, cfg)) {
		cache_cfg(hw, cfg);
		return ICE_SUCCESS;
	}

	return ICE_ERR_PARAM;
}
/**
 * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
 * @hw: pointer to the HW structure
 * @entries: entries to configure
 * @num_entries: number of @entries
 * @options: options from ice_fwlog_cfg->options structure
 * @log_resolution: logging resolution
 *
 * Builds the per-module array in an indirect buffer, fills in the direct
 * command fields (resolution, module count, ARQ/UART enable flags), and
 * sends the command. The buffer is freed before returning.
 *
 * @returns ICE_ERR_NO_MEMORY if the buffer allocation fails, otherwise the
 * status of ice_aq_send_cmd().
 */
static enum ice_status
ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
		 u16 num_entries, u16 options, u16 log_resolution)
{
	struct ice_aqc_fw_log_cfg_resp *fw_modules;
	struct ice_aqc_fw_log *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i;

	fw_modules = (struct ice_aqc_fw_log_cfg_resp *)
		ice_calloc(hw, num_entries, sizeof(*fw_modules));
	if (!fw_modules)
		return ICE_ERR_NO_MEMORY;

	/* Translate driver entries into the wire format (LE16 module IDs) */
	for (i = 0; i < num_entries; i++) {
		fw_modules[i].module_identifier =
			CPU_TO_LE16(entries[i].module_id);
		fw_modules[i].log_level = entries[i].log_level;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
	/* RD flag: the indirect buffer carries data from driver to firmware */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd = &desc.params.fw_log;

	cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
	cmd->ops.cfg.log_resolution = CPU_TO_LE16(log_resolution);
	cmd->ops.cfg.mdl_cnt = CPU_TO_LE16(num_entries);

	if (options & ICE_FWLOG_OPTION_ARQ_ENA)
		cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
	if (options & ICE_FWLOG_OPTION_UART_ENA)
		cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;

	status = ice_aq_send_cmd(hw, &desc, fw_modules,
				 sizeof(*fw_modules) * num_entries,
				 NULL);

	ice_free(hw, fw_modules);

	return status;
}
/**
 * ice_fwlog_supported - Cached for whether FW supports FW logging or not
 * @hw: pointer to the HW structure
 *
 * Returns the cached flag set by ice_fwlog_set_support_ena().
 *
 * This will always return false if called before ice_init_hw(), so it must be
 * called after ice_init_hw().
 */
bool ice_fwlog_supported(struct ice_hw *hw)
{
	return hw->fwlog_support_ena;
}
/**
* ice_fwlog_set - Set the firmware logging settings
* @hw: pointer to the HW structure
* @cfg: config used to set firmware logging
*
* This function should be called whenever the driver needs to set the firmware
* logging configuration. It can be called on initialization, reset, or during
* runtime.
*
* If the PF wishes to receive FW logging then it must register via
* ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
* for init.
*/
enum ice_status
ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
enum ice_status status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
if (!valid_cfg(hw, cfg))
return ICE_ERR_PARAM;
status = ice_aq_fwlog_set(hw, cfg->module_entries,
ICE_AQC_FW_LOG_ID_MAX, cfg->options,
cfg->log_resolution);
if (!status)
cache_cfg(hw, cfg);
return status;
}
/**
 * update_cached_entries - Update module entries in cached FW logging config
 * @hw: pointer to the HW structure
 * @entries: entries to cache
 * @num_entries: number of @entries
 *
 * For each entry in @entries, finds the cached entry with the same module ID
 * in hw->fwlog_cfg and copies the new log level over. Entries whose module ID
 * is not found in the cache are ignored.
 */
static void
update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
		      u16 num_entries)
{
	u16 idx;

	for (idx = 0; idx < num_entries; idx++) {
		struct ice_fwlog_module_entry *src = &entries[idx];
		u16 cache_idx;

		for (cache_idx = 0; cache_idx < ICE_AQC_FW_LOG_ID_MAX;
		     cache_idx++) {
			struct ice_fwlog_module_entry *dst =
				&hw->fwlog_cfg.module_entries[cache_idx];

			if (dst->module_id != src->module_id)
				continue;

			dst->log_level = src->log_level;
			break;
		}
	}
}
/**
 * ice_fwlog_update_modules - Update the log level 1 or more FW logging modules
 * @hw: pointer to the HW structure
 * @entries: array of ice_fwlog_module_entry(s)
 * @num_entries: number of entries
 *
 * This function should be called to update the log level of 1 or more FW
 * logging modules via module ID.
 *
 * Only the entries passed in will be affected. All other firmware logging
 * settings will be unaffected: the live configuration is fetched first so the
 * current options and log resolution are re-sent unchanged.
 */
enum ice_status
ice_fwlog_update_modules(struct ice_hw *hw,
			 struct ice_fwlog_module_entry *entries,
			 u16 num_entries)
{
	struct ice_fwlog_cfg *cur_cfg;
	enum ice_status err;

	if (!ice_fwlog_supported(hw))
		return ICE_ERR_NOT_SUPPORTED;

	if (!valid_module_entries(hw, entries, num_entries))
		return ICE_ERR_PARAM;

	cur_cfg = (struct ice_fwlog_cfg *)ice_calloc(hw, 1, sizeof(*cur_cfg));
	if (!cur_cfg)
		return ICE_ERR_NO_MEMORY;

	/* Read the current settings so only the module levels change */
	err = ice_fwlog_get(hw, cur_cfg);
	if (err)
		goto cleanup;

	err = ice_aq_fwlog_set(hw, entries, num_entries, cur_cfg->options,
			       cur_cfg->log_resolution);
	if (!err)
		update_cached_entries(hw, entries, num_entries);

cleanup:
	ice_free(hw, cur_cfg);
	return err;
}
/**
 * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
 * @hw: pointer to the HW structure
 * @reg: true to register and false to unregister
 *
 * Sends the register/unregister direct AQ command. When @reg is false the
 * REGISTER flag is simply left clear in the descriptor, which firmware
 * interprets as an unregister request.
 */
static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);

	if (reg)
		desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_fwlog_register - Register the PF for firmware logging
* @hw: pointer to the HW structure
*
* After this call the PF will start to receive firmware logging based on the
* configuration set in ice_fwlog_set.
*/
enum ice_status ice_fwlog_register(struct ice_hw *hw)
{
enum ice_status status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
status = ice_aq_fwlog_register(hw, true);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
else
hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
return status;
}
/**
* ice_fwlog_unregister - Unregister the PF from firmware logging
* @hw: pointer to the HW structure
*/
enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
{
enum ice_status status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
status = ice_aq_fwlog_register(hw, false);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
else
hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
return status;
}
/**
 * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
 * @hw: pointer to the HW structure
 * @cfg: firmware logging configuration to populate
 *
 * Zeroes @cfg, queries firmware, then fills in @cfg's log resolution,
 * option flags, and per-module entries from the response. The module count
 * reported by firmware is clamped to ICE_AQC_FW_LOG_ID_MAX before copying.
 */
static enum ice_status
ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
	struct ice_aqc_fw_log_cfg_resp *fw_modules;
	struct ice_aqc_fw_log *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, module_id_cnt;
	void *buf;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);

	/* Indirect response buffer for the per-module entries */
	buf = ice_calloc(hw, 1, ICE_AQ_MAX_BUF_LEN);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
	cmd = &desc.params.fw_log;

	cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;

	status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
		goto status_out;
	}

	/* Clamp the firmware-reported module count to what the driver knows;
	 * a shortfall is only logged, an excess is logged and truncated.
	 */
	module_id_cnt = LE16_TO_CPU(cmd->ops.cfg.mdl_cnt);
	if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
		ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
	} else {
		if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX)
			ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
				  ICE_AQC_FW_LOG_ID_MAX);
		module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
	}

	/* Translate the direct-command fields back into driver options */
	cfg->log_resolution = LE16_TO_CPU(cmd->ops.cfg.log_resolution);
	if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
		cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
	if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
		cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
	if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
		cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;

	fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;

	for (i = 0; i < module_id_cnt; i++) {
		struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];

		cfg->module_entries[i].module_id =
			LE16_TO_CPU(fw_module->module_identifier);
		cfg->module_entries[i].log_level = fw_module->log_level;
	}

status_out:
	ice_free(hw, buf);
	return status;
}
/**
* ice_fwlog_set_support_ena - Set if FW logging is supported by FW
* @hw: pointer to the HW struct
*
* If FW returns success to the ice_aq_fwlog_get call then it supports FW
* logging, else it doesn't. Set the fwlog_support_ena flag accordingly.
*
* This function is only meant to be called during driver init to determine if
* the FW support FW logging.
*/
void ice_fwlog_set_support_ena(struct ice_hw *hw)
{
struct ice_fwlog_cfg *cfg;
enum ice_status status;
hw->fwlog_support_ena = false;
cfg = (struct ice_fwlog_cfg *)ice_calloc(hw, 1, sizeof(*cfg));
if (!cfg)
return;
/* don't call ice_fwlog_get() because that would overwrite the cached
* configuration from the call to ice_fwlog_init(), which is expected to
* be called prior to this function
*/
status = ice_aq_fwlog_get(hw, cfg);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "ice_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
status);
else
hw->fwlog_support_ena = true;
ice_free(hw, cfg);
}
/**
* ice_fwlog_get - Get the firmware logging settings
* @hw: pointer to the HW structure
* @cfg: config to populate based on current firmware logging settings
*/
enum ice_status
ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
enum ice_status status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
if (!cfg)
return ICE_ERR_PARAM;
status = ice_aq_fwlog_get(hw, cfg);
if (status)
return status;
cache_cfg(hw, cfg);
return ICE_SUCCESS;
}
/**
 * ice_fwlog_event_dump - Dump the event received over the Admin Receive Queue
 * @hw: pointer to the HW structure
 * @desc: Admin Receive Queue descriptor
 * @buf: buffer that contains the FW log event data
 *
 * If the driver receives the ice_aqc_opc_fw_logs_event on the Admin Receive
 * Queue, then it should call this function to dump the FW log data.
 * Silently does nothing when firmware logging is unsupported.
 */
void
ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	if (!ice_fwlog_supported(hw))
		return;

	/* NOTE(review): 32 and 1 presumably select the dump row/group sizes
	 * for ice_info_fwlog — confirm against its definition.
	 */
	ice_info_fwlog(hw, 32, 1, (u8 *)buf, LE16_TO_CPU(desc->datalen));
}

91
sys/dev/ice/ice_fwlog.h Normal file
View File

@ -0,0 +1,91 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_FWLOG_H_
#define _ICE_FWLOG_H_
#include "ice_adminq_cmd.h"
struct ice_hw;
/* Only a single log level should be set and all log levels under the set value
 * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all
 * other log levels are included (except ICE_FWLOG_LEVEL_NONE)
 */
enum ice_fwlog_level {
	ICE_FWLOG_LEVEL_NONE = 0,
	ICE_FWLOG_LEVEL_ERROR = 1,
	ICE_FWLOG_LEVEL_WARNING = 2,
	ICE_FWLOG_LEVEL_NORMAL = 3,
	ICE_FWLOG_LEVEL_VERBOSE = 4,
	ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
};
/* Per-module firmware logging configuration: pairs a FW module ID with the
 * ice_fwlog_level verbosity to request for that module.
 */
struct ice_fwlog_module_entry {
	/* module ID for the corresponding firmware logging event */
	u16 module_id;
	/* verbosity level for the module_id; one of enum ice_fwlog_level */
	u8 log_level;
};
/* Aggregate firmware logging configuration: per-module levels plus global
 * options controlling where events are delivered (ARQ and/or UART) and how
 * many are batched per ARQ event.
 */
struct ice_fwlog_cfg {
	/* list of modules for configuring log level */
	struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
/* deliver FW log events over the Admin Receive Queue */
#define ICE_FWLOG_OPTION_ARQ_ENA		BIT(0)
/* deliver FW log events over UART */
#define ICE_FWLOG_OPTION_UART_ENA		BIT(1)
	/* set before calling ice_fwlog_init() so the PF registers for firmware
	 * logging on initialization
	 */
#define ICE_FWLOG_OPTION_REGISTER_ON_INIT	BIT(2)
	/* set in the ice_fwlog_get() response if the PF is registered for FW
	 * logging events over ARQ
	 */
#define ICE_FWLOG_OPTION_IS_REGISTERED		BIT(3)
	/* options used to configure firmware logging; ICE_FWLOG_OPTION_* */
	u16 options;
	/* minimum number of log events sent per Admin Receive Queue event */
	u8 log_resolution;
};
/* Probe FW-logging support; sets hw->fwlog_support_ena (call once at init) */
void ice_fwlog_set_support_ena(struct ice_hw *hw);
/* Query whether this FW supports FW logging */
bool ice_fwlog_supported(struct ice_hw *hw);
/* Initialize FW logging with an initial configuration */
enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
/* Apply a new FW logging configuration */
enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
/* Read the current FW logging configuration */
enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
/* Update log levels for a subset of FW modules */
enum ice_status
ice_fwlog_update_modules(struct ice_hw *hw,
			 struct ice_fwlog_module_entry *entries,
			 u16 num_entries);
/* Register/unregister this PF for FW log events over the ARQ */
enum ice_status ice_fwlog_register(struct ice_hw *hw);
enum ice_status ice_fwlog_unregister(struct ice_hw *hw);
/* Dump an ice_aqc_opc_fw_logs_event payload received on the ARQ */
void
ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
#endif /* _ICE_FWLOG_H_ */

View File

@ -30,10 +30,20 @@
*/
/*$FreeBSD$*/
/* Machine-generated file; do not edit */
/* Machine generated file. Do not edit. */
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
#define GL_HICR_EN 0x00082044
#define GLGEN_CSR_DEBUG_C 0x00075750
#define GLNVM_GENS 0x000B6100
#define GLNVM_FLA 0x000B6108
#define GL_HIDA_MAX_INDEX 15
#define GL_HIBA_MAX_INDEX 1023
#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */
#define GL_RDPU_CNTRL_RX_PAD_EN_S 0
#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0)
@ -476,8 +486,8 @@
#define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27
@ -540,8 +550,8 @@
#define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27
@ -589,8 +599,8 @@
#define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23)
#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24
#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24)
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25
#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25)
#define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26
#define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26)
#define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27
@ -730,8 +740,8 @@
#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M MAKEMASK(0x1F, 8)
#define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0)
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0)
#define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7
#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0
@ -889,8 +899,8 @@
#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6)
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M MAKEMASK(0x7F, 7)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M MAKEMASK(0x3FF, 22)
#define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
@ -2233,8 +2243,8 @@
#define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */
#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0
#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0)
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M MAKEMASK(0x7, 1)
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1
#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M MAKEMASK(0x7, 1)
#define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */
#define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7
#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0
@ -2382,8 +2392,8 @@
#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0
#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0
#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
#define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define TPB_WB_RL_TC_CFG_MAX_INDEX 31
#define TPB_WB_RL_TC_CFG_TOKENS_S 0
@ -2448,8 +2458,8 @@
#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2
#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
@ -2642,8 +2652,8 @@
#define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FORCE_PID_MAX_INDEX 2
#define GL_PREEXT_FORCE_PID_STATIC_PID_S 0
@ -2844,8 +2854,8 @@
#define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
#define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FORCE_PID_MAX_INDEX 2
#define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0
@ -3012,10 +3022,10 @@
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M MAKEMASK(0x7, 4)
#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8
#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M MAKEMASK(0x7, 8)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M MAKEMASK(0x3, 12)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M MAKEMASK(0x3, 14)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12
#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M MAKEMASK(0x3, 12)
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14
#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M MAKEMASK(0x3, 14)
#define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* Reset Source: CORER */
#define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255
#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0
@ -3094,8 +3104,8 @@
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M MAKEMASK(0xFF, 0)
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M MAKEMASK(0x1F, 8)
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M MAKEMASK(0xFF, 16)
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16
#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M MAKEMASK(0xFF, 16)
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24
#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M MAKEMASK(0x1F, 24)
#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
@ -3308,18 +3318,18 @@
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5)
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9
#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9)
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14)
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14
#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14)
#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16
#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16)
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20)
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20
#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20)
#define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */
#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0
#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0)
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0)
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0
#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0)
#define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */
#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0
#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0)
@ -3345,10 +3355,10 @@
#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0
#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1)
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4
#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4)
#define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */
@ -3667,8 +3677,8 @@
#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0
#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */
#define GLHMC_FWSDDATALOW_PMSDVALID_S 0
#define GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0)
@ -4066,8 +4076,8 @@
#define GLHMC_VFPEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0)
#define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPEOOISCBASE_MAX_INDEX 31
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0
#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPEOOISCCNT_MAX_INDEX 31
#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0
@ -4114,8 +4124,8 @@
#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPERRFFLBASE_MAX_INDEX 31
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0
#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFPETIMERBASE_MAX_INDEX 31
#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0
@ -4142,8 +4152,8 @@
#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLHMC_VFSDDATALOW_MAX_INDEX 31
#define GLHMC_VFSDDATALOW_PMSDVALID_S 0
@ -4249,8 +4259,8 @@
#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7)
#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8
#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8)
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16)
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16
#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16)
#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31
#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31)
#define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */
@ -4337,8 +4347,8 @@
#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11)
#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12
#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12)
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13)
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13
#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13)
#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14
#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14)
#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15
@ -5233,10 +5243,10 @@
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
@ -5337,10 +5347,10 @@
#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17)
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18)
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19)
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20)
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19
#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19)
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20
#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20)
#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21
#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21)
#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22
@ -5359,8 +5369,8 @@
#define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */
#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0
#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0)
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2
#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2)
#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3
@ -5373,8 +5383,8 @@
#define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6)
#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7
#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7)
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8)
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8
#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8)
#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9
#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9)
#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10
@ -5456,8 +5466,8 @@
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
#define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */
#define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M MAKEMASK(0xFFFFFFFF, 0)
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0
#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
#define GL_FWRESETCNT_FWRESETCNT_S 0
#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
@ -5869,8 +5879,8 @@
#define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M MAKEMASK(0x3, 10)
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10
#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M MAKEMASK(0x3, 10)
#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12
#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M MAKEMASK(0x7, 12)
#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16
@ -6749,11 +6759,11 @@
#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0
#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M MAKEMASK(0xFFFFFF, 0)
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0
#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M MAKEMASK(0xFFFFFF, 0)
#define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0
#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */
#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0
#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M MAKEMASK(0xFFFFFF, 0)
@ -8234,7 +8244,7 @@
#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0
#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M MAKEMASK(0xFFFFFFFF, 0)
#define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0
#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M MAKEMASK(0xFFFFFFFF, 0)
#define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */
@ -9476,5 +9486,5 @@
#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0)
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
#endif /* !_ICE_HW_AUTOGEN_H_ */
#endif

View File

@ -137,6 +137,8 @@ struct ice_irq_vector {
* @tx_paddr: the physical address for this queue
* @q_teid: the Tx queue TEID returned from firmware
* @stats: queue statistics
* @tc: traffic class queue belongs to
* @q_handle: qidx in tc; used in TXQ enable functions
*
* Other parameters may be iflib driver specific
*/
@ -151,6 +153,8 @@ struct ice_tx_queue {
struct ice_irq_vector *irqv;
u32 q_teid;
u32 me;
u16 q_handle;
u8 tc;
/* descriptor writeback status */
qidx_t *tx_rsq;
@ -171,6 +175,7 @@ struct ice_tx_queue {
* @rx_paddr: the physical address for this queue
* @tail: the tail register address for this queue
* @stats: queue statistics
* @tc: traffic class queue belongs to
*
* Other parameters may be iflib driver specific
*/
@ -183,6 +188,7 @@ struct ice_rx_queue {
u32 tail;
struct ice_irq_vector *irqv;
u32 me;
u8 tc;
struct if_irq que_irq;
};

View File

@ -55,6 +55,7 @@ static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
* advanced 32byte Rx descriptors.
@ -78,6 +79,7 @@ struct if_txrx ice_txrx = {
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
.ift_rxd_refill = ice_ift_rxd_refill,
.ift_rxd_flush = ice_ift_rxd_flush,
.ift_txq_select = ice_ift_queue_select,
};
/**
@ -276,7 +278,7 @@ ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
*
* This function is called by iflib, and executes in ithread context. It is
* called by iflib to obtain data which has been DMA'ed into host memory.
* Returns zero on success, and an error code on failure.
* Returns zero on success, and EBADMSG on failure.
*/
static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
@ -300,8 +302,6 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
status0 = le16toh(cur->wb.status_error0);
plen = le16toh(cur->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
/* we should never be called without a valid descriptor */
MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);
@ -311,14 +311,6 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
cur->wb.status_error0 = 0;
eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
/*
* Make sure packets with bad L2 values are discarded.
* NOTE: Only the EOP descriptor has valid error results.
*/
if (eop && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S))) {
rxq->stats.desc_errs++;
return (EBADMSG);
}
ri->iri_frags[i].irf_flid = 0;
ri->iri_frags[i].irf_idx = cidx;
ri->iri_frags[i].irf_len = plen;
@ -327,19 +319,36 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
i++;
} while (!eop);
/* capture soft statistics for this Rx queue */
rxq->stats.rx_packets++;
rxq->stats.rx_bytes += ri->iri_len;
/* End of Packet reached; cur is eop/last descriptor */
if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
ri->iri_rsstype = ice_ptype_to_hash(ptype);
/* Make sure packets with bad L2 values are discarded.
* This bit is only valid in the last descriptor.
*/
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S)) {
rxq->stats.desc_errs++;
return (EBADMSG);
}
/* Get VLAN tag information if one is in descriptor */
if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
ri->iri_vtag = le16toh(cur->wb.l2tag1);
ri->iri_flags |= M_VLANTAG;
}
/* Capture soft statistics for this Rx queue */
rxq->stats.rx_packets++;
rxq->stats.rx_bytes += ri->iri_len;
/* Get packet type and set checksum flags */
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
/* Set remaining iflib RX descriptor info fields */
ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
ri->iri_rsstype = ice_ptype_to_hash(ptype);
ri->iri_nfrags = i;
return (0);
}
@ -397,3 +406,40 @@ ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
wr32(hw, rxq->tail, pidx);
}
/* ice_ift_queue_select - iflib ift_txq_select hook: pick a Tx queue for @m.
 *
 * With a single TC configured this mirrors iflib's default selection
 * (flowid modulo queue count, or queue 0 when no hash is present). With
 * multiple TCs it maps the mbuf's VLAN user priority through the DCBX ETS
 * priority table to a TC, then hashes within that TC's queue range.
 *
 * NOTE(review): `ifp` is not declared anywhere in this function body —
 * presumably this ALTQ check is compiled under an #ifdef that also provides
 * `ifp`; verify against the full source file.
 */
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_vsi *vsi = &sc->pf_vsi;
	u16 tc_base_queue, tc_qcount;
	u8 up, tc;
	/* Only go out on default queue if ALTQ is enabled */
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (0);
	/* Single-TC case: behave like iflib's default queue selection */
	if (!ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) {
		if (M_HASHTYPE_GET(m)) {
			/* Default iflib queue selection method */
			return (m->m_pkthdr.flowid % sc->pf_vsi.num_tx_queues);
		} else
			return (0);
	}
	/* Use default TC unless overridden */
	tc = 0; /* XXX: Get default TC for traffic if >1 TC? */
	if (m->m_flags & M_VLANTAG) {
		/* Map 802.1p user priority -> TC via the local DCBX ETS table */
		up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
		tc = sc->hw.port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[up];
	}
	/* Restrict hashing to the chosen TC's contiguous queue range */
	tc_base_queue = vsi->tc_info[tc].qoffset;
	tc_qcount = vsi->tc_info[tc].qcount_tx;
	if (M_HASHTYPE_GET(m))
		return ((m->m_pkthdr.flowid % tc_qcount) + tc_base_queue);
	else
		return (tc_base_queue);
}

View File

@ -280,7 +280,6 @@ enum ice_rx_l2_ptype {
};
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
u32 outer_ip:1;
u32 outer_ip_ver:2;
@ -794,6 +793,15 @@ enum ice_rx_flex_desc_exstat_bits {
ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3,
};
/*
* For ice_32b_rx_flex_desc.ts_low:
* [0]: Timestamp-low validity bit
* [1:7]: Timestamp-low value
*/
#define ICE_RX_FLEX_DESC_TS_L_VALID_S 0x01
#define ICE_RX_FLEX_DESC_TS_L_VALID_M ICE_RX_FLEX_DESC_TS_L_VALID_S
#define ICE_RX_FLEX_DESC_TS_L_M 0xFE
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
@ -940,6 +948,11 @@ struct ice_tx_ctx_desc {
__le64 qw1;
};
#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */
#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */
#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */
#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */
#define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
@ -1162,8 +1175,7 @@ struct ice_tx_drbell_q_ctx {
/* macro to make the table lines short */
#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
{ PTYPE, \
1, \
{ 1, \
ICE_RX_PTYPE_OUTER_##OUTER_IP, \
ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
ICE_RX_PTYPE_##OUTER_FRAG, \
@ -1173,14 +1185,14 @@ struct ice_tx_drbell_q_ctx {
ICE_RX_PTYPE_INNER_PROT_##I, \
ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
#define ICE_PTT_UNUSED_ENTRY(PTYPE) { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
/* shorter macros makes the table fit but are terse */
#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
/* Lookup table mapping the 10-bit HW PTYPE to the bit field for decoding */
static const struct ice_rx_ptype_decoded ice_ptype_lkup[1024] = {
/* L2 Packet types */
ICE_PTT_UNUSED_ENTRY(0),
ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
@ -2342,7 +2354,7 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
ICE_PTT_UNUSED_ENTRY(1020),
ICE_PTT_UNUSED_ENTRY(1021),
ICE_PTT_UNUSED_ENTRY(1022),
ICE_PTT_UNUSED_ENTRY(1023),
ICE_PTT_UNUSED_ENTRY(1023)
};
static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
@ -2362,5 +2374,4 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -151,6 +151,8 @@ struct ice_bar_info {
#define ICE_MSIX_BAR 3
#define ICE_MAX_DCB_TCS 8
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
@ -199,7 +201,7 @@ struct ice_bar_info {
#define ICE_NVM_ACCESS \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
#define ICE_AQ_LEN 512
#define ICE_AQ_LEN 1023
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
@ -246,6 +248,11 @@ struct ice_bar_info {
#define ICE_DEFAULT_VF_QUEUES 4
/*
* The maximum number of RX queues allowed per TC in a VSI.
*/
#define ICE_MAX_RXQS_PER_TC 256
/*
* There are three settings that can be updated independently or
* altogether: Link speed, FEC, and Flow Control. These macros allow
@ -463,6 +470,19 @@ struct ice_pf_sw_stats {
u32 rx_mdd_count;
};
/**
* @struct ice_tc_info
* @brief Traffic class information for a VSI
*
* Stores traffic class information used in configuring
* a VSI.
*/
struct ice_tc_info {
u16 qoffset; /* Offset in VSI queue space */
u16 qcount_tx; /* TX queues for this Traffic Class */
u16 qcount_rx; /* RX queues */
};
/**
* @struct ice_vsi
* @brief VSI structure
@ -504,6 +524,12 @@ struct ice_vsi {
struct ice_aqc_vsi_props info;
/* DCB configuration */
u8 num_tcs; /* Total number of enabled TCs */
u16 tc_map; /* bitmap of enabled Traffic Classes */
/* Information for each traffic class */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
/* context for per-VSI sysctls */
struct sysctl_ctx_list ctx;
struct sysctl_oid *vsi_node;
@ -541,9 +567,11 @@ enum ice_state {
ICE_STATE_RECOVERY_MODE,
ICE_STATE_ROLLBACK_MODE,
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_ATTACHING,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
/* This entry must be last */
ICE_STATE_LAST,
};
@ -648,6 +676,7 @@ struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
struct ice_str_buf _ice_status_str(enum ice_status status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_log_sev_str(u8 log_level);
struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
struct ice_str_buf _ice_mdd_rx_str(u8 event);
@ -662,6 +691,7 @@ struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);
#define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str
#define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str
#define ice_log_sev_str(log_level) _ice_log_sev_str(log_level).str
#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str
/**
@ -738,6 +768,12 @@ void ice_request_stack_reinit(struct ice_softc *sc);
/* Details of how to check if the network stack is detaching us */
bool ice_driver_is_detaching(struct ice_softc *sc);
const char * ice_fw_module_str(enum ice_aqc_fw_logging_mod module);
void ice_add_fw_logging_tunables(struct ice_softc *sc,
struct sysctl_oid *parent);
void ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc,
void *buf);
int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending);
int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num);
void ice_free_bar(device_t dev, struct ice_bar_info *bar);
@ -826,5 +862,6 @@ int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
#endif /* _ICE_LIB_H_ */

View File

@ -32,6 +32,8 @@
#include "ice_common.h"
#define GL_MNG_DEF_DEVID 0x000B611C
/**
* ice_aq_read_nvm
* @hw: pointer to the HW struct
@ -583,6 +585,42 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
return status;
}
/**
 * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
 * @hw: pointer to the HW struct
 * @bank: whether to read from the active or inactive flash bank
 * @hdr_len: storage for header length in words
 *
 * Read the CSS header length from the NVM CSS header and add the Authentication
 * header size, and then convert to words.
 *
 * Returns ICE_SUCCESS, or the error from reading the NVM module.
 */
static enum ice_status
ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
			u32 *hdr_len)
{
	u16 hdr_len_l, hdr_len_h;
	enum ice_status status;
	u32 hdr_len_dword;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
				     &hdr_len_l);
	if (status)
		return status;

	status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
				     &hdr_len_h);
	if (status)
		return status;

	/* CSS header length is in DWORD, so convert to words and add
	 * authentication header size.
	 *
	 * Cast the high word to u32 before shifting: a plain u16 is promoted
	 * to (signed) int, and left-shifting a value with bit 15 set by 16
	 * would shift into the sign bit, which is undefined behavior.
	 */
	hdr_len_dword = ((u32)hdr_len_h << 16) | hdr_len_l;
	*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;

	return ICE_SUCCESS;
}
/**
* ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
* @hw: pointer to the HW structure
@ -596,7 +634,16 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
static enum ice_status
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
	enum ice_status status;
	u32 hdr_len;

	/* The Shadow RAM copy is located past the CSS header, so the
	 * header length must be computed first to find its base offset.
	 */
	status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
	if (status)
		return status;

	/* The CSS header region is padded up to a 32-word boundary */
	hdr_len = ROUND_UP(hdr_len, 32);

	return ice_read_nvm_module(hw, bank, hdr_len + offset, data);
}
/**
@ -889,22 +936,26 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
*/
static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
u32 orom_size_word = hw->flash.banks.orom_size / 2;
enum ice_status status;
u16 srev_l, srev_h;
u32 css_start;
u32 hdr_len;
if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) {
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
return status;
if (orom_size_word < hdr_len) {
ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n",
hw->flash.banks.orom_size);
return ICE_ERR_CFG;
}
/* calculate how far into the Option ROM the CSS header starts. Note
* that ice_read_orom_module takes a word offset so we need to
* divide by 2 here.
* that ice_read_orom_module takes a word offset
*/
css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2;
css_start = orom_size_word - hdr_len;
status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l);
if (status)
return status;
@ -956,7 +1007,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(tmp); i++)
/* cppcheck-suppress objectIndex */
sum += ((u8 *)&tmp)[i];
if (sum) {
@ -1908,6 +1958,7 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
case GLGEN_CSR_DEBUG_C:
case GLGEN_RSTAT:
case GLPCI_LBARCTRL:
case GL_MNG_DEF_DEVID:
case GLNVM_GENS:
case GLNVM_FLA:
case PF_FUNC_RID:
@ -1916,11 +1967,11 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
break;
}
for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++)
for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
if (offset == (u32)GL_HIDA(i))
return ICE_SUCCESS;
for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++)
for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
if (offset == (u32)GL_HIBA(i))
return ICE_SUCCESS;

View File

@ -94,19 +94,6 @@ union ice_nvm_access_data {
struct ice_nvm_features drv_features; /* NVM features */
};
/* NVM Access registers */
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
#define GL_HICR_EN 0x00082044
#define GLGEN_CSR_DEBUG_C 0x00075750
#define GLPCI_LBARCTRL 0x0009DE74
#define GLNVM_GENS 0x000B6100
#define GLNVM_FLA 0x000B6108
#define ICE_NVM_ACCESS_GL_HIDA_MAX 15
#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd);

View File

@ -145,6 +145,42 @@ ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
hexdump(buf, len, prettyname, HD_OMIT_CHARS | rowsize);
}
/**
 * ice_info_fwlog - Format and print an array of values to the console
 * @hw: private hardware structure
 * @rowsize: preferred number of rows to use
 * @groupsize: preferred size in bytes to print each chunk
 * @buf: the array buffer to print
 * @len: size of the array buffer
 *
 * Format the given array as a series of uint8_t values with hexadecimal
 * notation and log the contents to the console log. This variation is
 * specific to firmware logging.
 *
 * TODO: Currently only supports a group size of 1, due to the way hexdump is
 * implemented.
 */
void
ice_info_fwlog(struct ice_hw *hw, uint32_t rowsize, uint32_t __unused groupsize,
	       uint8_t *buf, size_t len)
{
	char hdr[20];
	device_t dev;

	/* Nothing to do if this device doesn't support FW logging */
	if (!ice_fwlog_supported(hw))
		return;

	/* The row size is OR'd into the hexdump flags below, so clamp it
	 * to the width of that field.
	 */
	if (rowsize > 0xFF)
		rowsize = 0xFF;

	/* Prefix each output line with the device name */
	dev = ice_hw_to_dev(hw);
	snprintf(hdr, sizeof(hdr), "%s: FWLOG: ",
	    device_get_nameunit(dev));

	hexdump(buf, len, hdr, HD_OMIT_CHARS | rowsize);
}
/**
* rd32 - Read a 32bit hardware register value
* @hw: the private hardware structure

View File

@ -69,6 +69,8 @@ device_t ice_hw_to_dev(struct ice_hw *hw);
void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
uint32_t groupsize, uint8_t *buf, size_t len);
void ice_info_fwlog(struct ice_hw *hw, uint32_t rowsize, uint32_t groupsize,
uint8_t *buf, size_t len);
#define ice_info(_hw, _fmt, args...) \
device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

View File

@ -96,10 +96,20 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_IPV6_GTP_IPV4_UDP,
ICE_SW_TUN_IPV6_GTP_IPV6_TCP,
ICE_SW_TUN_IPV6_GTP_IPV6_UDP,
/* following adds support for GTP, just using inner protocols,
* outer L3 and L4 protocols can be anything
*/
ICE_SW_TUN_GTP_IPV4_TCP,
ICE_SW_TUN_GTP_IPV4_UDP,
ICE_SW_TUN_GTP_IPV6_TCP,
ICE_SW_TUN_GTP_IPV6_UDP,
ICE_SW_TUN_IPV4_GTPU_IPV4,
ICE_SW_TUN_IPV4_GTPU_IPV6,
ICE_SW_TUN_IPV6_GTPU_IPV4,
ICE_SW_TUN_IPV6_GTPU_IPV6,
ICE_SW_TUN_GTP_IPV4,
ICE_SW_TUN_GTP_IPV6,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@ -127,8 +137,10 @@ enum ice_prot_id {
ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV4_IL_IL = 34,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_IPV6_IL_IL = 42,
ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
@ -205,8 +217,8 @@ struct ice_ether_vlan_hdr {
};
struct ice_vlan_hdr {
__be16 vlan;
__be16 type;
__be16 vlan;
};
struct ice_ipv4_hdr {

View File

@ -1062,7 +1062,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
/* cppcheck-suppress unusedVariable */
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
@ -2013,7 +2012,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
* ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
* ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
@ -2870,8 +2869,8 @@ static enum ice_status
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
u8 tc;
@ -2881,6 +2880,20 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
return ICE_ERR_PARAM;
/* If the vsi is already part of another aggregator then update
* its vsi info list
*/
old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (old_agg_info && old_agg_info != agg_info) {
struct ice_sched_agg_vsi_info *vtmp;
LIST_FOR_EACH_ENTRY_SAFE(old_agg_vsi_info, vtmp,
&old_agg_info->agg_vsi_list,
ice_sched_agg_vsi_info, list_entry)
if (old_agg_vsi_info->vsi_handle == vsi_handle)
break;
}
/* check if entry already exist */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info) {
@ -2905,6 +2918,12 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
break;
ice_set_bit(tc, agg_vsi_info->tc_bitmap);
if (old_agg_vsi_info)
ice_clear_bit(tc, old_agg_vsi_info->tc_bitmap);
}
if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) {
LIST_DEL(&old_agg_vsi_info->list_entry);
ice_free(pi->hw, old_agg_vsi_info);
}
return status;
}
@ -2954,6 +2973,9 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
u16 num_elems = 1;
buf = *info;
/* For TC nodes, CIR config is not supported */
if (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_TC)
buf.data.valid_sections &= ~ICE_AQC_ELEM_VALID_CIR;
/* Parent TEID is reserved field in this aq call */
buf.parent_teid = 0;
/* Element type is reserved field in this aq call */
@ -3389,7 +3411,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
* ice_cfg_dflt_vsi_bw_lmt_per_tc - configure default VSI BW limit per TC
* ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: traffic class
@ -3544,7 +3566,7 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
}
/**
* ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per tc
* ice_cfg_agg_bw_shared_lmt_per_tc - config aggregator BW shared limit per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
@ -3564,7 +3586,7 @@ ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
}
/**
* ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per tc
* ice_cfg_agg_bw_no_shared_lmt_per_tc - cfg aggregator BW shared limit per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
@ -3582,7 +3604,7 @@ ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
}
/**
* ice_config_vsi_queue_priority - config VSI queue priority of node
* ice_cfg_vsi_q_priority - config VSI queue priority of node
* @pi: port information structure
* @num_qs: number of VSI queues
* @q_ids: queue IDs array
@ -3678,7 +3700,6 @@ ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
ice_sched_agg_vsi_info, list_entry)
if (agg_vsi_info->vsi_handle == vsi_handle) {
/* cppcheck-suppress unreadVariable */
vsi_handle_valid = true;
break;
}
@ -3837,8 +3858,8 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
u16 wakeup = 0;
/* Get the wakeup integer value */
bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
} else {
@ -3846,18 +3867,18 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
* Convert Integer value to a constant multiplier
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
hw->psm_clk_freq, bytes_per_sec);
wakeup_a = DIV_S64(ICE_RL_PROF_MULTIPLIER *
hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
wakeup_f = wakeup_a - wakeup_b;
/* Round up the Fractional value via Ceil(Fractional value) */
if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
if (wakeup_f > DIV_S64(ICE_RL_PROF_MULTIPLIER, 2))
wakeup_f += 1;
wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
ICE_RL_PROF_MULTIPLIER);
wakeup_f_int = (s32)DIV_S64(wakeup_f * ICE_RL_PROF_FRACTION,
ICE_RL_PROF_MULTIPLIER);
wakeup |= (u16)(wakeup_int << 9);
wakeup |= (u16)(0x1ff & wakeup_f_int);
}
@ -3889,20 +3910,20 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
return status;
/* Bytes per second from Kbps */
bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
/* encode is 6 bits but really useful are 5 bits */
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
ts_rate = DIV_S64(hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
/* Multiplier value */
mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
ts_rate);
mv_tmp = DIV_S64(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
ts_rate);
/* Round to the nearest ICE_RL_PROF_MULTIPLIER */
mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);

View File

@ -1,596 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "ice_common.h"
#include "ice_sriov.h"
/**
 * ice_aq_send_msg_to_vf
 * @hw: pointer to the hardware structure
 * @vfid: VF ID to send msg
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send message to VF driver (0x0802) using mailbox
 * queue and asynchronously sending message via
 * ice_sq_send_cmd() function
 */
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
		      u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	/* Build a "send message to VF" descriptor addressed to @vfid */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
	desc.params.virt.id = CPU_TO_LE32(vfid);

	/* The virtchnl opcode and return value travel in the cookies */
	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	/* Flag the descriptor as carrying an external buffer, if any */
	if (msglen)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
/**
 * ice_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cd: pointer to command details
 *
 * Send message to PF driver using mailbox queue. By default, this
 * message is sent asynchronously, i.e. ice_sq_send_cmd()
 * does not wait for completion before returning.
 */
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
		      enum ice_status v_retval, u8 *msg, u16 msglen,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);

	/* Indicate an attached buffer when a payload is included */
	if (msglen != 0)
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	/* Opcode and return value are carried in the descriptor cookies */
	desc.cookie_high = CPU_TO_LE32(v_opcode);
	desc.cookie_low = CPU_TO_LE32(v_retval);

	return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
/**
 * ice_conv_link_speed_to_virtchnl
 * @adv_link_support: determines the format of the returned link speed
 * @link_speed: variable containing the link_speed to be converted
 *
 * Convert link speed supported by HW to link speed supported by virtchnl.
 * If adv_link_support is true, then return link speed in Mbps. Else return
 * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
 * needs to cast back to an enum virtchnl_link_speed in the case where
 * adv_link_support is false, but when adv_link_support is true the caller can
 * expect the speed in Mbps.
 */
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
	if (adv_link_support) {
		/* Advanced link support: report the exact speed in Mbps */
		switch (link_speed) {
		case ICE_AQ_LINK_SPEED_10MB:
			return ICE_LINK_SPEED_10MBPS;
		case ICE_AQ_LINK_SPEED_100MB:
			return ICE_LINK_SPEED_100MBPS;
		case ICE_AQ_LINK_SPEED_1000MB:
			return ICE_LINK_SPEED_1000MBPS;
		case ICE_AQ_LINK_SPEED_2500MB:
			return ICE_LINK_SPEED_2500MBPS;
		case ICE_AQ_LINK_SPEED_5GB:
			return ICE_LINK_SPEED_5000MBPS;
		case ICE_AQ_LINK_SPEED_10GB:
			return ICE_LINK_SPEED_10000MBPS;
		case ICE_AQ_LINK_SPEED_20GB:
			return ICE_LINK_SPEED_20000MBPS;
		case ICE_AQ_LINK_SPEED_25GB:
			return ICE_LINK_SPEED_25000MBPS;
		case ICE_AQ_LINK_SPEED_40GB:
			return ICE_LINK_SPEED_40000MBPS;
		case ICE_AQ_LINK_SPEED_50GB:
			return ICE_LINK_SPEED_50000MBPS;
		case ICE_AQ_LINK_SPEED_100GB:
			return ICE_LINK_SPEED_100000MBPS;
		default:
			return ICE_LINK_SPEED_UNKNOWN;
		}
	}

	/* Virtchnl speeds are not defined for every speed supported in
	 * the hardware. To maintain compatibility with older AVF
	 * drivers, while reporting the speed the new speed values are
	 * resolved to the closest known virtchnl speeds
	 */
	switch (link_speed) {
	case ICE_AQ_LINK_SPEED_10MB:
	case ICE_AQ_LINK_SPEED_100MB:
		return (u32)VIRTCHNL_LINK_SPEED_100MB;
	case ICE_AQ_LINK_SPEED_1000MB:
	case ICE_AQ_LINK_SPEED_2500MB:
	case ICE_AQ_LINK_SPEED_5GB:
		return (u32)VIRTCHNL_LINK_SPEED_1GB;
	case ICE_AQ_LINK_SPEED_10GB:
		return (u32)VIRTCHNL_LINK_SPEED_10GB;
	case ICE_AQ_LINK_SPEED_20GB:
		return (u32)VIRTCHNL_LINK_SPEED_20GB;
	case ICE_AQ_LINK_SPEED_25GB:
		return (u32)VIRTCHNL_LINK_SPEED_25GB;
	case ICE_AQ_LINK_SPEED_40GB:
	case ICE_AQ_LINK_SPEED_50GB:
	case ICE_AQ_LINK_SPEED_100GB:
		return (u32)VIRTCHNL_LINK_SPEED_40GB;
	default:
		return (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
	}
}
/* The mailbox overflow detection algorithm helps to check if there
* is a possibility of a malicious VF transmitting too many MBX messages to the
* PF.
* 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
* driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
* The struct ice_mbx_snapshot helps to track and traverse a static window of
* messages within the mailbox queue while looking for a malicious VF.
*
* 2. When the caller starts processing its mailbox queue in response to an
* interrupt, the structure ice_mbx_snapshot is expected to be cleared before
* the algorithm can be run for the first time for that interrupt. This can be
* done via ice_mbx_reset_snapshot().
*
* 3. For every message read by the caller from the MBX Queue, the caller must
* call the detection algorithm's entry function ice_mbx_vf_state_handler().
* Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
* filled as it is required to be passed to the algorithm.
*
* 4. Every time a message is read from the MBX queue, a VFId is received which
* is passed to the state handler. The boolean output is_malvf of the state
* handler ice_mbx_vf_state_handler() serves as an indicator to the caller
* whether this VF is malicious or not.
*
* 5. When a VF is identified to be malicious, the caller can send a message
* to the system administrator. The caller can invoke ice_mbx_report_malvf()
* to help determine if a malicious VF is to be reported or not. This function
* requires the caller to maintain a global bitmap to track all malicious VFs
* and pass that to ice_mbx_report_malvf() along with the VFID which was identified
* to be malicious by ice_mbx_vf_state_handler().
*
* 6. The global bitmap maintained by PF can be cleared completely if PF is in
* reset or the bit corresponding to a VF can be cleared if that VF is in reset.
* When a VF is shut down and brought back up, we assume that the new VF
* brought up is not malicious and hence report it if found malicious.
*
* 7. The function ice_mbx_reset_snapshot() is called to reset the information
* in ice_mbx_snapshot for every new mailbox interrupt handled.
*
* 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
* when driver is unloaded.
*/
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
* the max messages check must be ignored in the algorithm
*/
#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
/**
 * ice_mbx_traverse - Pass through mailbox snapshot
 * @hw: pointer to the HW struct
 * @new_state: new algorithm state
 *
 * Traversing the mailbox static snapshot without checking
 * for malicious VFs.
 */
static void
ice_mbx_traverse(struct ice_hw *hw,
		 enum ice_mbx_snapshot_state *new_state)
{
	struct ice_mbx_snap_buffer_data *snap_buf;
	u32 num_iterations;

	snap_buf = &hw->mbx_snapshot.mbx_buf;

	/* As mailbox buffer is circular, applying a mask
	 * on the incremented iteration count.
	 */
	num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);

	/* Checking either of the below conditions to exit snapshot traversal:
	 * Condition-1: If the number of iterations in the mailbox is equal to
	 * the mailbox head which would indicate that we have reached the end
	 * of the static snapshot.
	 * Condition-2: If the maximum messages serviced in the mailbox for a
	 * given interrupt is the highest possible value then there is no need
	 * to check if the number of messages processed is equal to it. If not
	 * check if the number of messages processed is greater than or equal
	 * to the maximum number of mailbox entries serviced in current work item.
	 *
	 * NOTE: the increment of num_msg_proc below deliberately relies on
	 * short-circuit evaluation; it is skipped once the head is reached or
	 * when max_num_msgs_mbx is ICE_IGNORE_MAX_MSG_CNT.
	 */
	if (num_iterations == snap_buf->head ||
	    (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
	     ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
		*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}
/**
 * ice_mbx_detect_malvf - Detect malicious VF in snapshot
 * @hw: pointer to the HW struct
 * @vf_id: relative virtual function ID
 * @new_state: new algorithm state
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * This function tracks the number of asynchronous messages
 * sent per VF and marks the VF as malicious if it exceeds
 * the permissible number of messages to send.
 */
static enum ice_status
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
		     enum ice_mbx_snapshot_state *new_state,
		     bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* Reject VF IDs outside of the tracked counter array */
	if (vf_id >= snap->mbx_vf.vfcntr_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* Count this message against the VF and flag it once the
	 * permissible asynchronous message threshold is crossed.
	 */
	if (++snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
		*is_malvf = true;

	/* Keep walking the remainder of the mailbox snapshot */
	ice_mbx_traverse(hw, new_state);

	return ICE_SUCCESS;
}
/**
 * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
 * @snap: pointer to mailbox snapshot structure in the ice_hw struct
 *
 * Reset the mailbox snapshot structure and clear VF counter array.
 */
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
	u32 nr_vfs;

	/* Nothing to reset without a snapshot and an allocated counter array */
	if (!snap || !snap->mbx_vf.vf_cntr)
		return;

	/* Zero the per-VF message counters, if any are tracked */
	nr_vfs = snap->mbx_vf.vfcntr_len;
	if (nr_vfs != 0)
		ice_memset(snap->mbx_vf.vf_cntr, 0,
			   (nr_vfs * sizeof(*snap->mbx_vf.vf_cntr)),
			   ICE_NONDMA_MEM);

	/* Wipe the buffer bookkeeping and arm the state machine so the next
	 * interrupt starts a brand new snapshot capture.
	 */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf),
		   ICE_NONDMA_MEM);
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}
/**
 * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
 * @hw: pointer to the HW struct
 * @mbx_data: pointer to structure containing mailbox data
 * @vf_id: relative virtual function (VF) ID
 * @is_malvf: boolean output to indicate if VF is malicious
 *
 * The function serves as an entry point for the malicious VF
 * detection algorithm by handling the different states and state
 * transitions of the algorithm:
 *	New snapshot: This state is entered when creating a new static
 *	snapshot. The data from any previous mailbox snapshot is
 *	cleared and a new capture of the mailbox head and tail is
 *	logged. This will be the new static snapshot to detect
 *	asynchronous messages sent by VFs. On capturing the snapshot
 *	and depending on whether the number of pending messages in that
 *	snapshot exceed the watermark value, the state machine enters
 *	traverse or detect states.
 *	Traverse: If pending message count is below watermark then iterate
 *	through the snapshot without any action on VF.
 *	Detect: If pending message count exceeds watermark traverse
 *	the static snapshot and look for a malicious VF.
 *
 * Returns ICE_ERR_BAD_PTR, ICE_ERR_INVAL_SIZE or ICE_ERR_PARAM on bad
 * arguments, ICE_ERR_CFG on an invalid snapshot state, or the result of
 * ice_mbx_detect_malvf()/ICE_SUCCESS otherwise.
 */
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw,
			 struct ice_mbx_data *mbx_data, u16 vf_id,
			 bool *is_malvf)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	struct ice_mbx_snap_buffer_data *snap_buf;
	struct ice_ctl_q_info *cq = &hw->mailboxq;
	enum ice_mbx_snapshot_state new_state;
	enum ice_status status = ICE_SUCCESS;

	if (!is_malvf || !mbx_data)
		return ICE_ERR_BAD_PTR;

	/* When entering the mailbox state machine assume that the VF
	 * is not malicious until detected.
	 */
	*is_malvf = false;

	/* Checking if max messages allowed to be processed while servicing current
	 * interrupt is not less than the defined AVF message threshold.
	 */
	if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
		return ICE_ERR_INVAL_SIZE;

	/* The watermark value should not be lesser than the threshold limit
	 * set for the number of asynchronous messages a VF can send to mailbox
	 * nor should it be greater than the maximum number of messages in the
	 * mailbox serviced in current interrupt.
	 */
	if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
	    mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
		return ICE_ERR_PARAM;

	new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
	snap_buf = &snap->mbx_buf;

	switch (snap_buf->state) {
	case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
		/* Clear any previously held data in mailbox snapshot structure. */
		ice_mbx_reset_snapshot(snap);

		/* Collect the pending ARQ count, number of messages processed and
		 * the maximum number of messages allowed to be processed from the
		 * Mailbox for current interrupt.
		 */
		snap_buf->num_pending_arq = mbx_data->num_pending_arq;
		snap_buf->num_msg_proc = mbx_data->num_msg_proc;
		snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;

		/* Capture a new static snapshot of the mailbox by logging the
		 * head and tail of snapshot and set num_iterations to the tail
		 * value to mark the start of the iteration through the snapshot.
		 */
		snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
						  mbx_data->num_pending_arq);
		snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
		snap_buf->num_iterations = snap_buf->tail;

		/* Pending ARQ messages returned by ice_clean_rq_elem
		 * is the difference between the head and tail of the
		 * mailbox queue. Comparing this value against the watermark
		 * helps to check if we potentially have malicious VFs.
		 */
		if (snap_buf->num_pending_arq >=
		    mbx_data->async_watermark_val) {
			new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
			status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		} else {
			new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
			ice_mbx_traverse(hw, &new_state);
		}
		break;

	case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
		/* Below watermark: keep walking without per-VF accounting */
		new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
		ice_mbx_traverse(hw, &new_state);
		break;

	case ICE_MAL_VF_DETECT_STATE_DETECT:
		/* At or above watermark: count messages per VF while walking */
		new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
		status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
		break;

	default:
		new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
		status = ICE_ERR_CFG;
	}

	/* The helpers above may transition new_state back to NEW_SNAPSHOT
	 * when the end of the snapshot is reached; persist it either way.
	 */
	snap_buf->state = new_state;

	return status;
}
/**
 * ice_mbx_report_malvf - Track and note malicious VF
 * @hw: pointer to the HW struct
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 * @report_malvf: boolean to indicate if malicious VF must be reported
 *
 * This function will update a bitmap that keeps track of the malicious
 * VFs attached to the PF. A malicious VF must be reported only once if
 * discovered between VF resets or loading so the function checks
 * the input vf_id against the bitmap to verify if the VF has been
 * detected in any previous mailbox iterations.
 */
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
		     u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
	if (!all_malvfs || !report_malvf)
		return ICE_ERR_PARAM;

	/* Default to "do not report" until proven otherwise */
	*report_malvf = false;

	/* The bitmap must cover at least as many VFs as the counter array */
	if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
		return ICE_ERR_INVAL_SIZE;

	if (vf_id >= bitmap_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* Already flagged in a previous iteration: report only once */
	if (ice_is_bit_set(all_malvfs, vf_id))
		return ICE_SUCCESS;

	/* First sighting: record it and tell the caller to report */
	ice_set_bit(vf_id, all_malvfs);
	ice_debug(hw, ICE_DBG_TRACE, "Malicious VF=%d found\n", vf_id);
	*report_malvf = true;

	return ICE_SUCCESS;
}
/**
 * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
 * @snap: pointer to the mailbox snapshot structure
 * @all_malvfs: all malicious VFs tracked by PF
 * @bitmap_len: length of bitmap in bits
 * @vf_id: relative virtual function ID of the malicious VF
 *
 * Called on VF reset to forget any malicious-VF state recorded for the
 * given VF ID: the bit in the tracking bitmap is cleared and the per-VF
 * message counter is zeroed, so a freshly loaded VF reusing the same ID
 * is not penalized by stale data in the overflow detection algorithm.
 */
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, ice_bitmap_t *all_malvfs,
		    u16 bitmap_len, u16 vf_id)
{
	if (!snap || !all_malvfs)
		return ICE_ERR_PARAM;

	if (bitmap_len < snap->mbx_vf.vfcntr_len)
		return ICE_ERR_INVAL_SIZE;

	/* The VF ID must index into both the bitmap and the counter array */
	if (vf_id >= bitmap_len)
		return ICE_ERR_OUT_OF_RANGE;
	if (vf_id >= snap->mbx_vf.vfcntr_len)
		return ICE_ERR_OUT_OF_RANGE;

	/* Forget that this VF was ever flagged as malicious... */
	ice_clear_bit(vf_id, all_malvfs);

	/* ...and restart its message count, so a snapshot currently in
	 * traversal or detect state does not accumulate on top of counts
	 * left behind by the previous VF instance with this ID.
	 */
	snap->mbx_vf.vf_cntr[vf_id] = 0;

	return ICE_SUCCESS;
}
/**
 * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
 * @hw: pointer to the hardware structure
 * @vf_count: number of VFs allocated on a PF
 *
 * Reset the mailbox snapshot and size its per-VF message counter array
 * for @vf_count VFs.
 *
 * Assumption: ice_get_caps() has already been called, so @vf_count can
 * be validated against the number of VFs supported as defined in the
 * functional capabilities of the device.
 */
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
	u32 *cntr;

	/* Reject a zero VF count, or one beyond what the PF's functional
	 * capabilities allow.
	 */
	if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
		return ICE_ERR_INVAL_SIZE;

	cntr = (u32 *)ice_calloc(hw, vf_count, sizeof(*cntr));
	if (!cntr)
		return ICE_ERR_NO_MEMORY;

	/* One counter slot per allocated VF */
	snap->mbx_vf.vf_cntr = cntr;
	snap->mbx_vf.vfcntr_len = vf_count;

	/* Wipe the snapshot buffer state and arm it for a fresh capture */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
	snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

	return ICE_SUCCESS;
}
/**
 * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
 * @hw: pointer to the hardware structure
 *
 * Release the per-VF counter array and zero out the snapshot buffer.
 */
void ice_mbx_deinit_snapshot(struct ice_hw *hw)
{
	struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;

	/* The counter length goes to zero along with the counter array */
	snap->mbx_vf.vfcntr_len = 0;
	ice_free(hw, snap->mbx_vf.vf_cntr);

	/* Zero the snapshot buffer so no stale state survives */
	ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
}

View File

@ -1,65 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
#include "ice_type.h"
#include "ice_controlq.h"
/* Defining the mailbox message threshold as 63 asynchronous
* pending messages. Normal VF functionality does not require
* sending more than 63 asynchronous pending message.
*/
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id);
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#endif /* _ICE_SRIOV_H_ */

View File

@ -699,6 +699,47 @@ _ice_fltr_flag_str(u16 flag)
return buf;
}
/**
 * ice_log_sev_str - Convert log level to a string
 * @log_level: the log level to convert
 *
 * Map a FW logging module's u8 severity onto a human readable name for
 * sysctl output; levels without a name are rendered as their numeric
 * value instead.
 */
struct ice_str_buf
_ice_log_sev_str(u8 log_level)
{
	struct ice_str_buf buf = { .str = "" };
	const char *name;

	if (log_level == ICE_FWLOG_LEVEL_NONE)
		name = "none";
	else if (log_level == ICE_FWLOG_LEVEL_ERROR)
		name = "error";
	else if (log_level == ICE_FWLOG_LEVEL_WARNING)
		name = "warning";
	else if (log_level == ICE_FWLOG_LEVEL_NORMAL)
		name = "normal";
	else if (log_level == ICE_FWLOG_LEVEL_VERBOSE)
		name = "verbose";
	else
		name = NULL;

	if (name)
		snprintf(buf.str, ICE_STR_BUF_LEN, "%s", name);
	else
		snprintf(buf.str, ICE_STR_BUF_LEN, "%u", log_level);

	return buf;
}
/**
* ice_fwd_act_str - convert filter action enum to a string
* @action: the filter action to convert
@ -986,12 +1027,16 @@ ice_state_to_str(enum ice_state state)
return "ROLLBACK_MODE";
case ICE_STATE_LINK_STATUS_REPORTED:
return "LINK_STATUS_REPORTED";
case ICE_STATE_ATTACHING:
return "ATTACHING";
case ICE_STATE_DETACHING:
return "DETACHING";
case ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING:
return "LINK_DEFAULT_OVERRIDE_PENDING";
case ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER:
return "LLDP_RX_FLTR_FROM_DRIVER";
case ICE_STATE_MULTIPLE_TCS:
return "MULTIPLE_TCS";
case ICE_STATE_LAST:
return NULL;
}
@ -999,6 +1044,91 @@ ice_state_to_str(enum ice_state state)
return NULL;
}
/**
 * ice_fw_module_str - Convert a FW logging module to a string name
 * @module: the module to convert
 *
 * Given a FW logging module id, convert it to a shorthand human readable
 * name, for generating sysctl tunables. Unhandled ids fall through to
 * "unknown". The switch deliberately has no default case so the compiler
 * flags any enum value added without a matching name here.
 */
const char *
ice_fw_module_str(enum ice_aqc_fw_logging_mod module)
{
	switch (module) {
	case ICE_AQC_FW_LOG_ID_GENERAL:
		return "general";
	case ICE_AQC_FW_LOG_ID_CTRL:
		return "ctrl";
	case ICE_AQC_FW_LOG_ID_LINK:
		return "link";
	case ICE_AQC_FW_LOG_ID_LINK_TOPO:
		return "link_topo";
	case ICE_AQC_FW_LOG_ID_DNL:
		return "dnl";
	case ICE_AQC_FW_LOG_ID_I2C:
		return "i2c";
	case ICE_AQC_FW_LOG_ID_SDP:
		return "sdp";
	case ICE_AQC_FW_LOG_ID_MDIO:
		return "mdio";
	case ICE_AQC_FW_LOG_ID_ADMINQ:
		return "adminq";
	case ICE_AQC_FW_LOG_ID_HDMA:
		return "hdma";
	case ICE_AQC_FW_LOG_ID_LLDP:
		return "lldp";
	case ICE_AQC_FW_LOG_ID_DCBX:
		return "dcbx";
	case ICE_AQC_FW_LOG_ID_DCB:
		return "dcb";
	case ICE_AQC_FW_LOG_ID_XLR:
		return "xlr";
	case ICE_AQC_FW_LOG_ID_NVM:
		return "nvm";
	case ICE_AQC_FW_LOG_ID_AUTH:
		return "auth";
	case ICE_AQC_FW_LOG_ID_VPD:
		return "vpd";
	case ICE_AQC_FW_LOG_ID_IOSF:
		return "iosf";
	case ICE_AQC_FW_LOG_ID_PARSER:
		return "parser";
	case ICE_AQC_FW_LOG_ID_SW:
		return "sw";
	case ICE_AQC_FW_LOG_ID_SCHEDULER:
		return "scheduler";
	case ICE_AQC_FW_LOG_ID_TXQ:
		return "txq";
	/* NOTE(review): the RSVD id is reported as "acl" -- presumably this
	 * reserved slot is occupied by the ACL module in firmware; confirm
	 * against the AQ spec before renaming.
	 */
	case ICE_AQC_FW_LOG_ID_RSVD:
		return "acl";
	case ICE_AQC_FW_LOG_ID_POST:
		return "post";
	case ICE_AQC_FW_LOG_ID_WATCHDOG:
		return "watchdog";
	case ICE_AQC_FW_LOG_ID_TASK_DISPATCH:
		return "task_dispatch";
	case ICE_AQC_FW_LOG_ID_MNG:
		return "mng";
	case ICE_AQC_FW_LOG_ID_SYNCE:
		return "synce";
	case ICE_AQC_FW_LOG_ID_HEALTH:
		return "health";
	case ICE_AQC_FW_LOG_ID_TSDRV:
		return "tsdrv";
	case ICE_AQC_FW_LOG_ID_PFREG:
		return "pfreg";
	case ICE_AQC_FW_LOG_ID_MDLVER:
		return "mdlver";
	case ICE_AQC_FW_LOG_ID_MAX:
		return "unknown";
	}

	/* The compiler generates errors on unhandled enum values if we omit
	 * the default case.
	 */
	return "unknown";
}
/**
* ice_fw_lldp_status - Convert FW LLDP status to a string
* @lldp_status: firmware LLDP status value to convert

View File

@ -38,6 +38,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_IPV6_ETHER_ID 0x86DD
#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
@ -175,7 +176,7 @@ enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16
}
/**
* ice_free_global_lut - free a RSS global LUT
* ice_free_rss_global_lut - free a RSS global LUT
* @hw: pointer to the HW struct
* @global_lut_id: ID of the RSS global LUT to free
*/
@ -2271,7 +2272,7 @@ ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
* @sw: pointer to switch info struct for which function add rule
* @lport: logic port number on which function add rule
*
* IMPORTANT: When the ucast_shared flag is set to false and m_list has
* IMPORTANT: When the umac_shared flag is set to false and m_list has
* multiple unicast addresses, the function assumes that all the
* addresses are unique in a given add_mac call. It doesn't
* check for duplicates in this case, removing duplicates from a given
@ -2314,18 +2315,18 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
IS_ZERO_ETHER_ADDR(add))
return ICE_ERR_PARAM;
if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
/* Don't overwrite the unicast address */
ice_acquire_lock(rule_lock);
if (ice_find_rule_entry(rule_head,
&m_list_itr->fltr_info)) {
ice_release_lock(rule_lock);
return ICE_ERR_ALREADY_EXISTS;
continue;
}
ice_release_lock(rule_lock);
num_unicast++;
} else if (IS_MULTICAST_ETHER_ADDR(add) ||
(IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
(IS_UNICAST_ETHER_ADDR(add) && hw->umac_shared)) {
m_list_itr->status =
ice_add_rule_internal(hw, recp_list, lport,
m_list_itr);
@ -2930,7 +2931,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
if (IS_UNICAST_ETHER_ADDR(add) && !hw->umac_shared) {
/* Don't remove the unicast address that belongs to
* another VSI on the switch, since it is not being
* shared...
@ -3147,22 +3148,25 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
* @sw: pointer to switch info struct for which function add rule
* @lkup: switch rule filter lookup type
*/
static enum ice_status
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid, struct ice_switch_info *sw)
u16 *vid, struct ice_switch_info *sw,
enum ice_sw_lkup_type lkup)
{
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
if (!ice_is_vsi_valid(hw, vsi_handle))
if (!ice_is_vsi_valid(hw, vsi_handle) ||
(lkup != ICE_SW_LKUP_PROMISC && lkup != ICE_SW_LKUP_PROMISC_VLAN))
return ICE_ERR_PARAM;
*vid = 0;
*promisc_mask = 0;
rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
rule_head = &sw->recp_list[lkup].filt_rules;
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
ice_acquire_lock(rule_lock);
LIST_FOR_EACH_ENTRY(itr, rule_head,
@ -3192,47 +3196,7 @@ ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
}
/**
* ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
* @sw: pointer to switch info struct for which function add rule
*/
static enum ice_status
_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid, struct ice_switch_info *sw)
{
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
*vid = 0;
*promisc_mask = 0;
rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
ice_acquire_lock(rule_lock);
LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
list_entry) {
/* Continue if this filter doesn't apply to this VSI or the
* VSI ID is not in the VSI map for this filter
*/
if (!ice_vsi_uses_fltr(itr, vsi_handle))
continue;
*promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
}
ice_release_lock(rule_lock);
return ICE_SUCCESS;
vid, hw->switch_info, ICE_SW_LKUP_PROMISC);
}
/**
@ -3246,8 +3210,9 @@ enum ice_status
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
u16 *vid)
{
return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info);
return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
vid, hw->switch_info,
ICE_SW_LKUP_PROMISC_VLAN);
}
/**
@ -4211,7 +4176,7 @@ ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
}
/**
* ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
* ice_rm_sw_replay_rule_info - helper function to delete filter replay rules
* @hw: pointer to the HW struct
* @sw: pointer to switch info struct for which function removes filters
*

View File

@ -213,12 +213,24 @@ struct ice_rule_query_data {
u16 vsi_handle;
};
/*
 * Carries lb_en and lan_en flag information to ice_add_adv_rule.
 * The values in act are consumed only when act_valid is set to true;
 * otherwise default values are used.
 */
struct ice_adv_rule_flags_info {
	u32 act;
	u8 act_valid; /* indicate if flags in act are valid */
};
/* Collected parameters describing an advanced switch rule */
struct ice_adv_rule_info {
	enum ice_sw_tunnel_type tun_type;
	struct ice_sw_act_ctrl sw_act;
	u32 priority;
	u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
	u16 fltr_rule_id;
	/* optional lb_en/lan_en override; see ice_adv_rule_flags_info */
	struct ice_adv_rule_flags_info flags_info;
};
/* A collection of one or more four word recipe */

View File

@ -78,17 +78,44 @@
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
/* Report whether traffic class @tc has its bit set in @bitmap */
static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc)
{
	return (bitmap & BIT(tc)) != 0;
}
#define DIV_64BIT(n, d) ((n) / (d))
/**
 * DIV_S64 - Divide signed 64-bit value with signed 64-bit divisor
 * @dividend: value to divide
 * @divisor: value to divide by; must be non-zero (division by zero is
 *           undefined behavior)
 *
 * Use DIV_S64 for any 64-bit divide which operates on signed 64-bit dividends.
 * Do not use this for unsigned 64-bit dividends as it will not produce
 * correct results if the dividend is larger than S64_MAX.
 */
static inline s64 DIV_S64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}
/**
 * DIV_U64 - Divide unsigned 64-bit value by unsigned 64-bit divisor
 * @dividend: value to divide
 * @divisor: value to divide by; must be non-zero (division by zero is
 *           undefined behavior)
 *
 * Use DIV_U64 for any 64-bit divide which operates on unsigned 64-bit
 * dividends. Do not use this for signed 64-bit dividends as it will not
 * handle negative values correctly.
 */
static inline u64 DIV_U64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
static inline u64 round_up_64bit(u64 a, u32 b)
{
return DIV_64BIT(((a) + (b) / 2), (b));
return DIV_U64(((a) + (b) / 2), (b));
}
static inline u32 ice_round_to_num(u32 N, u32 R)
@ -136,6 +163,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
ICE_DBG_AQ_DESC | \
ICE_DBG_AQ_DESC_BUF | \
ICE_DBG_AQ_CMD)
#define ICE_DBG_PARSER BIT_ULL(28)
#define ICE_DBG_USER BIT_ULL(31)
#define ICE_DBG_ALL 0xFFFFFFFFFFFFFFFFULL
@ -390,6 +418,23 @@ struct ice_hw_common_caps {
#define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
#define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1)
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
/* PCIe reset avoidance */
bool pcie_reset_avoidance; /* false: not supported, true: supported */
/* Post update reset restriction */
bool reset_restrict_support; /* false: not supported, true: supported */
/* External topology device images within the NVM */
#define ICE_EXT_TOPO_DEV_IMG_COUNT 4
u32 ext_topo_dev_img_ver_high[ICE_EXT_TOPO_DEV_IMG_COUNT];
u32 ext_topo_dev_img_ver_low[ICE_EXT_TOPO_DEV_IMG_COUNT];
u8 ext_topo_dev_img_part_num[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
#define ICE_EXT_TOPO_DEV_IMG_PART_NUM_M \
MAKEMASK(0xFF, ICE_EXT_TOPO_DEV_IMG_PART_NUM_S)
bool ext_topo_dev_img_load_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
};
/* Function specific capabilities */
@ -759,7 +804,8 @@ struct ice_dcb_app_priority_table {
};
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 32
#define ICE_DCBX_MAX_APPS 64
#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@ -779,7 +825,14 @@ struct ice_dcbx_cfg {
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
#define ICE_QOS_MODE_VLAN 0x0
#define ICE_QOS_MODE_DSCP 0x1
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
ice_declare_bitmap(dscp_mapped, ICE_DSCP_NUM_VAL);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
u8 dscp_map[ICE_DSCP_NUM_VAL];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@ -831,6 +884,7 @@ struct ice_switch_info {
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@ -964,6 +1018,9 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
struct ice_fwlog_cfg fwlog_cfg;
bool fwlog_support_ena; /* does hardware support FW logging? */
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
* initialization.
@ -983,7 +1040,8 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
u8 ucast_shared; /* true if VSIs can share unicast addr */
/* true if VSIs can share unicast MAC addr */
u8 umac_shared;
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
@ -995,6 +1053,7 @@ struct ice_hw {
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 pkg_seg_id;
u32 active_track_id;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@ -1186,17 +1245,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
/* CSS Header words */
#define ICE_NVM_CSS_HDR_LEN_L 0x02
#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
/* Length of CSS header section in words */
#define ICE_CSS_HEADER_LENGTH 330
/* Offset of Shadow RAM copy in the NVM bank area. */
#define ICE_NVM_SR_COPY_WORD_OFFSET ROUND_UP(ICE_CSS_HEADER_LENGTH, 32)
/* Size in bytes of Option ROM trailer */
#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
/* Length of Authentication header section in words */
#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
@ -1290,4 +1345,8 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
#define ICE_FW_API_HEALTH_REPORT_MIN 7
#define ICE_FW_API_HEALTH_REPORT_PATCH 6
/* AQ API version for FW auto drop reports */
#define ICE_FW_API_AUTO_DROP_MAJ 1
#define ICE_FW_API_AUTO_DROP_MIN 4
#endif /* _ICE_TYPE_H_ */

View File

@ -33,7 +33,7 @@
#include "ice_common.h"
/**
* ice_pkg_supports_dvm - determine if DDP supports Double VLAN mode (DVM)
* ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM)
* @hw: pointer to the HW struct
* @dvm: output variable to determine if DDP supports DVM(true) or SVM(false)
*/
@ -152,11 +152,52 @@ bool ice_is_dvm_ena(struct ice_hw *hw)
* configuration lock has been released because all ports on a device need to
* cache the VLAN mode.
*/
void ice_cache_vlan_mode(struct ice_hw *hw)
static void ice_cache_vlan_mode(struct ice_hw *hw)
{
	/* Cache whether the device currently has double VLAN mode enabled */
	hw->dvm_ena = (ice_aq_is_dvm_ena(hw) != 0);
}
/**
* ice_pkg_supports_dvm - find out if DDP supports DVM
* @hw: pointer to the HW structure
*/
static bool ice_pkg_supports_dvm(struct ice_hw *hw)
{
enum ice_status status;
bool pkg_supports_dvm;
status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
status);
return false;
}
return pkg_supports_dvm;
}
/**
* ice_fw_supports_dvm - find out if FW supports DVM
* @hw: pointer to the HW structure
*/
static bool ice_fw_supports_dvm(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
enum ice_status status;
/* If firmware returns success, then it supports DVM, else it only
* supports SVM
*/
status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
status);
return false;
}
return true;
}
/**
* ice_is_dvm_supported - check if Double VLAN Mode is supported
* @hw: pointer to the hardware structure
@ -169,27 +210,13 @@ void ice_cache_vlan_mode(struct ice_hw *hw)
*/
static bool ice_is_dvm_supported(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
enum ice_status status;
bool pkg_supports_dvm;
status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
if (status) {
ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
status);
if (!ice_pkg_supports_dvm(hw)) {
ice_debug(hw, ICE_DBG_PKG, "DDP doesn't support DVM\n");
return false;
}
if (!pkg_supports_dvm)
return false;
/* If firmware returns success, then it supports DVM, else it only
* supports SVM
*/
status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
status);
if (!ice_fw_supports_dvm(hw)) {
ice_debug(hw, ICE_DBG_PKG, "FW doesn't support DVM\n");
return false;
}
@ -273,9 +300,26 @@ static enum ice_status ice_set_svm(struct ice_hw *hw)
*/
/* Set the device's VLAN mode. When Double VLAN Mode is not supported
 * there is nothing to configure; otherwise configure Single VLAN Mode
 * via ice_set_svm().
 */
enum ice_status ice_set_vlan_mode(struct ice_hw *hw)
{
	return ice_is_dvm_supported(hw) ? ice_set_svm(hw) : ICE_SUCCESS;
}
/**
 * ice_post_pkg_dwnld_vlan_mode_cfg - configure VLAN mode after DDP download
 * @hw: pointer to the HW structure
 *
 * This function is meant to configure any VLAN mode specific functionality
 * after the global configuration lock has been released and the DDP has been
 * downloaded.
 *
 * Since only one PF downloads the DDP and configures the VLAN mode there needs
 * to be a way to configure the other PFs after the DDP has been downloaded and
 * the global configuration lock has been released. All such code should go in
 * this function.
 */
void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw)
{
	/* Refresh this PF's cached view of the configured VLAN mode */
	ice_cache_vlan_mode(hw);
}

View File

@ -38,7 +38,7 @@
struct ice_hw;
bool ice_is_dvm_ena(struct ice_hw *hw);
void ice_cache_vlan_mode(struct ice_hw *hw);
enum ice_status ice_set_vlan_mode(struct ice_hw *hw);
void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
#endif /* _ICE_VLAN_MODE_H */

View File

@ -446,6 +446,8 @@ ice_if_attach_pre(if_ctx_t ctx)
device_printf(iflib_get_dev(ctx), "Loading the iflib ice driver\n");
ice_set_state(&sc->state, ICE_STATE_ATTACHING);
sc->ctx = ctx;
sc->media = iflib_get_media(ctx);
sc->sctx = iflib_get_sctx(ctx);
@ -776,6 +778,8 @@ ice_if_attach_post(if_ctx_t ctx)
ice_init_saved_phy_cfg(sc);
ice_cfg_pba_num(sc);
ice_add_device_sysctls(sc);
/* Get DCBX/LLDP state and start DCBX agent */
@ -796,6 +800,8 @@ ice_if_attach_post(if_ctx_t ctx)
callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
mtx_unlock(&sc->admin_mtx);
ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
return 0;
} /* ice_if_attach_post */
@ -819,6 +825,8 @@ ice_attach_post_recovery_mode(struct ice_softc *sc)
mtx_lock(&sc->admin_mtx);
callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
mtx_unlock(&sc->admin_mtx);
ice_clear_state(&sc->state, ICE_STATE_ATTACHING);
}
/**
@ -993,7 +1001,8 @@ ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
ice_vsi_add_txqs_ctx(vsi);
for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
txq->me = i;
/* q_handle == me when only one TC */
txq->me = txq->q_handle = i;
txq->vsi = vsi;
/* store the queue size for easier access */
@ -2375,6 +2384,27 @@ ice_rebuild(struct ice_softc *sc)
goto err_shutdown_ctrlq;
}
/* Re-enable FW logging. Keep going even if this fails */
status = ice_fwlog_set(hw, &hw->fwlog_cfg);
if (!status) {
/*
* We should have the most updated cached copy of the
* configuration, regardless of whether we're rebuilding
* or not. So we'll simply check to see if logging was
* enabled pre-rebuild.
*/
if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
status = ice_fwlog_register(hw);
if (status)
device_printf(dev, "failed to re-register fw logging, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
} else
device_printf(dev, "failed to rebuild fw logging configuration, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
err = ice_send_version(sc);
if (err)
goto err_shutdown_ctrlq;
@ -2614,6 +2644,8 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap);
ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!sc->hw.func_caps.common_cap.rss_table_size)
@ -2621,6 +2653,14 @@ ice_init_device_features(struct ice_softc *sc)
/* Disable features due to firmware limitations... */
if (!ice_is_fw_health_report_supported(&sc->hw))
ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
if (!ice_fwlog_supported(&sc->hw))
ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
if (sc->hw.fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_FW_LOGGING))
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en);
else
ice_fwlog_unregister(&sc->hw);
}
/* Disable capabilities not supported by the OS */
ice_disable_unsupported_features(sc->feat_cap);

View File

@ -51,7 +51,7 @@
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The returned value
* is of virtchnl_status_code type, defined in the shared type.h.
* is of virtchnl_status_code type, defined here.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
@ -66,7 +66,23 @@
* value in current and future projects
*/
/* Error Codes */
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
* structure/union is not of the correct size, otherwise it creates an enum
* that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
/* Error Codes
* Note that many older versions of various iAVF drivers convert the reported
* status code directly into an iavf_status enumeration. For this reason, it
* is important that the values of these enumerations line up.
*/
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_STATUS_ERR_PARAM = -5,
@ -113,7 +129,9 @@ enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
enum virtchnl_bw_limit_type {
VIRTCHNL_BW_SHAPER = 0,
};
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
@ -125,6 +143,7 @@ enum virtchnl_ops {
* VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of capabilities
* exchange and are not considered part of base mode feature set.
*
*/
VIRTCHNL_OP_UNKNOWN = 0,
VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
@ -160,7 +179,10 @@ enum virtchnl_ops {
/* opcode 34 is reserved */
/* opcodes 38, 39, 40, 41, 42 and 43 are reserved */
/* opcode 44 is reserved */
/* opcode 45, 46, 47, 48 and 49 are reserved */
VIRTCHNL_OP_ADD_RSS_CFG = 45,
VIRTCHNL_OP_DEL_RSS_CFG = 46,
VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51,
VIRTCHNL_OP_ADD_VLAN_V2 = 52,
@ -171,7 +193,10 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57,
VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58,
VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59,
/* opcodes 60 through 69 are reserved */
/* opcodes 60 through 65 are reserved */
VIRTCHNL_OP_GET_QOS_CAPS = 66,
VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
/* opcode 68, 69 are reserved */
VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
@ -239,6 +264,14 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
case VIRTCHNL_OP_ADD_RSS_CFG:
return "VIRTCHNL_OP_ADD_RSS_CFG";
case VIRTCHNL_OP_DEL_RSS_CFG:
return "VIRTCHNL_OP_DEL_RSS_CFG";
case VIRTCHNL_OP_ADD_FDIR_FILTER:
return "VIRTCHNL_OP_ADD_FDIR_FILTER";
case VIRTCHNL_OP_DEL_FDIR_FILTER:
return "VIRTCHNL_OP_DEL_FDIR_FILTER";
case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
@ -272,15 +305,29 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
}
}
/* These macros are used to generate compilation errors if a structure/union
 * is not exactly the correct length. It gives a divide by zero error if the
 * structure/union is not of the correct size, otherwise it creates an enum
 * that is never used.
 *
 * Note: the dummy enum tag/value names embed the checked type name, so each
 * type may be checked at most once per translation unit.
 */
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
/* Fixed typo in the generated enum tag ("asset" -> "assert") so the union
 * check matches the struct check's naming; the tag is otherwise unused.
 */
#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
	{ virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
/**
 * virtchnl_stat_str - convert a virtchnl status code to its printable name
 * @v_status: status code carried in a virtchnl message
 *
 * Returns a constant string naming the status code, or a fixed placeholder
 * string for any code this header does not know about.  Intended for log
 * and debug output only.
 */
static inline const char *virtchnl_stat_str(enum virtchnl_status_code v_status)
{
	if (v_status == VIRTCHNL_STATUS_SUCCESS)
		return "VIRTCHNL_STATUS_SUCCESS";
	if (v_status == VIRTCHNL_STATUS_ERR_PARAM)
		return "VIRTCHNL_STATUS_ERR_PARAM";
	if (v_status == VIRTCHNL_STATUS_ERR_NO_MEMORY)
		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
	if (v_status == VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH)
		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
	if (v_status == VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR)
		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
	if (v_status == VIRTCHNL_STATUS_ERR_INVALID_VF_ID)
		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
	if (v_status == VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR)
		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
	if (v_status == VIRTCHNL_STATUS_ERR_NOT_SUPPORTED)
		return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
	return "Unknown status code (update virtchnl.h)";
}
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
@ -326,8 +373,9 @@ struct virtchnl_version_info {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V10(_ver) (((_ver)->major == 1) && ((_ver)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
#define VF_IS_V20(_ver) (((_ver)->major == 2) && ((_ver)->minor == 0))
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
@ -374,35 +422,36 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
/* 0X00000100 is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 0x00008000
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
/* 0x04000000 is reserved */
/* 0X08000000 and 0X10000000 are reserved */
/* 0X20000000 is reserved */
/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
#define VIRTCHNL_VF_OFFLOAD_L2 BIT(0)
#define VIRTCHNL_VF_OFFLOAD_IWARP BIT(1)
#define VIRTCHNL_VF_CAP_RDMA VIRTCHNL_VF_OFFLOAD_IWARP
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ BIT(3)
#define VIRTCHNL_VF_OFFLOAD_RSS_REG BIT(4)
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR BIT(5)
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
/* BIT(8) is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS BIT(9)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 BIT(18)
#define VIRTCHNL_VF_OFFLOAD_RSS_PF BIT(19)
#define VIRTCHNL_VF_OFFLOAD_ENCAP BIT(20)
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM BIT(21)
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM BIT(22)
#define VIRTCHNL_VF_OFFLOAD_ADQ BIT(23)
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 BIT(24)
#define VIRTCHNL_VF_OFFLOAD_USO BIT(25)
/* BIT(26) is reserved */
#define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27)
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28)
#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29)
/* BIT(30) is reserved */
/* BIT(31) is reserved */
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@ -440,6 +489,54 @@ struct virtchnl_txq_info {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* RX descriptor IDs (range from 0 to 63); each ID names one RX descriptor
 * format the PF can program for a VF queue.
 */
enum virtchnl_rx_desc_ids {
	VIRTCHNL_RXDID_0_16B_BASE = 0,
	/* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
	 * because they can be differentiated based on queue model; e.g. single
	 * queue model can only use 32B_BASE and split queue model can only use
	 * FLEX_SPLITQ. Having these as 1 allows them to be used as default
	 * descriptors without negotiation.
	 */
	VIRTCHNL_RXDID_1_32B_BASE = 1,
	VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1,
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
	VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
	VIRTCHNL_RXDID_7_HW_RSVD = 7,
	/* 8 through 15 are reserved (no enumerator is defined for 8 either) */
	VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19,
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20,
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21,
	/* 22 through 63 are reserved */
};
/* RX descriptor ID bitmasks: one-bit-per-RXDID masks matching the
 * enum virtchnl_rx_desc_ids values above, for advertising/negotiating
 * sets of supported descriptor formats.
 */
enum virtchnl_rx_desc_id_bitmasks {
	VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
	VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
	VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
	VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
	VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
	VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
	VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = BIT(VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL),
	VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = BIT(VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2),
	VIRTCHNL_RXDID_7_HW_RSVD_M = BIT(VIRTCHNL_RXDID_7_HW_RSVD),
	/* 8 through 15 are reserved (no mask for unused ID 8) */
	VIRTCHNL_RXDID_16_COMMS_GENERIC_M = BIT(VIRTCHNL_RXDID_16_COMMS_GENERIC),
	VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = BIT(VIRTCHNL_RXDID_17_COMMS_AUX_VLAN),
	VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = BIT(VIRTCHNL_RXDID_18_COMMS_AUX_IPV4),
	VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = BIT(VIRTCHNL_RXDID_19_COMMS_AUX_IPV6),
	VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = BIT(VIRTCHNL_RXDID_20_COMMS_AUX_FLOW),
	VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = BIT(VIRTCHNL_RXDID_21_COMMS_AUX_TCP),
	/* 22 through 63 are reserved */
};
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
@ -1127,6 +1224,14 @@ struct virtchnl_rss_hena {
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* Type of RSS algorithm a VF may request in virtchnl_rss_cfg.rss_algorithm.
 * Values are stored in a wire struct as s32, so they must stay stable.
 */
enum virtchnl_rss_algorithm {
	VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
	VIRTCHNL_RSS_ALG_R_ASYMMETRIC = 1,
	VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
	VIRTCHNL_RSS_ALG_XOR_SYMMETRIC = 3,
};
/* This is used by PF driver to enforce how many channels can be supported.
* When ADQ_V2 capability is negotiated, it will allow 16 channels otherwise
* PF driver will allow only max 4 channels
@ -1225,6 +1330,14 @@ struct virtchnl_filter {
VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
/* Bandwidth shaper parameters: committed (guaranteed) and peak rates. */
struct virtchnl_shaper_bw {
	/* Unit is Kbps */
	u32 committed;
	u32 peak;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@ -1296,6 +1409,393 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
/* Maximum number of protocol headers in one virtchnl_proto_hdrs array. */
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
/* The low PROTO_HDR_SHIFT bits of a virtchnl_proto_hdr_field value select a
 * field within its header; the upper bits identify the header type, so each
 * header type gets a 32-value field namespace.
 */
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
	(proto_hdr_type << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)

/* VF use these macros to configure each protocol header.
 * Specify which protocol headers and protocol header fields base on
 * virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
 * @param hdr: a struct of virtchnl_proto_hdr
 * @param hdr_type: ETH/IPV4/TCP, etc
 * @param field: SRC/DST/TEID/SPI, etc
 */
/* Set/clear/test one field bit in hdr->field_selector; only the low
 * PROTO_HDR_FIELD_MASK bits of the field value index the bitmap.
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
	((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
	((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)

/* Convenience wrappers that build the enum constant name from the
 * hdr_type/field tokens via preprocessor pasting.
 */
#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
	(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
		VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))

/* Set/get/test the header type stored in hdr->type.  NOTE(review): SET
 * stores the raw virtchnl_proto_hdr_type value while GET/TEST shift by
 * PROTO_HDR_SHIFT (TEST compares against a virtchnl_proto_hdr_field-style
 * value); callers mix these deliberately — do not "normalize" them.
 */
#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
	((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
	(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
	((hdr)->type == ((s32)((val) >> PROTO_HDR_SHIFT)))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
	(VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) && \
	 VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val))
/* Protocol header type within a packet segment. A segment consists of one or
 * more protocol headers that make up a logical group of protocol headers. Each
 * logical group of protocol headers encapsulates or is encapsulated using/by
 * tunneling or encapsulation protocols for network virtualization.
 *
 * These values also seed the per-header field namespaces via
 * PROTO_HDR_FIELD_START() in enum virtchnl_proto_hdr_field below.
 */
enum virtchnl_proto_hdr_type {
	VIRTCHNL_PROTO_HDR_NONE,
	VIRTCHNL_PROTO_HDR_ETH,
	VIRTCHNL_PROTO_HDR_S_VLAN,
	VIRTCHNL_PROTO_HDR_C_VLAN,
	VIRTCHNL_PROTO_HDR_IPV4,
	VIRTCHNL_PROTO_HDR_IPV6,
	VIRTCHNL_PROTO_HDR_TCP,
	VIRTCHNL_PROTO_HDR_UDP,
	VIRTCHNL_PROTO_HDR_SCTP,
	VIRTCHNL_PROTO_HDR_GTPU_IP,
	VIRTCHNL_PROTO_HDR_GTPU_EH,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
	VIRTCHNL_PROTO_HDR_PPPOE,
	VIRTCHNL_PROTO_HDR_L2TPV3,
	VIRTCHNL_PROTO_HDR_ESP,
	VIRTCHNL_PROTO_HDR_AH,
	VIRTCHNL_PROTO_HDR_PFCP,
	VIRTCHNL_PROTO_HDR_GTPC,
	VIRTCHNL_PROTO_HDR_ECPRI,
	VIRTCHNL_PROTO_HDR_L2TPV2,
	VIRTCHNL_PROTO_HDR_PPP,
	/* IPv4 and IPv6 Fragment header types are only associated to
	 * VIRTCHNL_PROTO_HDR_IPV4 and VIRTCHNL_PROTO_HDR_IPV6 respectively,
	 * cannot be used independently.
	 */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG,
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
	VIRTCHNL_PROTO_HDR_GRE,
};
/* Protocol header field within a protocol header.  Each header type's block
 * of values starts at PROTO_HDR_FIELD_START(type); the low PROTO_HDR_SHIFT
 * bits of a value index that header's field bit in
 * virtchnl_proto_hdr.field_selector.
 */
enum virtchnl_proto_hdr_field {
	/* ETHER */
	VIRTCHNL_PROTO_HDR_ETH_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
	VIRTCHNL_PROTO_HDR_ETH_DST,
	VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
	/* S-VLAN */
	VIRTCHNL_PROTO_HDR_S_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
	/* C-VLAN */
	VIRTCHNL_PROTO_HDR_C_VLAN_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
	/* IPV4 */
	VIRTCHNL_PROTO_HDR_IPV4_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
	VIRTCHNL_PROTO_HDR_IPV4_DST,
	VIRTCHNL_PROTO_HDR_IPV4_DSCP,
	VIRTCHNL_PROTO_HDR_IPV4_TTL,
	VIRTCHNL_PROTO_HDR_IPV4_PROT,
	VIRTCHNL_PROTO_HDR_IPV4_CHKSUM,
	/* IPV6 */
	VIRTCHNL_PROTO_HDR_IPV6_SRC =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
	VIRTCHNL_PROTO_HDR_IPV6_DST,
	VIRTCHNL_PROTO_HDR_IPV6_TC,
	VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
	VIRTCHNL_PROTO_HDR_IPV6_PROT,
	/* IPV6 Prefix */
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX32_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX40_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX48_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX56_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_SRC,
	VIRTCHNL_PROTO_HDR_IPV6_PREFIX96_DST,
	/* TCP */
	VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
	VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
	VIRTCHNL_PROTO_HDR_TCP_CHKSUM,
	/* UDP */
	VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
	VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
	VIRTCHNL_PROTO_HDR_UDP_CHKSUM,
	/* SCTP */
	VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
	VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
	VIRTCHNL_PROTO_HDR_SCTP_CHKSUM,
	/* GTPU_IP */
	VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
	/* GTPU_EH */
	VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
	VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
	/* PPPOE */
	VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
	/* L2TPV3 */
	VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
	/* ESP */
	VIRTCHNL_PROTO_HDR_ESP_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
	/* AH */
	VIRTCHNL_PROTO_HDR_AH_SPI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
	/* PFCP */
	VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
	VIRTCHNL_PROTO_HDR_PFCP_SEID,
	/* GTPC */
	VIRTCHNL_PROTO_HDR_GTPC_TEID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPC),
	/* ECPRI */
	VIRTCHNL_PROTO_HDR_ECPRI_MSG_TYPE =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ECPRI),
	VIRTCHNL_PROTO_HDR_ECPRI_PC_RTC_ID,
	/* IPv4 Dummy Fragment */
	VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4_FRAG),
	/* IPv6 Extension Fragment */
	VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG),
	/* GTPU_DWN/UP */
	VIRTCHNL_PROTO_HDR_GTPU_DWN_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
	VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
		PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
};
/* One protocol header descriptor: which header type it is, which of that
 * header's fields are selected, and an optional raw copy of the header.
 */
struct virtchnl_proto_hdr {
	/* see enum virtchnl_proto_hdr_type */
	s32 type;
	u32 field_selector; /* a bit mask to select field for header type */
	u8 buffer[64];
	/**
	 * binary buffer in network order for specific header type.
	 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, a IPv4
	 * header is expected to be copied into the buffer.
	 */
};

VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
/* Ordered list of protocol headers describing one packet segment. */
struct virtchnl_proto_hdrs {
	u8 tunnel_level;
	/**
	 * specify where protocol header start from.
	 * 0 - from the outer layer
	 * 1 - from the first inner layer
	 * 2 - from the second inner layer
	 * ....
	 **/
	int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
	struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
};

VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
/* Payload of VIRTCHNL_OP_ADD_RSS_CFG / VIRTCHNL_OP_DEL_RSS_CFG: the packet
 * headers the RSS rule matches and the hash algorithm to apply.
 */
struct virtchnl_rss_cfg {
	struct virtchnl_proto_hdrs proto_hdrs;	   /* protocol headers */

	/* see enum virtchnl_rss_algorithm; rss algorithm type */
	s32 rss_algorithm;
	u8 reserved[128];			   /* reserve for future */
};

VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR: a single action and its type-specific
 * parameters (only the union arm matching 'type' is meaningful).
 */
struct virtchnl_filter_action {
	/* see enum virtchnl_action type */
	s32 type;
	union {
		/* used for queue and qgroup action */
		struct {
			u16 index;
			u8 region;
		} queue;
		/* used for count action */
		struct {
			/* share counter ID with other flow rules */
			u8 shared;
			u32 id; /* counter ID */
		} count;
		/* used for mark action */
		u32 mark_id;
		u8 reserve[32]; /* pads the union to a fixed wire size */
	} act_conf;
};

VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
#define VIRTCHNL_MAX_NUM_ACTIONS  8

/* Fixed-size set of actions attached to one FDIR rule. */
struct virtchnl_filter_action_set {
	/* action number must be less then VIRTCHNL_MAX_NUM_ACTIONS */
	int count;
	struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};

VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
/* pattern and action for FDIR rule: the headers to match plus the actions
 * to take on a match.
 */
struct virtchnl_fdir_rule {
	struct virtchnl_proto_hdrs proto_hdrs;
	struct virtchnl_filter_action_set action_set;
};

VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * VF FDIR related request was completed successfully by the PF.
 * The request can be OP_ADD/DEL/QUERY_FDIR_FILTER.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * OP_ADD_FDIR_FILTER request failed because no hardware resource
 * was available.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * OP_DEL_FDIR_FILTER request failed because the rule does not exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * OP_ADD_FDIR_FILTER request failed because parameter validation failed
 * or the HW does not support the rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * OP_ADD/DEL_FDIR_FILTER request failed because rule programming
 * timed out.
 *
 * VIRTCHNL_FDIR_FAILURE_QUERY_INVALID
 * OP_QUERY_FDIR_FILTER request failed because parameter validation failed,
 * e.g. the VF queried the counter of a rule that has no counter action.
 */
enum virtchnl_fdir_prgm_status {
	VIRTCHNL_FDIR_SUCCESS = 0,
	VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
	VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
	VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
	VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
	VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
	VIRTCHNL_FDIR_FAILURE_QUERY_INVALID,
};
/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id,
 * validate_only and rule_cfg. PF will return flow_id
 * if the request is successfully done and return add_status to VF.
 */
struct virtchnl_fdir_add {
	u16 vsi_id;  /* INPUT */
	/*
	 * 1 for validating a fdir rule, 0 for creating a fdir rule.
	 * Validate and create share one ops: VIRTCHNL_OP_ADD_FDIR_FILTER.
	 */
	u16 validate_only; /* INPUT */
	u32 flow_id;       /* OUTPUT: PF-assigned id, used later for deletion */
	struct virtchnl_fdir_rule rule_cfg; /* INPUT */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * VF sends this request to PF by filling out vsi_id
 * and flow_id. PF will return del_status to VF.
 */
struct virtchnl_fdir_del {
	u16 vsi_id;  /* INPUT */
	u16 pad;     /* explicit padding for 4-byte alignment of flow_id */
	u32 flow_id; /* INPUT: id returned by the matching OP_ADD_FDIR_FILTER */

	/* see enum virtchnl_fdir_prgm_status; OUTPUT */
	s32 status;
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
/* VIRTCHNL_OP_GET_QOS_CAPS
 * VF sends this message to get its QoS Caps, such as
 * TC number, Arbiter and Bandwidth.
 */
struct virtchnl_qos_cap_elem {
	u8 tc_num;
	u8 tc_prio;
#define VIRTCHNL_ABITER_STRICT      0
#define VIRTCHNL_ABITER_ETS         2
	u8 arbiter;
#define VIRTCHNL_STRICT_WEIGHT      1
	u8 weight;
	/* see enum virtchnl_bw_limit_type.
	 * NOTE(review): an enum in a wire struct — the 40-byte size check
	 * below assumes the compiler lays it out as 4 bytes; confirm for
	 * any new toolchain.
	 */
	enum virtchnl_bw_limit_type type;
	union {
		struct virtchnl_shaper_bw shaper;
		u8 pad2[32]; /* pads the union to a fixed wire size */
	};
};

VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem);
/* Variable-length response for VIRTCHNL_OP_GET_QOS_CAPS; cap[] actually
 * holds num_elem entries (declared as [1] in the legacy trailing-array
 * style, so the size check covers one element).
 */
struct virtchnl_qos_cap_list {
	u16 vsi_id;
	u16 num_elem;
	struct virtchnl_qos_cap_elem cap[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(44, virtchnl_qos_cap_list);
/* VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP
 * VF sends message virtchnl_queue_tc_mapping to set queue to tc
 * mapping for all the Tx and Rx queues with a specified VSI, and
 * would get response about bitmap of valid user priorities
 * associated with queues.
 */
struct virtchnl_queue_tc_mapping {
	u16 vsi_id;
	u16 num_tc;
	u16 num_queue_pairs;
	u8 pad[2]; /* explicit padding for 4-byte alignment of tc[] */
	/* tc[] actually holds num_tc entries (declared as [1] in the legacy
	 * trailing-array style); 'req' is filled by the VF, 'resp' by the PF.
	 */
	union {
		struct {
			u16 start_queue_id;
			u16 queue_count;
		} req;
		struct {
#define VIRTCHNL_USER_PRIO_TYPE_UP	0
#define VIRTCHNL_USER_PRIO_TYPE_DSCP	1
			u16 prio_type;
			u16 valid_prio_bitmap;
		} resp;
	} tc[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
/* TX and RX queue types are valid in legacy as well as split queue models.
* With Split Queue model, 2 additional types are introduced - TX_COMPLETION
* and RX_BUFFER. In split queue model, RX corresponds to the queue where HW
@ -1605,6 +2105,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
case VIRTCHNL_OP_DEL_RSS_CFG:
valid_len = sizeof(struct virtchnl_rss_cfg);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_add);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_del);
break;
case VIRTCHNL_OP_GET_QOS_CAPS:
break;
case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:
valid_len = sizeof(struct virtchnl_queue_tc_mapping);
if (msglen >= valid_len) {
struct virtchnl_queue_tc_mapping *q_tc =
(struct virtchnl_queue_tc_mapping *)msg;
if (q_tc->num_tc == 0) {
err_msg_format = true;
break;
}
valid_len += (q_tc->num_tc - 1) *
sizeof(q_tc->tc[0]);
}
break;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
break;
case VIRTCHNL_OP_ADD_VLAN_V2:

View File

@ -50,6 +50,16 @@
#define VIRTCHNL_CIPHER 2
#define VIRTCHNL_AEAD 3
/* caps enabled */
#define VIRTCHNL_IPSEC_ESN_ENA BIT(0)
#define VIRTCHNL_IPSEC_UDP_ENCAP_ENA BIT(1)
#define VIRTCHNL_IPSEC_SA_INDEX_SW_ENA BIT(2)
#define VIRTCHNL_IPSEC_AUDIT_ENA BIT(3)
#define VIRTCHNL_IPSEC_BYTE_LIMIT_ENA BIT(4)
#define VIRTCHNL_IPSEC_DROP_ON_AUTH_FAIL_ENA BIT(5)
#define VIRTCHNL_IPSEC_ARW_CHECK_ENA BIT(6)
#define VIRTCHNL_IPSEC_24BIT_SPI_ENA BIT(7)
/* algorithm type */
/* Hash Algorithm */
#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
@ -162,6 +172,7 @@ struct virtchnl_sym_crypto_cap {
* VF pass virtchnl_ipsec_cap to PF
* and PF return capability of ipsec from virtchnl.
*/
#pragma pack(1)
struct virtchnl_ipsec_cap {
/* max number of SA per VF */
u16 max_sa_num;
@ -175,41 +186,22 @@ struct virtchnl_ipsec_cap {
/* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
u8 virtchnl_direction;
/* type of esn - !0:enable/0:disable */
u8 esn_enabled;
/* type of udp_encap - !0:enable/0:disable */
u8 udp_encap_enabled;
/* termination mode - value ref VIRTCHNL_TERM_XXX */
u8 termination_mode;
/* SA index mode - !0:enable/0:disable */
u8 sa_index_sw_enabled;
/* auditing mode - !0:enable/0:disable */
u8 audit_enabled;
/* lifetime byte limit - !0:enable/0:disable */
u8 byte_limit_enabled;
/* drop on authentication failure - !0:enable/0:disable */
u8 drop_on_auth_fail_enabled;
/* anti-replay window check - !0:enable/0:disable */
u8 arw_check_enabled;
/* number of supported crypto capability */
u8 crypto_cap_num;
/* descriptor ID */
u16 desc_id;
/* capabilities enabled - value ref VIRTCHNL_IPSEC_XXX_ENA */
u32 caps_enabled;
/* crypto capabilities */
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
#pragma pack(1)
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;

View File

@ -34,56 +34,94 @@
#define _VIRTCHNL_LAN_DESC_H_
/* Rx */
/* For virtchnl_splitq_base_rx_flex desc members */
/* Shift (_S) and mask (_M) pairs for virtchnl_splitq_base_rx_flex descriptor
 * fields.  MAKEMASK()/BIT_ULL() are provided by the OS compatibility layer.
 */
#define VIRTCHNL_RXD_FLEX_PTYPE_S	0
#define VIRTCHNL_RXD_FLEX_PTYPE_M \
	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_PTYPE_S)
#define VIRTCHNL_RXD_FLEX_UMBCAST_S	10
#define VIRTCHNL_RXD_FLEX_UMBCAST_M \
	MAKEMASK(0x3UL, VIRTCHNL_RXD_FLEX_UMBCAST_S)
#define VIRTCHNL_RXD_FLEX_FF0_S		12
#define VIRTCHNL_RXD_FLEX_FF0_M		MAKEMASK(0xFUL, VIRTCHNL_RXD_FLEX_FF0_S)
#define VIRTCHNL_RXD_FLEX_LEN_PBUF_S	0
#define VIRTCHNL_RXD_FLEX_LEN_PBUF_M \
	MAKEMASK(0x3FFFUL, VIRTCHNL_RXD_FLEX_LEN_PBUF_S)
#define VIRTCHNL_RXD_FLEX_GEN_S		14
#define VIRTCHNL_RXD_FLEX_GEN_M		BIT_ULL(VIRTCHNL_RXD_FLEX_GEN_S)
#define VIRTCHNL_RXD_FLEX_BUFQ_ID_S	15
#define VIRTCHNL_RXD_FLEX_BUFQ_ID_M \
	BIT_ULL(VIRTCHNL_RXD_FLEX_BUFQ_ID_S)
#define VIRTCHNL_RXD_FLEX_LEN_HDR_S	0
#define VIRTCHNL_RXD_FLEX_LEN_HDR_M \
	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_LEN_HDR_S)
#define VIRTCHNL_RXD_FLEX_RSC_S		10
#define VIRTCHNL_RXD_FLEX_RSC_M		BIT_ULL(VIRTCHNL_RXD_FLEX_RSC_S)
#define VIRTCHNL_RXD_FLEX_SPH_S		11
#define VIRTCHNL_RXD_FLEX_SPH_M		BIT_ULL(VIRTCHNL_RXD_FLEX_SPH_S)
#define VIRTCHNL_RXD_FLEX_MISS_S	12
#define VIRTCHNL_RXD_FLEX_MISS_M \
	BIT_ULL(VIRTCHNL_RXD_FLEX_MISS_S)
#define VIRTCHNL_RXD_FLEX_FF1_S		13
/* Bug fix: the mask must shift by the _S (shift) constant; the previous
 * definition passed VIRTCHNL_RXD_FLEX_FF1_M itself, a self-referential
 * macro that can never expand to a usable value.
 */
#define VIRTCHNL_RXD_FLEX_FF1_M		MAKEMASK(0x7UL, VIRTCHNL_RXD_FLEX_FF1_S)
/* For splitq virtchnl_rx_flex_desc_adv desc members: shift (_S) and mask
 * (_M) pairs.  MAKEMASK()/BIT_ULL() are provided by the OS compatibility
 * layer.
 */
#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S	0
#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_M \
	MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S	0
#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_M \
	MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S	10
#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_M \
	MAKEMASK(0x3UL, VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S		12
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_M \
	MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S	0
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_M \
	MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S		14
#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_M \
	BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S	15
#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_M \
	BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S	0
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_M \
	MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S		10
#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_M \
	BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S		11
#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_M \
	BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S	12
#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_M \
	BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S		13
/* Bug fix: the mask must shift by the _S (shift) constant; the previous
 * definition passed VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M itself, a
 * self-referential macro that can never expand to a usable value.
 */
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M \
	MAKEMASK(0x7UL, VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S)
/* For virtchnl_singleq_base_rx_legacy desc members: shift (_S) and mask
 * (_M) pairs for fields packed into qword 1 of the legacy descriptor.
 */
#define VIRTCHNL_RXD_QW1_LEN_SPH_S	63
#define VIRTCHNL_RXD_QW1_LEN_SPH_M	BIT_ULL(VIRTCHNL_RXD_QW1_LEN_SPH_S)
#define VIRTCHNL_RXD_QW1_LEN_HBUF_S	52
#define VIRTCHNL_RXD_QW1_LEN_HBUF_M \
	MAKEMASK(0x7FFULL, VIRTCHNL_RXD_QW1_LEN_HBUF_S)
#define VIRTCHNL_RXD_QW1_LEN_PBUF_S	38
#define VIRTCHNL_RXD_QW1_LEN_PBUF_M \
	MAKEMASK(0x3FFFULL, VIRTCHNL_RXD_QW1_LEN_PBUF_S)
#define VIRTCHNL_RXD_QW1_PTYPE_S	30
#define VIRTCHNL_RXD_QW1_PTYPE_M \
	MAKEMASK(0xFFULL, VIRTCHNL_RXD_QW1_PTYPE_S)
#define VIRTCHNL_RXD_QW1_ERROR_S	19
#define VIRTCHNL_RXD_QW1_ERROR_M \
	MAKEMASK(0xFFUL, VIRTCHNL_RXD_QW1_ERROR_S)
#define VIRTCHNL_RXD_QW1_STATUS_S	0
#define VIRTCHNL_RXD_QW1_STATUS_M \
	MAKEMASK(0x7FFFFUL, VIRTCHNL_RXD_QW1_STATUS_S)
/* Bit positions for the first status/error byte (STATUS0) of the advanced
 * flex RX descriptor.  NOTE(review): the "qw1" in the enum name refers to
 * the descriptor's qword layout, not a second status word — confirm
 * against the hardware spec when using.
 */
enum virtchnl_rx_flex_desc_adv_status_error_0_qw1_bits {
	/* Note: These are predefined bit offsets */
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_DD_S = 0,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_EOF_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_HBO_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S,
};
enum virtchnl_rx_flex_desc_status_error_0_qw1_bits {
enum virtchnl_rx_flex_desc_adv_status_error_0_qw0_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LPBK_S = 0,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RXE_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_CRCP_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LAST /* this entry must be last!!! */
};
/* Bit positions for the second status/error field (STATUS1) of the advanced
 * flex RX descriptor.
 */
enum virtchnl_rx_flex_desc_adv_status_error_1_bits {
	/* Note: These are predefined bit offsets */
	/* 2 bits */
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_RSVD_S = 0,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S = 2,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S = 3,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S = 4,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S = 5,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S = 6,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S = 7,
	VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_LAST /* this entry must be last!!! */
};
/* for singleq (flex) virtchnl_rx_flex_desc fields */
/* for virtchnl_rx_flex_desc.ptype_flex_flags0 member */
#define VIRTCHNL_RX_FLEX_DESC_PTYPE_S 0
#define VIRTCHNL_RX_FLEX_DESC_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_PTYPE_S) /* 10-bits */
/* for virtchnl_rx_flex_desc.pkt_length member */
#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S 0
#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_M \
MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S) /* 14-bits */
enum virtchnl_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0,
VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S,
@ -93,10 +131,7 @@ enum virtchnl_rx_flex_desc_status_error_0_qw1_bits {
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
};
enum virtchnl_rx_flex_desc_status_error_0_qw0_bits {
VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S = 0,
VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S,
@ -109,53 +144,80 @@ enum virtchnl_rx_flex_desc_status_error_0_qw0_bits {
enum virtchnl_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS1_RSVD_S = 0, /* 2 bits */
VIRTCHNL_RX_FLEX_DESC_STATUS1_ATRAEFAIL_S = 2,
VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 3,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 4,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 5,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 6,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 7,
VIRTCHNL_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
VIRTCHNL_RX_FLEX_DESC_STATUS1_NAT_S = 4,
VIRTCHNL_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
/* [10:6] reserved */
VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
/* For singleq (non flex) virtchnl_singleq_base_rx_desc legacy desc members:
 * shift (_S) and mask (_M) pairs for fields packed into qword 1.
 */
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S	63
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_M \
	BIT_ULL(VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S	52
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_M \
	MAKEMASK(0x7FFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S	38
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_M \
	MAKEMASK(0x3FFFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S	30
#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_M \
	MAKEMASK(0xFFULL, VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S	19
#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_M \
	MAKEMASK(0xFFUL, VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S	0
#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_M \
	MAKEMASK(0x7FFFFUL, VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S)
/*
 * Bit offsets within the 19-bit STATUS field (qword 1) of the legacy
 * singleq base Rx descriptor.  The diff-merge duplicates of several
 * enumerators have been removed; in C, redeclaring an enumerator in the
 * same enumeration is a constraint violation.
 */
enum virtchnl_rx_base_desc_status_bits {
	/* Note: These are predefined bit offsets */
	VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0,
	VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1,
	VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2,
	VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3,
	VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4,
	VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */
	VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8,
	VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */
	VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11,
	VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */
	VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14,
	VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15,
	VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */
	VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18,
	VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */
};
/*
 * Values of the 2-bit FLTSTAT field in a legacy Rx descriptor, indicating
 * what the filter-status word of the write-back contains.
 * The closing brace of this enum had been lost (the next enum opened while
 * this one was still unterminated); restored here.
 */
enum virtchnl_rx_desc_fltstat_values {
	VIRTCHNL_RX_DESC_FLTSTAT_NO_DATA = 0,
	VIRTCHNL_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
	VIRTCHNL_RX_DESC_FLTSTAT_RSV = 2,
	VIRTCHNL_RX_DESC_FLTSTAT_RSS_HASH = 3,
};

/*
 * Bit offsets within the extended status word of the singleq base Rx
 * descriptor.
 */
enum virtchnl_rx_base_desc_ext_status_bits {
	/* Note: These are predefined bit offsets */
	VIRTCHNL_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S = 0
};
/*
 * Bit offsets within the 8-bit ERROR field (qword 1) of the legacy
 * singleq base Rx descriptor.  Duplicated enumerators from the merged
 * old/new versions have been removed (an enumerator may be declared only
 * once per enumeration); the newer layout, which names bit 7 PPRS rather
 * than RSVD, is kept.  Note that L3L4E (3-bit group) and IPE deliberately
 * share offset 3: IPE is the first bit of the L3L4E group.
 */
enum virtchnl_rx_base_desc_error_bits {
	/* Note: These are predefined bit offsets */
	VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0,
	VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1,
	VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2,
	VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */
	VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3,
	VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4,
	VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5,
	VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6,
	VIRTCHNL_RX_BASE_DESC_ERROR_PPRS_S = 7
};
/*
 * Values of the 2-bit FLTSTAT status field (offset 12, see
 * VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S) of the singleq base Rx
 * descriptor, telling which datum hardware placed in the filter-status
 * word of the write-back.
 */
enum virtchnl_rx_base_desc_fltstat_values {
VIRTCHNL_RX_BASE_DESC_FLTSTAT_NO_DATA = 0,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_FD_ID = 1,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSV = 2,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSS_HASH = 3,
};
/* Receive Descriptors */
@ -277,7 +339,7 @@ struct virtchnl_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
@ -312,7 +374,7 @@ struct virtchnl_rx_flex_desc_sw {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
@ -334,71 +396,6 @@ struct virtchnl_rx_flex_desc_sw {
__le32 ts_high;
};
/* Rx Flex Descriptor NIC VEB Profile
 * RxDID Profile Id 4
 * Flex-field 0: Destination Vsi
 */
/*
 * Write-back layout for the NIC VEB debug flex descriptor profile.
 * Four 8-byte qwords (32 bytes total); all multi-byte fields are
 * little-endian.  Field order and widths mirror the hardware layout and
 * must not be changed.
 */
struct virtchnl_rx_flex_desc_nic_veb_dbg {
/* Qword 0 */
u8 rxdid; /* descriptor profile id (RxDID = 4 for this layout) */
u8 mir_id_umb_cast;
/* NOTE(review): sibling NIC profiles spell this field "ptype_flex_flags0";
 * confirm whether the "flexi" spelling here is intentional.
 */
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 dst_vsi; /* [0:12]: destination vsi */
/* 13: vsi valid bit */
/* [14:15] are reserved */
__le16 flex_field_1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC ACL Profile
 * RxDID Profile Id 5
 * Flex-field 0: ACL Counter 0
 * Flex-field 1: ACL Counter 1
 * Flex-field 2: ACL Counter 2
 */
/*
 * Write-back layout for the NIC ACL debug flex descriptor profile.
 * Four 8-byte qwords (32 bytes total); all multi-byte fields are
 * little-endian.  Field order and widths mirror the hardware layout and
 * must not be changed.
 */
struct virtchnl_rx_flex_desc_nic_acl_dbg {
/* Qword 0 */
u8 rxdid; /* descriptor profile id (RxDID = 5 for this layout) */
u8 mir_id_umb_cast;
/* NOTE(review): sibling NIC profiles spell this field "ptype_flex_flags0";
 * confirm whether the "flexi" spelling here is intentional.
 */
__le16 ptype_flexi_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 acl_ctr0; /* flex-field 0 */
__le16 acl_ctr1; /* flex-field 1 */
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 acl_ctr2; /* flex-field 2 */
__le16 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC Profile
* RxDID Profile Id 6
* Flex-field 0: RSS hash lower 16-bits
@ -411,7 +408,7 @@ struct virtchnl_rx_flex_desc_nic_2 {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flexi_flags0;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
@ -540,10 +537,14 @@ struct virtchnl_rx_flex_desc_adv_nic_3 {
}; /* writeback */
/*
 * General-purpose Rx descriptor overlay: one member per supported
 * descriptor layout, all sharing the same storage.  The diff-merge
 * duplicates of read/base_wb/flex_wb and the stale member name
 * "flex_wb_adv" (superseded by "flex_adv_wb") have been removed; a union
 * may not declare the same member twice.
 */
union virtchnl_rx_desc {
	struct virtchnl_singleq_rx_buf_desc	read;
	struct virtchnl_singleq_base_rx_desc	base_wb;
	struct virtchnl_rx_flex_desc		flex_wb;
	struct virtchnl_rx_flex_desc_nic	flex_nic_wb;
	struct virtchnl_rx_flex_desc_sw		flex_sw_wb;
	struct virtchnl_rx_flex_desc_nic_2	flex_nic_2_wb;
	struct virtchnl_rx_flex_desc_adv	flex_adv_wb;
	struct virtchnl_rx_flex_desc_adv_nic_3	flex_adv_nic_3_wb;
};
#endif /* _VIRTCHNL_LAN_DESC_H_ */

View File

@ -7,9 +7,10 @@ SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c
SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
# FW logging support (OS-specific side)
SRCS += ice_fw_logging.c

# Shared source
# Note: ice_sriov.c was removed from the tree and must not be listed here;
# the stale duplicate line referencing it has been dropped.
SRCS += ice_common.c ice_controlq.c ice_dcb.c ice_flex_pipe.c ice_flow.c
SRCS += ice_nvm.c ice_sched.c ice_switch.c ice_vlan_mode.c ice_fwlog.c

.include <bsd.kmod.mk>