ice(4): Update to 1.37.7-k

Notable changes include:

- DSCP QoS Support (leveraging support added in
  rG9c950139051298831ce19d01ea5fb33ec6ea7f89)
- Improved PFC handling and TC queue assignments (when more than one TC
  is enabled and the available queues do not divide evenly between them,
  all remaining queues are now assigned to TC 0; see the sketch after
  this list)
- Support for dumping the internal FW state for additional debugging by
  Intel support
- Support for allowing "No FEC" to be a valid state for the LESM to
  negotiate when using modules that are not standards-compliant
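
A minimal sketch of the remainder handling described in the PFC/TC item
above, using hypothetical names (an illustration only, not the driver's
actual code):

    /*
     * Divide the available queues evenly among the enabled TCs, then
     * assign whatever is left over to TC 0.
     */
    static void
    assign_queues_to_tcs(unsigned num_queues, unsigned num_tcs,
        unsigned queues_per_tc[])
    {
        unsigned i, base = num_queues / num_tcs;
        unsigned remainder = num_queues % num_tcs;

        for (i = 0; i < num_tcs; i++)
            queues_per_tc[i] = base;
        /* Leftover queues all land on TC 0 rather than being spread. */
        queues_per_tc[0] += remainder;
    }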

Also includes various bug fixes and smaller enhancements.

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Reviewed by:	erj@
Tested by:	Jeff Pieper <jeffrey.pieper@intel.com>
MFC after:	3 days
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D38109
Author:	Piotr Kubaj, 2023-02-13 17:29:44 -08:00
Committed by:	Eric Joyner
parent 3a3450eda6
commit 8923de5905
64 changed files with 5840 additions and 3683 deletions

@ -187,6 +187,8 @@ dev/ice/irdma_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/ice_ddp_common.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \

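The third field of the fw_stub.awk argument above (0x01031e00) is the
module version packed one byte per component. A quick check, assuming
the usual major.minor.update.draft byte layout (an assumption, not
stated in this commit):

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t ver = 0x01031e00;

        /* Prints 1.3.30.0 for the value embedded above. */
        printf("%u.%u.%u.%u\n",
            (ver >> 24) & 0xff,     /* major  */
            (ver >> 16) & 0xff,     /* minor  */
            (ver >> 8) & 0xff,      /* update */
            ver & 0xff);            /* draft  */
        return (0);
    }
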
@ -294,6 +294,8 @@ dev/ice/irdma_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/ice_ddp_common.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \

@ -42,48 +42,50 @@ dev/iicbus/ofw_iicbus.c optional iicbus aim
dev/iicbus/ofw_iicbus_if.m optional iicbus aim
dev/ipmi/ipmi.c optional ipmi
dev/ipmi/ipmi_opal.c optional powernv ipmi
dev/ice/if_ice_iflib.c optional ice pci powerpc64 \
dev/ice/if_ice_iflib.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c optional ice pci powerpc64 \
dev/ice/ice_lib.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_osdep.c optional ice pci powerpc64 \
dev/ice/ice_osdep.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_resmgr.c optional ice pci powerpc64 \
dev/ice/ice_resmgr.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_strings.c optional ice pci powerpc64 \
dev/ice/ice_strings.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 \
dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 \
dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_common.c optional ice pci powerpc64 \
dev/ice/ice_common.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_controlq.c optional ice pci powerpc64 \
dev/ice/ice_controlq.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_dcb.c optional ice pci powerpc64 \
dev/ice/ice_dcb.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flex_pipe.c optional ice pci powerpc64 \
dev/ice/ice_flex_pipe.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flow.c optional ice pci powerpc64 \
dev/ice/ice_flow.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_nvm.c optional ice pci powerpc64 \
dev/ice/ice_nvm.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c optional ice pci powerpc64 \
dev/ice/ice_sched.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci powerpc64 \
dev/ice/ice_switch.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci powerpc64 \
dev/ice/ice_vlan_mode.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c optional ice pci powerpc64 \
dev/ice/ice_fw_logging.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c optional ice pci powerpc64 \
dev/ice/ice_fwlog.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_rdma.c optional ice pci powerpc64 \
dev/ice/ice_rdma.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/irdma_if.m optional ice pci powerpc64 \
dev/ice/irdma_if.m optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m optional ice pci powerpc64 \
dev/ice/irdma_di_if.m optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/ice_ddp_common.c optional ice pci powerpc64 | ice pci powerpc64le \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp powerpc64 | ice pci powerpc64le \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -37,10 +37,19 @@
* descriptor format. It is shared between Firmware and Software.
*/
#include "ice_osdep.h"
#include "ice_defs.h"
#include "ice_bitops.h"
#define ICE_MAX_VSI 768
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
};
struct ice_aqc_generic {
__le32 param0;
__le32 param1;
@ -155,9 +164,6 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
#define ICE_AQC_CAPS_OROM_VER 0x004A
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_CEM 0x00F2
#define ICE_AQC_CAPS_IWARP 0x0051
#define ICE_AQC_CAPS_LED 0x0061
@ -173,6 +179,10 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083
#define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084
#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define ICE_AQC_CAPS_DYN_FLATTENING 0x0090
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
u8 major_ver;
u8 minor_ver;
@ -526,6 +536,7 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_RX_PASS_PRUNE_ENA BIT(3)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
@ -836,6 +847,8 @@ struct ice_sw_rule_lkup_rx_tx {
#define ICE_SINGLE_ACT_PTR 0x2
#define ICE_SINGLE_ACT_PTR_VAL_S 4
#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S)
/* Bit 17 should be set if pointed action includes a FWD cmd */
#define ICE_SINGLE_ACT_PTR_HAS_FWD BIT(17)
/* Bit 18 should be set to 1 */
#define ICE_SINGLE_ACT_PTR_BIT BIT(18)
@ -1017,6 +1030,24 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
struct ice_aqc_get_set_tx_topo {
u8 set_flags;
#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2)
#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
u8 get_flags;
#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0
#define ICE_AQC_TX_TOPO_GET_PSM 1
#define ICE_AQC_TX_TOPO_GET_RAM 2
__le16 reserved1;
__le32 reserved2;
__le32 addr_high;
__le32 addr_low;
};
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@ -1170,6 +1201,22 @@ struct ice_aqc_rl_profile_elem {
__le16 rl_encode;
};
/* Config Node Attributes (indirect 0x0419)
* Query Node Attributes (indirect 0x041A)
*/
struct ice_aqc_node_attr {
__le16 num_entries; /* Number of attributes structures in the buffer */
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_node_attr_elem {
__le32 node_teid;
__le16 max_children;
__le16 children_level;
};
/* Configure L2 Node CGD (indirect 0x0414)
* This indirect command allows configuring a congestion domain for given L2
* node TEIDs in the scheduler topology.
@ -1335,7 +1382,7 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@ -1376,6 +1423,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
#define ICE_AQC_PHY_FEC_DIS BIT(5)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0)
@ -1484,6 +1532,12 @@ struct ice_aqc_get_link_status {
__le32 addr_low;
};
enum ice_get_link_status_data_version {
ICE_GET_LINK_STATUS_DATA_V1 = 1,
};
#define ICE_GET_LINK_STATUS_DATALEN_V1 32
/* Get link status response data structure, also used for Link Status Event */
struct ice_aqc_get_link_status_data {
u8 topo_media_conflict;
@ -2078,6 +2132,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
/* For Write Activate, several flags are sent as part of a separate
* flags2 field using a separate byte. For simplicity of the software
* interface, we pass the flags as a 16 bit value so these flags are
* all offset by 8 bits
*/
#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
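
The offset-by-8 convention in the comment above means a single 16-bit
software value carries both hardware bytes. A hedged sketch of the
split (names illustrative, not the actual descriptor layout):

    #include <stdint.h>

    /* Split the combined 16-bit value into the low-byte cmd_flags the
     * hardware already defined and the new high-byte flags2, so e.g.
     * ICE_AQC_NVM_ACTIV_REQ_EMPR (bit 8) lands in flags2 bit 0. */
    static void
    nvm_split_activate_flags(uint16_t flags, uint8_t *cmd_flags,
        uint8_t *flags2)
    {
        *cmd_flags = flags & 0xff;
        *flags2 = (flags >> 8) & 0xff;
    }
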
@ -2108,6 +2168,7 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
* type field is excluded from the section when reading and writing from
@ -2124,6 +2185,13 @@ struct ice_aqc_nvm_minsrev {
__le16 orom_minsrev_h;
};
struct ice_aqc_nvm_tx_topo_user_sel {
__le16 length;
u8 data;
#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
u8 reserved;
};
/* Used for 0x0704 as well as for 0x0705 commands */
struct ice_aqc_nvm_cfg {
u8 cmd_flags;
@ -2218,14 +2286,25 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
/* DCBX mode */
#define ICE_AQ_LLDP_DCBX_S 6
#define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S)
#define ICE_AQ_LLDP_DCBX_NA 0
#define ICE_AQ_LLDP_DCBX_CEE 1
#define ICE_AQ_LLDP_DCBX_IEEE 2
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
u8 reserved1;
u8 state;
#define ICE_AQ_LLDP_MIB_CHANGE_STATE_S 0
#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M \
(0x1 << ICE_AQ_LLDP_MIB_CHANGE_STATE_S)
#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
__le16 local_len;
__le16 remote_len;
u8 reserved2[2];
u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@ -2236,6 +2315,11 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
#define ICE_AQ_LLDP_MIB_PENDING_S 1
#define ICE_AQ_LLDP_MIB_PENDING_M \
(0x1 << ICE_AQ_LLDP_MIB_PENDING_S)
#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
@ -2580,6 +2664,9 @@ struct ice_aqc_add_rdma_qset_data {
/* Move RDMA Queue Set (indirect 0x0C34) */
struct ice_aqc_move_rdma_qset_cmd {
u8 num_rdma_qset; /* Used by commands and response */
#define ICE_AQC_PF_MODE_SAME_PF 0x0
#define ICE_AQC_PF_MODE_GIVE_OWNERSHIP 0x1
#define ICE_AQC_PF_MODE_KEEP_OWNERSHIP 0x2
u8 flags;
u8 reserved[6];
__le32 addr_high;
@ -2656,8 +2743,8 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
#define ICE_AQC_DRIVER_PARAM_SET 0
#define ICE_AQC_DRIVER_PARAM_GET 1
#define ICE_AQC_DRIVER_PARAM_SET ((u8)0)
#define ICE_AQC_DRIVER_PARAM_GET ((u8)1)
u8 param_indx;
#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
u8 rsvd[2];
@ -2676,16 +2763,18 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
u8 cluster_id;
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
/* EMP_DRAM only dumpable in device debug mode */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
/* AUX_REGS only dumpable in device debug mode */
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
u8 reserved;
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
@ -2729,7 +2818,6 @@ enum ice_aqc_fw_logging_mod {
ICE_AQC_FW_LOG_ID_MAX,
};
/* Set Health Status (direct 0xFF20) */
struct ice_aqc_set_health_status_config {
u8 event_source;
@ -2747,6 +2835,7 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
#define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
#define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
#define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
@ -2768,7 +2857,16 @@ struct ice_aqc_set_health_status_config {
#define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
#define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509
#define ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
#define ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET 0x50B
#define ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
#define ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
#define ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP 0x1000
#define ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
#define ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
/* Get Health Status codes (indirect 0xFF21) */
struct ice_aqc_get_supported_health_status_codes {
@ -2923,6 +3021,7 @@ struct ice_aq_desc {
struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_node_attr node_attr;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_cfg nvm_cfg;
struct ice_aqc_nvm_checksum nvm_checksum;
@ -2949,6 +3048,7 @@ struct ice_aq_desc {
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_move_txqs move_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_move_rdma_qset_cmd move_rdma_qset;
struct ice_aqc_txqs_cleanup txqs_cleanup;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
@ -2975,6 +3075,7 @@ struct ice_aq_desc {
struct ice_aqc_clear_health_status clear_health_status;
struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm;
struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm;
struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@ -3125,6 +3226,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_node_to_root = 0x0413,
ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
ice_aqc_opc_remove_rl_profiles = 0x0415,
ice_aqc_opc_set_tx_topo = 0x0417,
ice_aqc_opc_get_tx_topo = 0x0418,
ice_aqc_opc_cfg_node_attr = 0x0419,
ice_aqc_opc_query_node_attr = 0x041A,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
@ -3196,6 +3301,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
ice_execute_pending_lldp_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,15 +33,25 @@
#ifndef _ICE_BITOPS_H_
#define _ICE_BITOPS_H_
#include "ice_defs.h"
#include "ice_osdep.h"
/* Define the size of the bitmap chunk */
typedef u32 ice_bitmap_t;
/* NOTE!
* Do not use any of the functions declared in this file
* on memory that was not declared with ice_declare_bitmap.
* Not following this rule might cause issues like split
* locks.
*/
/* Number of bits per bitmap chunk */
#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
/* Determine which chunk a bit belongs in */
#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
/* How many chunks are required to store this many bits */
#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)
/* Which bit inside a chunk this bit corresponds to */
#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
/* How many bits are valid in the last chunk, assumes nr > 0 */
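
Given the 32-bit ice_bitmap_t chunk above, these macros reduce to plain
integer arithmetic. A standalone check (macros re-stated locally just
for the demonstration):

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t ice_bitmap_t;
    #define BITS_PER_BYTE 8
    #define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
    #define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
    #define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
    #define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK)

    int
    main(void)
    {
        assert(BITS_PER_CHUNK == 32);    /* 8 bits x 4 bytes */
        assert(BIT_CHUNK(40) == 1);      /* bit 40 sits in the second chunk */
        assert(BIT_IN_CHUNK(40) == 8);   /* ...at offset 8 within it */
        assert(BITS_TO_CHUNKS(65) == 3); /* 65 bits round up to 3 chunks */
        return (0);
    }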

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -39,118 +39,110 @@
#define ICE_PF_RESET_WAIT_COUNT 300
/**
* dump_phy_type - helper function that prints PHY type strings
* @hw: pointer to the HW structure
* @phy: 64 bit PHY type to decipher
* @i: bit index within phy
* @phy_string: string corresponding to bit i in phy
* @prefix: prefix string to differentiate multiple dumps
*/
static void
dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string,
const char *prefix)
{
if (phy & BIT_ULL(i))
ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i,
phy_string);
}
static const char * const ice_link_mode_str_low[] = {
[0] = "100BASE_TX",
[1] = "100M_SGMII",
[2] = "1000BASE_T",
[3] = "1000BASE_SX",
[4] = "1000BASE_LX",
[5] = "1000BASE_KX",
[6] = "1G_SGMII",
[7] = "2500BASE_T",
[8] = "2500BASE_X",
[9] = "2500BASE_KX",
[10] = "5GBASE_T",
[11] = "5GBASE_KR",
[12] = "10GBASE_T",
[13] = "10G_SFI_DA",
[14] = "10GBASE_SR",
[15] = "10GBASE_LR",
[16] = "10GBASE_KR_CR1",
[17] = "10G_SFI_AOC_ACC",
[18] = "10G_SFI_C2C",
[19] = "25GBASE_T",
[20] = "25GBASE_CR",
[21] = "25GBASE_CR_S",
[22] = "25GBASE_CR1",
[23] = "25GBASE_SR",
[24] = "25GBASE_LR",
[25] = "25GBASE_KR",
[26] = "25GBASE_KR_S",
[27] = "25GBASE_KR1",
[28] = "25G_AUI_AOC_ACC",
[29] = "25G_AUI_C2C",
[30] = "40GBASE_CR4",
[31] = "40GBASE_SR4",
[32] = "40GBASE_LR4",
[33] = "40GBASE_KR4",
[34] = "40G_XLAUI_AOC_ACC",
[35] = "40G_XLAUI",
[36] = "50GBASE_CR2",
[37] = "50GBASE_SR2",
[38] = "50GBASE_LR2",
[39] = "50GBASE_KR2",
[40] = "50G_LAUI2_AOC_ACC",
[41] = "50G_LAUI2",
[42] = "50G_AUI2_AOC_ACC",
[43] = "50G_AUI2",
[44] = "50GBASE_CP",
[45] = "50GBASE_SR",
[46] = "50GBASE_FR",
[47] = "50GBASE_LR",
[48] = "50GBASE_KR_PAM4",
[49] = "50G_AUI1_AOC_ACC",
[50] = "50G_AUI1",
[51] = "100GBASE_CR4",
[52] = "100GBASE_SR4",
[53] = "100GBASE_LR4",
[54] = "100GBASE_KR4",
[55] = "100G_CAUI4_AOC_ACC",
[56] = "100G_CAUI4",
[57] = "100G_AUI4_AOC_ACC",
[58] = "100G_AUI4",
[59] = "100GBASE_CR_PAM4",
[60] = "100GBASE_KR_PAM4",
[61] = "100GBASE_CP2",
[62] = "100GBASE_SR2",
[63] = "100GBASE_DR",
};
static const char * const ice_link_mode_str_high[] = {
[0] = "100GBASE_KR2_PAM4",
[1] = "100G_CAUI2_AOC_ACC",
[2] = "100G_CAUI2",
[3] = "100G_AUI2_AOC_ACC",
[4] = "100G_AUI2",
};
/**
* ice_dump_phy_type_low - helper function to dump phy_type_low
* ice_dump_phy_type - helper function to dump phy_type
* @hw: pointer to the HW structure
* @low: 64 bit value for phy_type_low
* @prefix: prefix string to differentiate multiple dumps
*/
static void
ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix)
{
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
(unsigned long long)low);
dump_phy_type(hw, low, 0, "100BASE_TX", prefix);
dump_phy_type(hw, low, 1, "100M_SGMII", prefix);
dump_phy_type(hw, low, 2, "1000BASE_T", prefix);
dump_phy_type(hw, low, 3, "1000BASE_SX", prefix);
dump_phy_type(hw, low, 4, "1000BASE_LX", prefix);
dump_phy_type(hw, low, 5, "1000BASE_KX", prefix);
dump_phy_type(hw, low, 6, "1G_SGMII", prefix);
dump_phy_type(hw, low, 7, "2500BASE_T", prefix);
dump_phy_type(hw, low, 8, "2500BASE_X", prefix);
dump_phy_type(hw, low, 9, "2500BASE_KX", prefix);
dump_phy_type(hw, low, 10, "5GBASE_T", prefix);
dump_phy_type(hw, low, 11, "5GBASE_KR", prefix);
dump_phy_type(hw, low, 12, "10GBASE_T", prefix);
dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix);
dump_phy_type(hw, low, 14, "10GBASE_SR", prefix);
dump_phy_type(hw, low, 15, "10GBASE_LR", prefix);
dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix);
dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix);
dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix);
dump_phy_type(hw, low, 19, "25GBASE_T", prefix);
dump_phy_type(hw, low, 20, "25GBASE_CR", prefix);
dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix);
dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix);
dump_phy_type(hw, low, 23, "25GBASE_SR", prefix);
dump_phy_type(hw, low, 24, "25GBASE_LR", prefix);
dump_phy_type(hw, low, 25, "25GBASE_KR", prefix);
dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix);
dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix);
dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix);
dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix);
dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix);
dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix);
dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix);
dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix);
dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix);
dump_phy_type(hw, low, 35, "40G_XLAUI", prefix);
dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix);
dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix);
dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix);
dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix);
dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix);
dump_phy_type(hw, low, 41, "50G_LAUI2", prefix);
dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix);
dump_phy_type(hw, low, 43, "50G_AUI2", prefix);
dump_phy_type(hw, low, 44, "50GBASE_CP", prefix);
dump_phy_type(hw, low, 45, "50GBASE_SR", prefix);
dump_phy_type(hw, low, 46, "50GBASE_FR", prefix);
dump_phy_type(hw, low, 47, "50GBASE_LR", prefix);
dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix);
dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix);
dump_phy_type(hw, low, 50, "50G_AUI1", prefix);
dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix);
dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix);
dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix);
dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix);
dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix);
dump_phy_type(hw, low, 56, "100G_CAUI4", prefix);
dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix);
dump_phy_type(hw, low, 58, "100G_AUI4", prefix);
dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix);
dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix);
dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix);
dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix);
dump_phy_type(hw, low, 63, "100GBASE_DR", prefix);
}
/**
* ice_dump_phy_type_high - helper function to dump phy_type_high
* @hw: pointer to the HW structure
* @high: 64 bit value for phy_type_high
* @prefix: prefix string to differentiate multiple dumps
*/
static void
ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix)
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
u32 i;
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
(unsigned long long)low);
for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
if (low & BIT_ULL(i))
ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
prefix, i, ice_link_mode_str_low[i]);
}
ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
(unsigned long long)high);
dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix);
dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix);
dump_phy_type(hw, high, 2, "100G_CAUI2", prefix);
dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix);
dump_phy_type(hw, high, 4, "100G_AUI2", prefix);
for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
if (high & BIT_ULL(i))
ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
prefix, i, ice_link_mode_str_high[i]);
}
}
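
The refactor above swaps dozens of per-bit dump_phy_type() calls for
two string tables walked by a loop. The pattern in isolation, with a
toy table (names and entries illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* One string per bit position; print only the bits set in mask. */
    static const char * const mode_str[] = {
        [0] = "100BASE_TX",
        [1] = "100M_SGMII",
        [2] = "1000BASE_T",
    };

    static void
    dump_modes(uint64_t mask)
    {
        uint32_t i;

        for (i = 0; i < sizeof(mode_str) / sizeof(mode_str[0]); i++)
            if (mask & (1ULL << i))
                printf("bit(%u): %s\n", i, mode_str[i]);
    }

    int
    main(void)
    {
        dump_modes(0x5);    /* prints entries for bits 0 and 2 */
        return (0);
    }
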
/**
@ -227,13 +219,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T:
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T4:
case ICE_SUBDEV_ID_E810T5:
case ICE_SUBDEV_ID_E810T7:
return true;
}
break;
case ICE_DEV_ID_E810C_QSFP:
if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T5:
case ICE_SUBDEV_ID_E810T6:
return true;
}
break;
default:
break;
@ -242,6 +244,31 @@ bool ice_is_e810t(struct ice_hw *hw)
return false;
}
/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
* returns true if the device is E823-L or E823-C based, false if not.
*/
bool ice_is_e823(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823L_QSFP:
case ICE_DEV_ID_E823C_BACKPLANE:
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_10G_BASE_T:
case ICE_DEV_ID_E823C_SGMII:
return true;
default:
return false;
}
}
/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
@ -308,10 +335,10 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
ice_memcpy(hw->port_info->mac.lan_addr,
resp[i].mac_addr, ETH_ALEN,
ICE_DMA_TO_NONDMA);
ICE_NONDMA_TO_NONDMA);
ice_memcpy(hw->port_info->mac.perm_addr,
resp[i].mac_addr,
ETH_ALEN, ICE_DMA_TO_NONDMA);
ETH_ALEN, ICE_NONDMA_TO_NONDMA);
break;
}
return ICE_SUCCESS;
@ -355,23 +382,30 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
cmd->param0 |= CPU_TO_LE16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA)
switch (report_mode) {
case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
prefix = "phy_caps_media";
else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA)
break;
case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
prefix = "phy_caps_no_media";
else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG)
break;
case ICE_AQC_REPORT_ACTIVE_CFG:
prefix = "phy_caps_active";
else if (report_mode == ICE_AQC_REPORT_DFLT_CFG)
break;
case ICE_AQC_REPORT_DFLT_CFG:
prefix = "phy_caps_default";
else
break;
default:
prefix = "phy_caps_invalid";
}
ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix);
ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix);
ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
LE64_TO_CPU(pcaps->phy_type_high), prefix);
ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
prefix, report_mode);
@ -444,7 +478,7 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
*
* Find and return the node handle for a given node type and part number in the
* netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
* otherwise. If @node_handle provided, it would be set to found node handle.
* otherwise. If node_handle provided, it would be set to found node handle.
*/
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
@ -452,11 +486,12 @@ ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
{
struct ice_aqc_get_link_topo cmd;
u8 rec_node_part_number;
enum ice_status status;
u16 rec_node_handle;
u8 idx;
for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
enum ice_status status;
memset(&cmd, 0, sizeof(cmd));
cmd.addr.topo_params.node_type_ctx =
@ -545,7 +580,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_1000BASE_LX:
case ICE_PHY_TYPE_LOW_10GBASE_SR:
case ICE_PHY_TYPE_LOW_10GBASE_LR:
case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_SR:
case ICE_PHY_TYPE_LOW_25GBASE_LR:
case ICE_PHY_TYPE_LOW_40GBASE_SR4:
@ -602,6 +636,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
case ICE_PHY_TYPE_LOW_2500BASE_X:
case ICE_PHY_TYPE_LOW_5GBASE_KR:
case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
case ICE_PHY_TYPE_LOW_25GBASE_KR:
case ICE_PHY_TYPE_LOW_25GBASE_KR1:
case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
@ -629,6 +664,8 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
return ICE_MEDIA_UNKNOWN;
}
#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
/**
* ice_aq_get_link_info
* @pi: port information structure
@ -668,8 +705,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = CPU_TO_LE16(cmd_flags);
resp->lport_num = pi->lport;
status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
status = ice_aq_send_cmd(hw, &desc, &link_data,
ice_get_link_status_datalen(hw), cd);
if (status != ICE_SUCCESS)
return status;
@ -1255,7 +1292,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* that is occurring during a download package operation.
*/
for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
ICE_PF_RESET_WAIT_COUNT; cnt++) {
ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@ -2341,8 +2378,6 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_NVM_VER:
break;
case ICE_AQC_CAPS_NVM_MGMT:
caps->sec_rev_disabled =
(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
@ -2369,6 +2404,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->iwarp = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
break;
case ICE_AQC_CAPS_ROCEV2_LAG:
caps->roce_lag = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
prefix, caps->roce_lag);
break;
case ICE_AQC_CAPS_LED:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
caps->led[phys_id] = true;
@ -2425,7 +2465,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2:
case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3:
{
u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0;
u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
caps->ext_topo_dev_img_ver_high[index] = number;
caps->ext_topo_dev_img_ver_low[index] = logical_id;
@ -2458,6 +2498,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->ext_topo_dev_img_prog_en[index]);
break;
}
case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
case ICE_AQC_CAPS_DYN_FLATTENING:
caps->dyn_flattening_en = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
prefix, caps->dyn_flattening_en);
break;
default:
/* Not one of the recognized common capabilities */
found = false;
@ -2653,6 +2701,29 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->num_vsi_allocd_to_host);
}
/**
* ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
* @cap: capability element to parse
*
* Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
*/
static void
ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap)
{
dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
dev_p->nac_topo.id);
}
/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
@ -2695,6 +2766,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
case ICE_AQC_CAPS_NAC_TOPOLOGY:
ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@ -2999,12 +3073,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool ice_is_100m_speed_supported(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E822C_10G_BASE_T:
case ICE_DEV_ID_E822C_SGMII:
case ICE_DEV_ID_E822L_10G_BASE_T:
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823C_SGMII:
return true;
default:
return false;
@ -3349,8 +3421,12 @@ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
*/
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
return ICE_FEC_AUTO;
if (caps & ICE_AQC_PHY_EN_AUTO_FEC) {
if (fec_options & ICE_AQC_PHY_FEC_DIS)
return ICE_FEC_DIS_AUTO;
else
return ICE_FEC_AUTO;
}
if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
@ -3641,6 +3717,12 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
/* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_DIS_AUTO:
/* Set No FEC and auto FEC */
if (!ice_fw_supports_fec_dis_auto(hw))
return ICE_ERR_NOT_SUPPORTED;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
/* fall-through */
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
@ -3909,7 +3991,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
desc.datalen = data_size;
desc.datalen = CPU_TO_LE16(data_size);
ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
ICE_NONDMA_TO_NONDMA);
cmd->start_address = CPU_TO_LE32(start_address);
@ -5932,7 +6014,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
cmd = &desc.params.read_write_gpio;
cmd->gpio_ctrl_handle = gpio_ctrl_handle;
cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@ -5960,7 +6042,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
cmd->gpio_ctrl_handle = gpio_ctrl_handle;
cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
@ -5971,6 +6053,58 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
return ICE_SUCCESS;
}
/**
* ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
* @maj: major version
* @min: minor version
* @patch: patch version
*
* Checks if the firmware is minimum version
*/
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
if (hw->api_maj_ver == maj) {
if (hw->api_min_ver > min)
return true;
if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
} else if (hw->api_maj_ver > maj) {
return true;
}
return false;
}
/**
* ice_is_fw_min_ver
* @hw: pointer to the hardware structure
* @branch: branch version
* @maj: major version
* @min: minor version
* @patch: patch version
*
* Checks if the firmware is minimum version
*/
static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
u8 patch)
{
if (hw->fw_branch == branch) {
if (hw->fw_maj_ver > maj)
return true;
if (hw->fw_maj_ver == maj) {
if (hw->fw_min_ver > min)
return true;
if (hw->fw_min_ver == min && hw->fw_patch >= patch)
return true;
}
} else if (hw->fw_branch > branch) {
return true;
}
return false;
}
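
A worked check of the ordering implemented above, with made-up
threshold numbers (branch 7, major 5, minor 3, patch 0) and the
comparison re-stated outside the driver:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool
    fw_at_least(uint8_t fb, uint8_t fmaj, uint8_t fmin, uint8_t fp,
        uint8_t b, uint8_t maj, uint8_t min, uint8_t p)
    {
        if (fb != b)
            return (fb > b);
        if (fmaj != maj)
            return (fmaj > maj);
        if (fmin != min)
            return (fmin > min);
        return (fp >= p);
    }

    int
    main(void)
    {
        assert(fw_at_least(7, 5, 3, 1, 7, 5, 3, 0));  /* newer patch passes */
        assert(!fw_at_least(7, 4, 9, 9, 7, 5, 3, 0)); /* older major fails */
        assert(fw_at_least(8, 0, 0, 0, 7, 5, 3, 0));  /* newer branch wins */
        return (0);
    }
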
/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
@ -5979,17 +6113,9 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
*/
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
return true;
if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
return true;
}
return false;
return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
ICE_FW_API_LINK_OVERRIDE_MIN,
ICE_FW_API_LINK_OVERRIDE_PATCH);
}
/**
@ -6254,19 +6380,12 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
*/
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
if (hw->mac_type != ICE_MAC_E810)
if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
return false;
if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
return true;
if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
return true;
}
return false;
return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
ICE_FW_API_LLDP_FLTR_MIN,
ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@ -6295,6 +6414,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_lldp_execute_pending_mib - execute LLDP pending MIB request
* @hw: pointer to HW struct
*/
enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
@ -6303,18 +6435,24 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
return true;
if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
return true;
} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
return true;
}
return false;
return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
ICE_FW_API_REPORT_DFLT_CFG_MIN,
ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
/**
* ice_fw_supports_fec_dis_auto
* @hw: pointer to the hardware structure
*
* Checks if the firmware supports FEC disable in Auto FEC mode
*/
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
ICE_FW_FEC_DIS_AUTO_MAJ,
ICE_FW_FEC_DIS_AUTO_MIN,
ICE_FW_FEC_DIS_AUTO_PATCH);
}
/**
* ice_is_fw_auto_drop_supported
* @hw: pointer to the hardware structure
@ -6328,3 +6466,4 @@ bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
return true;
return false;
}

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -60,7 +60,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@ -197,6 +197,7 @@ enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
@ -301,6 +302,7 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
@ -332,6 +334,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw);
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -94,6 +94,17 @@ bool ice_enable_tx_lldp_filter = true;
*/
bool ice_enable_health_events = true;
/**
* @var ice_tx_balance_en
* @brief boolean permitting the 5-layer scheduler topology enablement
*
* Global sysctl variable indicating whether the driver will allow the
* 5-layer scheduler topology feature to be enabled. It's _not_
* specifically enabling the feature, just allowing it depending on what
* the DDP package allows.
*/
bool ice_tx_balance_en = true;
/**
* @var ice_rdma_max_msix
* @brief maximum number of MSI-X vectors to reserve for RDMA interface
@ -137,4 +148,8 @@ SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN,
&ice_enable_tx_lldp_filter, 0,
"Drop Ethertype 0x88cc LLDP frames originating from non-HW sources");
SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, tx_balance_en, CTLFLAG_RWTUN,
&ice_tx_balance_en, 0,
"Enable 5-layer scheduler topology");
#endif /* _ICE_COMMON_SYSCTLS_H_ */

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -508,12 +508,18 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
return false;
} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
hw->api_maj_ver, hw->api_min_ver,
EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
} else {
/* Major API version is older than expected, log a warning */
ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
}
return true;
}
@ -665,10 +671,12 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
* @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
bool unloading)
{
struct ice_ctl_q_info *cq;
@ -678,7 +686,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
@ -694,18 +702,19 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
* @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@ -739,7 +748,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@ -809,7 +818,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
ice_shutdown_all_ctrlq(hw);
ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
ice_destroy_ctrlq_locks(&hw->mailboxq);

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -102,6 +102,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
else
cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE <<
ICE_AQ_LLDP_MIB_PENDING_S;
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@ -857,9 +860,9 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
enum ice_status status;
enum ice_adminq_opc opcode;
struct ice_aq_desc desc;
u16 opcode;
enum ice_status status;
cmd = &desc.params.lldp_agent_ctrl;
@ -1106,8 +1109,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*/
if (!err && sync && oper) {
dcbcfg->app[app_index].priority =
(app_prio & ice_aqc_cee_app_mask) >>
ice_aqc_cee_app_shift;
(u8)((app_prio & ice_aqc_cee_app_mask) >>
ice_aqc_cee_app_shift);
dcbcfg->app[app_index].selector = ice_app_sel_type;
dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
app_index++;
@ -1188,6 +1191,43 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
return ret;
}
/**
* ice_get_dcb_cfg_from_mib_change
* @pi: port information structure
* @event: pointer to the admin queue receive event
*
* Set DCB configuration from received MIB Change event
*/
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event)
{
struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >>
ICE_AQ_LLDP_DCBX_S);
switch (dcbx_mode) {
case ICE_AQ_LLDP_DCBX_IEEE:
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
break;
case ICE_AQ_LLDP_DCBX_CEE:
pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
event->msg_buf, pi);
break;
}
}
/**
* ice_init_dcb
* @hw: pointer to the HW struct
@ -1597,7 +1637,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = HTONL(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
buf[1] = dcbcfg->pfc.pfcena & 0xF;
buf[1] = dcbcfg->pfc.pfcena;
}
/**

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -249,6 +249,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event);
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
enum ice_status

sys/dev/ice/ice_ddp_common.c (new file, 2532 lines added)

File diff suppressed because it is too large.

@ -0,0 +1,478 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_DDP_H_
#define _ICE_DDP_H_
#include "ice_osdep.h"
#include "ice_adminq_cmd.h"
#include "ice_controlq.h"
#include "ice_status.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_ddp_state {
/* Indicates that this call to ice_init_pkg
* successfully loaded the requested DDP package
*/
ICE_DDP_PKG_SUCCESS = 0,
/* Generic error for already loaded errors, it is mapped later to
* the more specific one (one of the next 3)
*/
ICE_DDP_PKG_ALREADY_LOADED = -1,
/* Indicates that a DDP package of the same version has already been
* loaded onto the device by a previous call or by another PF
*/
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
/* The device has a DDP package that is not supported by the driver */
ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
/* The device has a compatible package
* (but different from the request) already loaded
*/
ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
/* The firmware loaded on the device is not compatible with
* the DDP package loaded
*/
ICE_DDP_PKG_FW_MISMATCH = -5,
/* The DDP package file is invalid */
ICE_DDP_PKG_INVALID_FILE = -6,
/* The version of the DDP package provided is higher than
* the driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
/* The version of the DDP package provided is lower than the
* driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
/* Missing security manifest in DDP pkg */
ICE_DDP_PKG_NO_SEC_MANIFEST = -9,
/* The RSA signature of the DDP package file provided is invalid */
ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10,
/* The DDP package file security revision is too low and not
* supported by firmware
*/
ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11,
/* Manifest hash mismatch */
ICE_DDP_PKG_MANIFEST_INVALID = -12,
/* Buffer hash mismatches manifest */
ICE_DDP_PKG_BUFFER_INVALID = -13,
/* Other errors */
ICE_DDP_PKG_ERR = -14,
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[STRUCT_HACK_VAR_LEN];
};
/* Package signing algorithm types */
#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_INVALID 0x00000000
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[STRUCT_HACK_VAR_LEN];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
struct ice_run_time_cfg_seg {
struct ice_generic_seg_hdr hdr;
u8 rsvd[8];
struct ice_buf_table buf_table;
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
struct ice_sign_seg {
struct ice_generic_seg_hdr hdr;
__le32 seg_id;
__le32 sign_type;
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
#define ICE_SIGN_SEG_RESERVED_COUNT 44
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
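Worked through for the 4 KiB buffer: the fixed ice_buf_hdr fields plus one section entry occupy 12 bytes (which is also why ICE_MIN_S_OFF and ICE_MIN_S_DATA_END below are 12), so the macro evaluates to (4096 - 12 - hd_sz) / ent_sz; with no extra header that leaves 4084 usable bytes, matching ICE_MAX_S_SZ.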
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
/* Label ICE runtime configuration section IDs */
#define ICE_SID_TX_5_LAYER_TOPO 0x10
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
struct ice_hw;
enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
enum ice_status
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
enum ice_status
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
void ice_release_global_cfg_lock(struct ice_hw *hw);
struct ice_generic_seg_hdr *
ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
struct ice_pkg_hdr *pkg_hdr);
enum ice_ddp_state
ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
enum ice_ddp_state
ice_get_pkg_info(struct ice_hw *hw);
void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
enum ice_status
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access);
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
struct ice_buf_hdr *
ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
bool
ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state);
void *
ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type, u32 *offset,
void *(*handler)(u32 sect_type, void *section,
u32 index, u32 *offset));
void *
ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
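The enumeration helpers above implement a simple iterator over a segment's buffer table, with ice_pkg_enum holding the cursor. A minimal sketch of the calling pattern, assuming the usual convention that the first call passes the segment and continuation calls pass NULL:
struct ice_pkg_enum state;
void *sect;
memset(&state, 0, sizeof(state));
/* first call supplies the segment; later calls pass NULL to continue */
sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_RXPARSER_BOOST_TCAM);
while (sect) {
	/* ... process one boost TCAM section ... */
	sect = ice_pkg_enum_section(NULL, &state, ICE_SID_RXPARSER_BOOST_TCAM);
}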
enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
bool ice_is_init_pkg_successful(enum ice_ddp_state state);
void ice_free_seg(struct ice_hw *hw);
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section);
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
#endif /* _ICE_DDP_H_ */

sys/dev/ice/ice_defs.h (new file, 71 lines)
View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_DEFS_H_
#define _ICE_DEFS_H_
#define ETH_ALEN 6
#define ETH_HEADER_LEN 14
#define BIT(a) (1UL << (a))
#ifndef BIT_ULL
#define BIT_ULL(a) (1ULL << (a))
#endif /* BIT_ULL */
#define BITS_PER_BYTE 8
#define _FORCE_
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
#define ICE_MAX_TRAFFIC_CLASS 8
#ifndef MIN_T
#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
#endif
#define IS_ASCII(_ch) ((_ch) < 0x80)
#define STRUCT_HACK_VAR_LEN
/**
* ice_struct_size - size of struct with C99 flexible array member
* @ptr: pointer to structure
* @field: flexible array member (last member of the structure)
* @num: number of elements of that flexible array member
*/
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
#endif /* _ICE_DEFS_H_ */
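To see ice_struct_size() in action, here is a sketch sizing a package header from ice_ddp.h above for ICE_PKG_CNT segment offsets; the allocation call is illustrative:
struct ice_pkg_hdr *hdr;
size_t len;
/* sizeof(*hdr) covers pkg_format_ver and seg_count; the flexible
 * seg_offset[] array adds ICE_PKG_CNT * sizeof(__le32) on top */
len = ice_struct_size(hdr, seg_offset, ICE_PKG_CNT);
hdr = (struct ice_pkg_hdr *)ice_malloc(hw, len);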

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -34,6 +34,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
@ -52,6 +53,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
#define ICE_SUBDEV_ID_E810T3 0x02E9
#define ICE_SUBDEV_ID_E810T4 0x02EA
#define ICE_SUBDEV_ID_E810T5 0x0010
#define ICE_SUBDEV_ID_E810T6 0x0012
#define ICE_SUBDEV_ID_E810T7 0x0011
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
@ -86,5 +92,4 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "1.34.2-k";
const char ice_driver_version[] = "1.37.7-k";
const uint8_t ice_major_version = 1;
const uint8_t ice_minor_version = 34;
const uint8_t ice_patch_version = 2;
const uint8_t ice_minor_version = 37;
const uint8_t ice_patch_version = 7;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 1.34.2-k")
PVID(vendor, devid, name " - 1.37.7-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.34.2-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.7-k")
/**
* @var ice_vendor_info_array
@ -130,9 +130,6 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0007, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x000C, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"),

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -68,6 +68,8 @@ enum feat_list {
ICE_FEATURE_HEALTH_STATUS,
ICE_FEATURE_FW_LOGGING,
ICE_FEATURE_HAS_PBA,
ICE_FEATURE_DCB,
ICE_FEATURE_TX_BALANCE,
/* Must be last entry */
ICE_FEATURE_COUNT
};

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,20 +35,6 @@
#include "ice_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
@ -61,12 +47,6 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
void
ice_init_prof_result_bm(struct ice_hw *hw);
enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
enum ice_status
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
enum ice_status
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
@ -89,8 +69,8 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
enum ice_status
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, struct ice_fv_word *es);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
enum ice_status
@ -103,11 +83,7 @@ enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
@ -119,10 +95,14 @@ ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
struct ice_buf_build *
ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
void **section);
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
void ice_fill_blk_tbls(struct ice_hw *hw);
/* To support per-PF tunnel entries, the package appends the PF number to
 * the label name; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
 */
#define ICE_TNL_PRE "TNL_"
void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val);
#endif /* _ICE_FLEX_PIPE_H_ */
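A hedged sketch of how a label parser might apply the ICE_TNL_PRE convention and ice_add_tunnel_hint() while walking label sections; the prefix and trailing-digit checks are illustrative, not the driver's exact matching logic:
size_t pre_len = strlen(ICE_TNL_PRE);
size_t name_len = strlen(label_name);
/* accept only "TNL_..." labels whose trailing digit is this PF's ID */
if (strncmp(label_name, ICE_TNL_PRE, pre_len) == 0 && name_len > pre_len &&
    (u8)(label_name[name_len - 1] - '0') == hw->pf_id)
	ice_add_tunnel_hint(hw, label_name, val);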

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -42,6 +42,7 @@ struct ice_fv_word {
u16 off; /* Offset within the protocol header */
u8 resvrd;
};
#pragma pack()
#define ICE_MAX_NUM_PROFILES 256
@ -51,251 +52,6 @@ struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[STRUCT_HACK_VAR_LEN];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[STRUCT_HACK_VAR_LEN];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_CAM 50
#define ICE_SID_RXPARSER_NOMATCH_CAM 51
#define ICE_SID_RXPARSER_IMEM 52
#define ICE_SID_RXPARSER_XLT0_BUILDER 53
#define ICE_SID_RXPARSER_NODE_PTYPE 54
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_PROTO_GRP 57
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_RXPARSER_XLT0 59
#define ICE_SID_TXPARSER_CAM 60
#define ICE_SID_TXPARSER_NOMATCH_CAM 61
#define ICE_SID_TXPARSER_IMEM 62
#define ICE_SID_TXPARSER_XLT0_BUILDER 63
#define ICE_SID_TXPARSER_NODE_PTYPE 64
#define ICE_SID_TXPARSER_MARKER_PTYPE 65
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_TXPARSER_PROTO_GRP 67
#define ICE_SID_TXPARSER_METADATA_INIT 68
#define ICE_SID_TXPARSER_XLT0 69
#define ICE_SID_RXPARSER_INIT_REDIR 70
#define ICE_SID_TXPARSER_INIT_REDIR 71
#define ICE_SID_RXPARSER_MARKER_GRP 72
#define ICE_SID_TXPARSER_MARKER_GRP 73
#define ICE_SID_RXPARSER_LAST_PROTO 74
#define ICE_SID_TXPARSER_LAST_PROTO 75
#define ICE_SID_RXPARSER_PG_SPILL 76
#define ICE_SID_TXPARSER_PG_SPILL 77
#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
#define ICE_SID_RXPARSER_FLAG_REDIR 97
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
#define ICE_SID_LBL_RESERVED_12 0x80000012
#define ICE_SID_LBL_RESERVED_13 0x80000013
#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
#define ICE_SID_LBL_PTYPE 0x80000016
#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
#define ICE_SID_LBL_FLAG 0x80000023
#define ICE_SID_LBL_REG 0x80000024
#define ICE_SID_LBL_SW_PTG 0x80000025
#define ICE_SID_LBL_ACL_PTG 0x80000026
#define ICE_SID_LBL_PE_PTG 0x80000027
#define ICE_SID_LBL_RSS_PTG 0x80000028
#define ICE_SID_LBL_FD_PTG 0x80000029
#define ICE_SID_LBL_SW_VSIG 0x8000002A
#define ICE_SID_LBL_ACL_VSIG 0x8000002B
#define ICE_SID_LBL_PE_VSIG 0x8000002C
#define ICE_SID_LBL_RSS_VSIG 0x8000002D
#define ICE_SID_LBL_FD_VSIG 0x8000002E
#define ICE_SID_LBL_PTYPE_META 0x8000002F
#define ICE_SID_LBL_SW_PROFID 0x80000030
#define ICE_SID_LBL_ACL_PROFID 0x80000031
#define ICE_SID_LBL_PE_PROFID 0x80000032
#define ICE_SID_LBL_RSS_PROFID 0x80000033
#define ICE_SID_LBL_FD_PROFID 0x80000034
#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
#define ICE_PTYPE_IPV4FRAG_PAY 22
@ -401,10 +157,18 @@ struct ice_sw_fv_list_entry {
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
union {
struct {
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
} /* udp_tunnel */;
struct {
__le16 hv_vlan_id_key;
__le16 hv_etype_key;
} vlan;
};
u8 tcam_search_key;
};
#pragma pack()
@ -457,33 +221,15 @@ struct ice_prof_redir_section {
u8 redir_value[STRUCT_HACK_VAR_LEN];
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
/* Tunnel enabling */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
TNL_GTP,
TNL_GTPC,
TNL_GTPU,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@ -726,10 +472,13 @@ struct ice_chs_chg {
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
enum ice_prof_type {
ICE_PROF_INVALID = 0x0,
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_ALL = 0x6,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -400,6 +400,7 @@ struct ice_flow_prof_params {
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
};
@ -566,8 +567,8 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld)
{
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
u8 fv_words = (u8)hw->blk[params->blk].es.fvw;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
u16 off;
@ -593,7 +594,6 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
@ -606,7 +606,6 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
case ICE_FLOW_FIELD_IDX_IPV6_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
/* TTL and PROT share the same extraction seq. entry.
* Each is considered a sibling to the other in terms of sharing
* the same extraction sequence entry.
@ -666,7 +665,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
flds[fld].xtrct.prot_id = prot_id;
flds[fld].xtrct.prot_id = (u8)prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
@ -702,7 +701,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
else
idx = params->es_cnt;
params->es[idx].prot_id = prot_id;
params->es[idx].prot_id = (u8)prot_id;
params->es[idx].off = off;
params->es_cnt++;
}
@ -952,8 +951,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->es);
status = ice_add_prof(hw, blk, prof_id, params->ptypes, params->es);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
@ -1286,13 +1284,13 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
{
struct ice_flow_seg_info *seg;
u64 val;
u8 i;
u16 i;
/* set inner most segment */
seg = &segs[seg_cnt - 1];
ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
ICE_FLOW_FIELD_IDX_MAX)
(u16)ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,7 +72,7 @@ struct ice_fwlog_cfg {
/* options used to configure firmware logging */
u16 options;
/* minimum number of log events sent per Admin Receive Queue event */
u8 log_resolution;
u16 log_resolution;
};
void ice_fwlog_set_support_ena(struct ice_hw *hw);

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -285,10 +285,16 @@ struct ice_softc {
/* Ethertype filters enabled */
bool enable_tx_fc_filter;
bool enable_tx_lldp_filter;
/* Other tunable flags */
bool enable_health_events;
/* 5-layer scheduler topology enabled */
bool tx_balance_en;
/* Allow additional non-standard FEC mode */
bool allow_no_fec_mod_in_auto;
int rebuild_ticks;
/* driver state flags, only access using atomic functions */
@ -297,6 +303,8 @@ struct ice_softc {
/* NVM link override settings */
struct ice_link_default_override_tlv ldo_tlv;
u16 fw_debug_dump_cluster_mask;
struct sx *iflib_ctx_lock;
/* Tri-state feature flags (capable/enabled) */

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -55,7 +55,7 @@ static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m);
static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi);
/* Macro to help extract the NIC mode flexible Rx descriptor fields from the
advanced 32-byte Rx descriptors.
@ -79,7 +79,7 @@ struct if_txrx ice_txrx = {
.ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
.ift_rxd_refill = ice_ift_rxd_refill,
.ift_rxd_flush = ice_ift_rxd_flush,
.ift_txq_select = ice_ift_queue_select,
.ift_txq_select_v2 = ice_ift_queue_select,
};
/**
@ -284,7 +284,6 @@ static int
ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
struct ice_softc *sc = (struct ice_softc *)arg;
if_softc_ctx_t scctx = sc->scctx;
struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
union ice_32b_rx_flex_desc *cur;
u16 status0, plen, ptype;
@ -342,7 +341,7 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Get packet type and set checksum flags */
ptype = le16toh(cur->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
ice_rx_checksum(rxq, &ri->iri_csum_flags,
&ri->iri_csum_data, status0, ptype);
@ -408,9 +407,10 @@ ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
}
static qidx_t
ice_ift_queue_select(void *arg, struct mbuf *m)
ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi)
{
struct ice_softc *sc = (struct ice_softc *)arg;
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_vsi *vsi = &sc->pf_vsi;
u16 tc_base_queue, tc_qcount;
u8 up, tc;
@ -431,12 +431,21 @@ ice_ift_queue_select(void *arg, struct mbuf *m)
return (0);
}
/* Use default TC unless overridden */
/* Use default TC unless overridden later */
tc = 0; /* XXX: Get default TC for traffic if >1 TC? */
if (m->m_flags & M_VLANTAG) {
local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg;
#if defined(INET) || defined(INET6)
if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) &&
(pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) {
u8 dscp_val = pi->ipi_ip_tos >> 2;
tc = local_dcbx_cfg->dscp_map[dscp_val];
} else
#endif /* defined(INET) || defined(INET6) */
if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */
up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag);
tc = sc->hw.port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[up];
tc = local_dcbx_cfg->etscfg.prio_table[up];
}
tc_base_queue = vsi->tc_info[tc].qoffset;
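For reference on the DSCP branch above: ipi_ip_tos carries the full 8-bit TOS/Traffic Class byte, and DSCP is its upper six bits, hence the >> 2. EF-marked traffic (TOS 0xB8), for example, yields DSCP 0xB8 >> 2 = 46, so the transmit queue is picked from the TC stored in dscp_map[46].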

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -948,10 +948,10 @@ struct ice_tx_ctx_desc {
__le64 qw1;
};
#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */
#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */
#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */
#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */
#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */
#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */
#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */
#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */
#define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -119,6 +119,9 @@ extern bool ice_enable_tx_lldp_filter;
/* global sysctl indicating whether FW health status events should be enabled */
extern bool ice_enable_health_events;
/* global sysctl indicating whether to enable 5-layer scheduler topology */
extern bool ice_tx_balance_en;
/**
* @struct ice_bar_info
* @brief PCI BAR mapping information
@ -203,6 +206,16 @@ struct ice_bar_info {
#define ICE_NVM_ACCESS \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
/**
* ICE_DEBUG_DUMP
* @brief Private ioctl command number for retrieving debug dump data
*
The ioctl command number used by a userspace tool to request debug dump data
from the firmware through the driver.
*/
#define ICE_DEBUG_DUMP \
(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6)
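Evaluated step by step, the packing is (0x45 << 4) + 0x31 = 0x481, then (0x481 << 4) + 0x4B = 0x485B, then (0x485B << 4) + 0x47 = 0x485F7, and the final shift-and-OR selects the command: ICE_DEBUG_DUMP comes out to 0x485F76, and ICE_NVM_ACCESS above to 0x485F75.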
#define ICE_AQ_LEN 1023
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
@ -329,6 +342,7 @@ enum ice_rx_dtype {
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"
#define ICE_FEC_STRING_DIS_AUTO "Auto (w/ No-FEC)"
/* Strings used for displaying Flow Control mode
*
@ -364,6 +378,12 @@ enum ice_rx_dtype {
ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_MCAST_RX)
/*
* Only certain cluster IDs are valid for the FW debug dump functionality,
* so define a mask of those here.
*/
#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x1af
struct ice_softc;
/**
@ -548,6 +568,20 @@ struct ice_vsi {
struct ice_vsi_hw_stats hw_stats;
};
/**
* @struct ice_debug_dump_cmd
* @brief arguments/return value for debug dump ioctl
*/
struct ice_debug_dump_cmd {
u32 offset; /* offset to read/write from table, in bytes */
u16 cluster_id;
u16 table_id;
u16 data_size; /* size of data field, in bytes */
u16 reserved1;
u32 reserved2;
u8 data[];
};
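A minimal userspace sketch of driving this interface, assuming the command travels the private-ioctl path handled by ice_handle_debug_dump_ioctl(); the interface name, cluster ID, and buffer size are illustrative, and error handling is elided:
/* needs <sys/ioctl.h>, <sys/sockio.h>, <net/if.h>, <string.h>, <stdlib.h> */
struct ice_debug_dump_cmd *dump;
struct ifdrv ifd;
size_t len = sizeof(*dump) + 4096;	/* 4 KB data area, illustrative */
int s = socket(AF_INET, SOCK_DGRAM, 0);
dump = calloc(1, len);
dump->cluster_id = 1;	/* must be allowed by ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK */
dump->data_size = 4096;
strlcpy(ifd.ifd_name, "ice0", sizeof(ifd.ifd_name));
ifd.ifd_cmd = ICE_DEBUG_DUMP;
ifd.ifd_len = len;
ifd.ifd_data = dump;
ioctl(s, SIOCGDRVSPEC, &ifd);	/* dump->data now holds up to data_size bytes */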
/**
* @enum ice_state
* @brief Driver state flags
@ -574,6 +608,7 @@ enum ice_state {
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
ICE_STATE_MULTIPLE_TCS,
ICE_STATE_DO_FW_DEBUG_DUMP,
/* This entry must be last */
ICE_STATE_LAST,
};
@ -832,8 +867,8 @@ void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
void ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status);
enum ice_status ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
int ice_replay_all_vsi_cfg(struct ice_softc *sc);
@ -865,5 +900,7 @@ void ice_free_intr_tracking(struct ice_softc *sc);
void ice_set_default_local_lldp_mib(struct ice_softc *sc);
void ice_init_health_events(struct ice_softc *sc);
void ice_cfg_pba_num(struct ice_softc *sc);
int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg);
#endif /* _ICE_LIB_H_ */

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -406,7 +406,7 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
/* Report the number of words successfully read */
*words = bytes / 2;
*words = (u16)(bytes / 2);
/* Byte swap the words up to the amount we actually read */
for (i = 0; i < *words; i++)
@ -983,7 +983,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
struct ice_orom_civd_info tmp;
enum ice_status status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
@ -992,6 +991,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* equal 0.
*/
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
enum ice_status status;
u8 sum = 0, i;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
@ -1726,22 +1726,41 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
/**
* ice_nvm_write_activate
* @hw: pointer to the HW struct
* @cmd_flags: NVM activate admin command bits (banks to be validated)
* @cmd_flags: flags for write activate command
* @response_flags: response indicators from firmware
*
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*
* cmd_flags controls which banks to activate, the preservation level to use
* when activating the NVM bank, and whether an EMP reset is required for
* activation.
*
* Note that the 16-bit cmd_flags value is split between two separate 1-byte
* flag values in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware, indicating status such as
* whether an EMP reset is enabled.
*/
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
enum ice_status
ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
enum ice_status status;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = cmd_flags;
cmd->cmd_flags = ICE_LO_BYTE(cmd_flags);
cmd->offset_high = ICE_HI_BYTE(cmd_flags);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status && response_flags)
*response_flags = cmd->cmd_flags;
return status;
}
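For example, cmd_flags = 0x0102 stores 0x02 (ICE_LO_BYTE) in cmd->cmd_flags and 0x01 (ICE_HI_BYTE) in cmd->offset_high; firmware reassembles the two bytes into the full 16-bit flag set.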
/**
@ -1847,12 +1866,12 @@ ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
/* Update flash data */
status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data,
true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
false, ICE_AQC_NVM_SPECIAL_UPDATE, NULL);
if (status)
goto exit_release_res;
/* Dump the Shadow RAM to the flash */
status = ice_nvm_write_activate(hw, 0);
status = ice_nvm_write_activate(hw, 0, NULL);
exit_release_res:
ice_release_nvm(hw);

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -155,5 +155,6 @@ enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
enum ice_status
ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
#endif /* _ICE_NVM_H_ */

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -59,6 +59,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@ -73,6 +74,8 @@ enum ice_protocol_type {
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
ICE_PROTOCOL_LAST
};
@ -104,6 +107,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_GTP_IPV4_UDP,
ICE_SW_TUN_GTP_IPV6_TCP,
ICE_SW_TUN_GTP_IPV6_UDP,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
ICE_SW_TUN_IPV4_GTPU_IPV4,
ICE_SW_TUN_IPV4_GTPU_IPV6,
ICE_SW_TUN_IPV6_GTPU_IPV4,
@ -141,6 +146,7 @@ enum ice_prot_id {
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
ICE_PROT_IPV6_IL_IL = 42,
ICE_PROT_IPV6_NEXT_PROTO = 43,
ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
@ -165,9 +171,11 @@ enum ice_prot_id {
#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
#define ICE_NAN_OFFSET 511
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@ -184,12 +192,15 @@ enum ice_prot_id {
*/
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_GRE_OF_HW 64 /* NVGRE */
#define ICE_PPPOE_HW 103
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MDID 20
#define ICE_TUN_FLAG_MDID_OFF(word) \
(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
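With ICE_MDID_SIZE of 2 and the tunnel-flag MDID now at word 20, ICE_TUN_FLAG_MDID_OFF(0) works out to byte offset 40 and ICE_TUN_FLAG_MDID_OFF(1) to 42.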
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
@ -287,6 +298,13 @@ struct ice_udp_gtp_hdr {
u8 qfi;
u8 rsvrd;
};
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
__be16 session_id;
__be16 length;
__be16 ppp_prot_id; /* control and data only */
};
struct ice_nvgre {
__be16 flags;
@ -305,6 +323,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
};
/* This is mapping table entry that maps every word within a given protocol

View File

@ -241,9 +241,7 @@ ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_
switch(res->res_type) {
case ICE_RDMA_QSET_ALLOC:
dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg;
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
ena_tc |= BIT(dcbx_cfg->etscfg.prio_table[i]);
}
ena_tc = ice_dcb_get_tc_map(dcbx_cfg);
ice_debug(hw, ICE_DBG_RDMA, "%s:%d ena_tc=%x\n", __func__, __LINE__, ena_tc);
status = ice_cfg_vsi_rdma(hw->port_info, vsi->idx, ena_tc,
@ -401,6 +399,10 @@ ice_rdma_cp_qos_info(struct ice_hw *hw, struct ice_dcbx_cfg *dcbx_cfg,
qos_info->apps[j].prot_id = dcbx_cfg->app[j].prot_id;
qos_info->apps[j].selector = dcbx_cfg->app[j].selector;
}
/* Gather DSCP-to-TC mapping and QoS/PFC mode */
memcpy(qos_info->dscp_map, dcbx_cfg->dscp_map, sizeof(qos_info->dscp_map));
qos_info->pfc_mode = dcbx_cfg->pfc_mode;
}
/**
@ -481,6 +483,7 @@ int
ice_rdma_register(struct ice_rdma_info *info)
{
struct ice_rdma_entry *entry;
struct ice_softc *sc;
int err = 0;
sx_xlock(&ice_rdma.mtx);
@ -513,6 +516,12 @@ ice_rdma_register(struct ice_rdma_info *info)
*/
LIST_FOREACH(entry, &ice_rdma.peers, node) {
kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class);
/* Gather DCB/QOS info into peer */
sc = __containerof(entry, struct ice_softc, rdma_entry);
memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info));
ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg,
&entry->peer.initial_qos_info);
IRDMA_PROBE(&entry->peer);
if (entry->initiated)
IRDMA_OPEN(&entry->peer);

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -468,7 +468,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
static enum ice_status
enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@ -909,6 +909,33 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
hw->max_cgds = 0;
}
/**
* ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes
* @hw: pointer to the HW struct
* @num_nodes: the number of nodes whose attributes to configure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
* Configure Node Attributes (0x0417)
*/
enum ice_status
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_node_attr *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.node_attr;
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_cfg_node_attr);
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
cmd->num_entries = CPU_TO_LE16(num_nodes);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
* ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
* @hw: pointer to the HW struct
@ -1173,12 +1200,11 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
if (layer > hw->sw_entry_point_layer)
return layer;
}
if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
/* qgroup and VSI layers are same */
return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
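Concretely: with the full 9-layer topology the VSI nodes sit at layer 9 - ICE_VSI_LAYER_OFFSET = 5; with the flattened 5-layer topology the VSI and queue-group nodes coincide at layer 5 - ICE_QGRP_LAYER_OFFSET = 3; any other topology falls back to sw_entry_point_layer.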
@ -1195,12 +1221,8 @@ static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
if (layer > hw->sw_entry_point_layer)
return layer;
}
if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@ -1417,9 +1439,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
if (status)
goto sched_query_out;
hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
hw->num_tx_sched_layers =
(u8)LE16_TO_CPU(buf->sched_props.logical_levels);
hw->num_tx_sched_phys_layers =
LE16_TO_CPU(buf->sched_props.phys_levels);
(u8)LE16_TO_CPU(buf->sched_props.phys_levels);
hw->flattened_layers = buf->sched_props.flattening_bitmap;
hw->max_cgds = buf->sched_props.max_pf_cgds;
@ -1585,10 +1608,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u8 qgrp_layer, vsi_layer;
u16 max_children;
u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@ -1599,6 +1623,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
/* If the queue group and VSI layers are the same, then queues
 * are all attached directly to the VSI
 */
if (qgrp_layer == vsi_layer)
return vsi_node;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@ -1748,7 +1778,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
@ -1757,6 +1786,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
enum ice_status status;
if (!parent)
return ICE_ERR_CFG;
@ -1850,7 +1881,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
@ -1860,6 +1890,8 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
enum ice_status status;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
@ -3928,7 +3960,7 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
u16 wakeup = 0;
/* Get the wakeup integer value */
bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
@ -3937,7 +3969,7 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
* Convert Integer value to a constant multiplier
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
wakeup_a = DIV_S64(ICE_RL_PROF_MULTIPLIER *
wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER *
hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
@ -3980,13 +4012,13 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
return status;
/* Bytes per second from Kbps */
bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE);
bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE);
/* encode is 6 bits but really useful are 5 bits */
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
ts_rate = DIV_S64(hw->psm_clk_freq,
ts_rate = DIV_S64((s64)hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
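The added (s64) casts here are not cosmetic: bw is a 32-bit rate in Kbps, so bw * 1000 overflows 32-bit arithmetic once bw exceeds 2,147,483 (about 2.1 Gb/s); at 100 Gb/s, bw = 100,000,000 and the product is 10^11. Promoting to 64 bits before the multiply keeps DIV_S64's inputs exact.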
@ -4045,7 +4077,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
enum ice_status status;
u8 profile_type;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
if (!hw || layer_num >= hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@ -4061,8 +4093,6 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
return NULL;
}
if (!hw)
return NULL;
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
@ -4264,7 +4294,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
enum ice_status status = ICE_SUCCESS;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
if (!hw || layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
@ -4844,7 +4874,6 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc)
{
struct ice_sched_node *node = NULL;
struct ice_sched_node *child_node;
switch (agg_type) {
case ICE_AGG_TYPE_VSI: {
@ -4872,16 +4901,19 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
case ICE_AGG_TYPE_Q:
/* The current implementation allows single queue to modify */
node = ice_sched_get_node(pi, id);
node = ice_sched_find_node_by_teid(pi->root, id);
break;
case ICE_AGG_TYPE_QG:
case ICE_AGG_TYPE_QG: {
struct ice_sched_node *child_node;
/* The current implementation allows single qg to modify */
child_node = ice_sched_get_node(pi, id);
child_node = ice_sched_find_node_by_teid(pi->root, id);
if (!child_node)
break;
node = child_node->parent;
break;
}
default:
break;


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -35,6 +35,9 @@
#include "ice_common.h"
#define ICE_SCHED_5_LAYERS 5
#define ICE_SCHED_9_LAYERS 9
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
@ -106,10 +109,18 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd);
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -624,6 +624,8 @@ ice_fec_str(enum ice_fec_mode mode)
return ICE_FEC_STRING_BASER;
case ICE_FEC_NONE:
return ICE_FEC_STRING_NONE;
case ICE_FEC_DIS_AUTO:
return ICE_FEC_STRING_DIS_AUTO;
}
/* The compiler generates errors on unhandled enum values if we omit
@ -762,6 +764,8 @@ ice_fwd_act_str(enum ice_sw_fwd_act_type action)
return "FWD_TO_QGRP";
case ICE_DROP_PACKET:
return "DROP_PACKET";
case ICE_LG_ACTION:
return "LG_ACTION";
case ICE_INVAL_ACT:
return "INVAL_ACT";
}
@ -1037,6 +1041,8 @@ ice_state_to_str(enum ice_state state)
return "LLDP_RX_FLTR_FROM_DRIVER";
case ICE_STATE_MULTIPLE_TCS:
return "MULTIPLE_TCS";
case ICE_STATE_DO_FW_DEBUG_DUMP:
return "DO_FW_DEBUG_DUMP";
case ICE_STATE_LAST:
return NULL;
}


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,6 +30,7 @@
*/
/*$FreeBSD$*/
#include "ice_common.h"
#include "ice_switch.h"
#include "ice_flex_type.h"
#include "ice_flow.h"
@ -39,6 +40,7 @@
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
#define ICE_IPV6_ETHER_ID 0x86DD
#define ICE_PPP_IPV6_PROTO_ID 0x0057
#define ICE_ETH_P_8021Q 0x8100
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
@ -60,6 +62,9 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
static bool
ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@ -819,6 +824,8 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
else /* remove VSI from mirror rule */
mr_list[i] = CPU_TO_LE16(id);
}
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
}
cmd = &desc.params.add_update_rule;
@ -902,6 +909,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
@ -1002,7 +1010,7 @@ ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
static enum ice_status
enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@ -1047,8 +1055,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@ -1517,7 +1523,7 @@ ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_aqc_opc_update_sw_rules, NULL);
if (!status) {
m_ent->lg_act_idx = l_id;
m_ent->counter_index = counter_id;
m_ent->counter_index = (u8)counter_id;
}
ice_free(hw, lg_act);
@ -1588,6 +1594,7 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
lkup_type == ICE_SW_LKUP_DFLT ||
lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
@ -1748,11 +1755,12 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*/
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = ICE_SUCCESS;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
struct ice_switch_info *sw;
sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@ -1811,7 +1819,6 @@ ice_add_update_vsi_list(struct ice_hw *hw,
{
enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id = 0;
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
return ICE_ERR_NOT_IMPL;
@ -1936,7 +1943,7 @@ ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
* handle element. This can be extended further to search VSI list with more
* than 1 vsi_count. Returns pointer to VSI list entry if found.
*/
static struct ice_vsi_list_map_info *
struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
u16 *vsi_list_id)
{
@ -2352,7 +2359,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI)
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
@ -2780,6 +2788,83 @@ ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
}
/**
* ice_get_lg_act_aqc_res_type - get resource type for a large action
* @res_type: resource type, filled in on success
* @num_acts: number of actions to hold with a large action entry
*
* Get resource type for a large action depending on the number
* of single actions that it contains.
*/
static enum ice_status
ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
{
if (!res_type)
return ICE_ERR_BAD_PTR;
/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
* If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
* If num_acts is greater than 2, then use
* ICE_AQC_RES_TYPE_WIDE_TABLE_4.
* The num_acts cannot be equal to 0 or greater than 4.
*/
switch (num_acts) {
case 1:
*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1;
break;
case 2:
*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2;
break;
case 3:
case 4:
*res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4;
break;
default:
return ICE_ERR_PARAM;
}
return ICE_SUCCESS;
}
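The wide-table selection above can be exercised on its own. A minimal standalone sketch, assuming placeholder values for the ICE_AQC_RES_TYPE_WIDE_TABLE_* constants (the real ones live in the adminq command definitions):

#include <stdio.h>

/* Placeholder values; the real ICE_AQC_RES_TYPE_WIDE_TABLE_* constants
 * are defined in the adminq command header. */
enum { WIDE_TABLE_1 = 0x44, WIDE_TABLE_2 = 0x45, WIDE_TABLE_4 = 0x46 };

static int lg_act_res_type(int num_acts)
{
	switch (num_acts) {
	case 1:
		return WIDE_TABLE_1;
	case 2:
		return WIDE_TABLE_2;
	case 3:
	case 4:
		return WIDE_TABLE_4;	/* 3 and 4 actions share one entry size */
	default:
		return -1;		/* 0 or more than 4 actions is invalid */
	}
}

int main(void)
{
	for (int n = 0; n <= 5; n++)
		printf("num_acts=%d -> res_type=%d\n", n, lg_act_res_type(n));
	return 0;
}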
/**
* ice_alloc_res_lg_act - add large action resource
* @hw: pointer to the hardware structure
* @l_id: large action ID, filled in on success
* @num_acts: number of actions to hold with a large action entry
*/
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
enum ice_status status;
u16 buf_len, res_type;
if (!l_id)
return ICE_ERR_BAD_PTR;
status = ice_get_lg_act_aqc_res_type(&res_type, num_acts);
if (status)
return status;
/* Allocate resource for large action */
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->res_type = CPU_TO_LE16(res_type);
sw_buf->num_elems = CPU_TO_LE16(1);
status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
ice_aqc_opc_alloc_res, NULL);
if (!status)
*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
ice_free(hw, sw_buf);
return status;
}
/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
@ -2832,79 +2917,83 @@ enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction)
{
struct ice_aqc_sw_rules_elem *s_rule;
struct ice_fltr_list_entry f_list_entry;
struct ice_sw_recipe *recp_list;
struct ice_fltr_info f_info;
struct ice_hw *hw = pi->hw;
enum ice_adminq_opc opcode;
enum ice_status status;
u16 s_rule_size;
u8 lport = pi->lport;
u16 hw_vsi_id;
recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
f_info.lkup_type = ICE_SW_LKUP_DFLT;
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = pi->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
if (!set)
f_info.fltr_rule_id =
pi->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
if (!set)
f_info.fltr_rule_id =
pi->dflt_tx_vsi_rule_id;
}
f_list_entry.fltr_info = f_info;
if (set)
opcode = ice_aqc_opc_add_sw_rules;
status = ice_add_rule_internal(hw, recp_list, lport,
&f_list_entry);
else
opcode = ice_aqc_opc_remove_sw_rules;
status = ice_remove_rule_internal(hw, recp_list,
&f_list_entry);
ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
return status;
}
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
if (status || !(f_info.flag & ICE_FLTR_TX_RX))
goto out;
if (set) {
u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
/**
* ice_check_if_dflt_vsi - check if VSI is default VSI
* @pi: pointer to the port_info structure
* @vsi_handle: vsi handle to check for in filter list
* @rule_exists: indicates whether any VSIs are in the rule list
*
* Checks whether the VSI is in the default VSI list, and also indicates
* whether any default VSI rules exist
*/
bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
bool *rule_exists)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
struct LIST_HEAD_TYPE *rule_head;
struct ice_sw_recipe *recp_list;
struct ice_lock *rule_lock;
bool ret = false;
recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
rule_lock = &recp_list->filt_rule_lock;
rule_head = &recp_list->filt_rules;
if (f_info.flag & ICE_FLTR_TX) {
pi->dflt_tx_vsi_num = hw_vsi_id;
pi->dflt_tx_vsi_rule_id = index;
} else if (f_info.flag & ICE_FLTR_RX) {
pi->dflt_rx_vsi_num = hw_vsi_id;
pi->dflt_rx_vsi_rule_id = index;
}
} else {
if (f_info.flag & ICE_FLTR_TX) {
pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
} else if (f_info.flag & ICE_FLTR_RX) {
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
ice_acquire_lock(rule_lock);
if (rule_exists && !LIST_EMPTY(rule_head))
*rule_exists = true;
LIST_FOR_EACH_ENTRY(fm_entry, rule_head,
ice_fltr_mgmt_list_entry, list_entry) {
if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
ret = true;
break;
}
}
out:
ice_free(hw, s_rule);
return status;
ice_release_lock(rule_lock);
return ret;
}
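Stripped of the driver's list macros and locking, the scan above reduces to a membership test that also reports whether any default-VSI rule exists at all. A standalone sketch of that shape, with ice_vsi_uses_fltr() simplified to a plain handle comparison for illustration:

#include <stdbool.h>
#include <stdio.h>

struct fltr { unsigned short vsi_handle; struct fltr *next; };

static bool check_dflt_vsi(const struct fltr *head, unsigned short vsi,
			   bool *rule_exists)
{
	if (rule_exists)
		*rule_exists = (head != NULL);	/* any rule at all? */
	for (const struct fltr *f = head; f != NULL; f = f->next)
		if (f->vsi_handle == vsi)	/* this VSI has a rule */
			return true;
	return false;
}

int main(void)
{
	struct fltr b = { 7, NULL };
	struct fltr a = { 3, &b };
	bool any = false;

	printf("vsi 7 is default: %d\n", check_dflt_vsi(&a, 7, &any));
	printf("rules exist: %d\n", any);
	return 0;
}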
/**
@ -3546,6 +3635,13 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
list_entry) {
/* Avoid enabling or disabling vlan zero twice when in double
* vlan mode
*/
if (ice_is_dvm_ena(hw) &&
list_itr->fltr_info.l_data.vlan.tpid == 0)
continue;
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = _ice_clear_vsi_promisc(hw, vsi_handle,
@ -3555,7 +3651,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
status = _ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id,
lport, sw);
if (status)
if (status && status != ICE_ERR_ALREADY_EXISTS)
break;
}
@ -3624,7 +3720,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
break;
case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_PROMISC_VLAN:
ice_remove_promisc(hw, lkup, &remove_list_head);
ice_remove_promisc(hw, (u8)lkup, &remove_list_head);
break;
case ICE_SW_LKUP_MAC_VLAN:
ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
@ -3787,53 +3883,6 @@ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
counter_id);
}
/**
* ice_alloc_res_lg_act - add large action resource
* @hw: pointer to the hardware structure
* @l_id: large action ID to fill it in
* @num_acts: number of actions to hold with a large action entry
*/
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
enum ice_status status;
u16 buf_len;
if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
return ICE_ERR_PARAM;
/* Allocate resource for large action */
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(1);
/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
* If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
* If num_acts is greater than 2, then use
* ICE_AQC_RES_TYPE_WIDE_TABLE_4.
* The num_acts cannot exceed 4. This was ensured at the
* beginning of the function.
*/
if (num_acts == 1)
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
else if (num_acts == 2)
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
else
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
ice_aqc_opc_alloc_res, NULL);
if (!status)
*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
ice_free(hw, sw_buf);
return status;
}
/**
* ice_add_mac_with_sw_marker - add filter with sw marker
* @hw: pointer to the hardware structure
@ -4201,10 +4250,12 @@ enum ice_status
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_switch_info *sw;
enum ice_status status = ICE_SUCCESS;
u8 i;
sw = hw->switch_info;
/* Update the recipes that were created */
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct LIST_HEAD_TYPE *head;


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,7 +33,7 @@
#ifndef _ICE_SWITCH_H_
#define _ICE_SWITCH_H_
#include "ice_common.h"
#include "ice_type.h"
#include "ice_protocol_type.h"
#define ICE_SW_CFG_MAX_BUF_LEN 2048
@ -43,6 +43,14 @@
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
#define ICE_PROFID_IPV4_GTPC_TEID 41
#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
#define ICE_PROFID_IPV4_GTPU_TEID 43
#define ICE_PROFID_IPV6_GTPC_TEID 44
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP 70
#define DUMMY_ETH_HDR_LEN 16
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
@ -232,6 +240,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
u16 lg_id;
struct ice_adv_rule_flags_info flags_info;
};
@ -382,6 +391,42 @@ enum ice_promisc_flags {
ICE_PROMISC_VLAN_TX = 0x80,
};
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
void
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
enum ice_sw_tunnel_type tun_type, const u8 **pkt,
u16 *pkt_len,
const struct ice_dummy_pkt_offsets **offsets);
enum ice_status
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_aqc_sw_rules_elem *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
const struct ice_dummy_pkt_offsets *offsets);
enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid);
struct ice_adv_fltr_mgmt_list_entry *
ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, u16 recp_id,
struct ice_adv_rule_info *rinfo);
enum ice_status
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
struct ice_adv_rule_info *new_fltr);
struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
u16 *vsi_list_id);
/* VSI related commands */
enum ice_status
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
@ -468,6 +513,8 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction);
bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
bool *rule_exists);
enum ice_status
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
@ -498,4 +545,7 @@ ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle);
void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,48 +33,15 @@
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
#define ETH_ALEN 6
#define ETH_HEADER_LEN 14
#define BIT(a) (1UL << (a))
#ifndef BIT_ULL
#define BIT_ULL(a) (1ULL << (a))
#endif /* BIT_ULL */
#define BITS_PER_BYTE 8
#define _FORCE_
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
#define ICE_MAX_TRAFFIC_CLASS 8
#ifndef MIN_T
#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
#endif
#define IS_ASCII(_ch) ((_ch) < 0x80)
#define STRUCT_HACK_VAR_LEN
/**
* ice_struct_size - size of struct with C99 flexible array member
* @ptr: pointer to structure
* @field: flexible array member (last member of the structure)
* @num: number of elements of that flexible array member
*/
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
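ice_struct_size() computes the allocation size of a structure that ends in a C99 flexible array member, the pattern used for variable-length adminq buffers throughout the driver. A standalone demonstration with a hypothetical container type:

#include <stdio.h>
#include <stdlib.h>

#define ice_struct_size(ptr, field, num) \
	(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

struct elem { unsigned short a, b; };
struct container {		/* hypothetical stand-in for an adminq buffer */
	unsigned short num_elems;
	unsigned short res_type;
	struct elem elem[];	/* C99 flexible array member */
};

int main(void)
{
	struct container *c;	/* only used inside sizeof, never evaluated */
	size_t len = ice_struct_size(c, elem, 3);	/* header + 3 elements */

	c = calloc(1, len);
	printf("header=%zu elem=%zu total=%zu\n",
	       sizeof(*c), sizeof(c->elem[0]), len);
	free(c);
	return 0;
}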
#include "ice_defs.h"
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_bitops.h" /* Must come before ice_controlq.h */
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_ddp_common.h"
#include "ice_controlq.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_vlan_mode.h"
@ -135,6 +102,8 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
#define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
#define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF))
#define ICE_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
#define ICE_LO_BYTE(x) ((u8)((x) & 0xFF))
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */
@ -203,11 +172,6 @@ enum ice_aq_res_ids {
#define ICE_CHANGE_LOCK_TIMEOUT 1000
#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
ICE_RES_WRITE
};
struct ice_driver_ver {
u8 major_ver;
u8 minor_ver;
@ -236,7 +200,8 @@ enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
ICE_FEC_BASER,
ICE_FEC_AUTO
ICE_FEC_AUTO,
ICE_FEC_DIS_AUTO
};
struct ice_phy_cache_mode_data {
@ -261,6 +226,7 @@ enum ice_mac_type {
ICE_MAC_VF,
ICE_MAC_E810,
ICE_MAC_GENERIC,
ICE_MAC_GENERIC_3K,
};
/* Media Types */
@ -338,6 +304,15 @@ struct ice_phy_info {
#define ICE_MAX_NUM_MIRROR_RULES 64
#define ICE_L2TPV2_FLAGS_CTRL 0x8000
#define ICE_L2TPV2_FLAGS_LEN 0x4000
#define ICE_L2TPV2_FLAGS_SEQ 0x0800
#define ICE_L2TPV2_FLAGS_OFF 0x0200
#define ICE_L2TPV2_FLAGS_VER 0x0002
#define ICE_L2TPV2_PKT_LENGTH 6
#define ICE_PPP_PKT_LENGTH 4
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
/* Write CSR protection */
@ -406,6 +381,7 @@ struct ice_hw_common_caps {
u8 iscsi;
u8 mgmt_cem;
u8 iwarp;
u8 roce_lag;
/* WoL and APM support */
#define ICE_WOL_SUPPORT_M BIT(0)
@ -437,6 +413,17 @@ struct ice_hw_common_caps {
#define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT];
#define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
bool tx_sched_topo_comp_mode_en;
bool dyn_flattening_en;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
#define ICE_NAC_TOPO_DUAL_M BIT(1)
#define ICE_NAC_TOPO_ID_M MAKEMASK(0xf, 0)
struct ice_nac_topology {
u32 mode;
u8 id;
};
/* Function specific capabilities */
@ -453,6 +440,7 @@ struct ice_hw_dev_caps {
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_funcs;
struct ice_nac_topology nac_topo;
};
/* Information about MAC such as address, etc... */
@ -862,10 +850,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
u16 dflt_tx_vsi_rule_id;
u16 dflt_tx_vsi_num;
u16 dflt_rx_vsi_rule_id;
u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
@ -887,7 +871,6 @@ struct ice_switch_info {
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
@ -962,6 +945,13 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
/* PHY configuration */
enum ice_phy_cfg {
ICE_PHY_E810 = 1,
ICE_PHY_E822,
ICE_PHY_ETH56G,
};
/* Port hardware description */
struct ice_hw {
u8 *hw_addr;
@ -985,6 +975,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
enum ice_phy_cfg phy_cfg;
u16 max_burst_size; /* driver sets this value */
@ -1046,23 +1037,23 @@ struct ice_hw {
/* true if VSIs can share unicast MAC addr */
u8 umac_shared;
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
#define ICE_PORTS_PER_QUAD 4
#define ICE_PHY_0_LAST_QUAD 1
#define ICE_PORTS_PER_PHY 8
#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
#define ICE_PHY_PER_NAC_E822 1
#define ICE_MAX_QUAD 2
#define ICE_QUADS_PER_PHY_E822 2
#define ICE_PORTS_PER_PHY_E822 8
#define ICE_PORTS_PER_QUAD 4
#define ICE_PORTS_PER_PHY_E810 4
#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 pkg_seg_id;
u32 pkg_sign_type;
u32 active_track_id;
u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
enum ice_aq_err pkg_dwnld_status;
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@ -1173,6 +1164,7 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
ICE_LG_ACTION,
ICE_INVAL_ACT
};
@ -1344,6 +1336,12 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
/* FW version for FEC disable in Auto FEC mode */
#define ICE_FW_FEC_DIS_AUTO_BRANCH 1
#define ICE_FW_FEC_DIS_AUTO_MAJ 7
#define ICE_FW_FEC_DIS_AUTO_MIN 0
#define ICE_FW_FEC_DIS_AUTO_PATCH 5
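These four values gate "No FEC" as an automatic-mode choice (ICE_FEC_DIS_AUTO) on firmware 7.0.5 or newer from branch 1. A hypothetical sketch of the version comparison; the driver's actual helper is not part of this diff and may treat branches differently:

#include <stdbool.h>
#include <stdio.h>

#define FEC_DIS_AUTO_BRANCH	1
#define FEC_DIS_AUTO_MAJ	7
#define FEC_DIS_AUTO_MIN	0
#define FEC_DIS_AUTO_PATCH	5

/* Hypothetical version gate, not the driver's real helper. */
static bool fw_supports_fec_dis_auto(unsigned br, unsigned maj,
				     unsigned min, unsigned patch)
{
	if (br != FEC_DIS_AUTO_BRANCH)
		return false;
	if (maj != FEC_DIS_AUTO_MAJ)
		return maj > FEC_DIS_AUTO_MAJ;
	if (min != FEC_DIS_AUTO_MIN)
		return min > FEC_DIS_AUTO_MIN;
	return patch >= FEC_DIS_AUTO_PATCH;
}

int main(void)
{
	printf("%d\n", fw_supports_fec_dis_auto(1, 7, 0, 6));	/* 1: supported */
	printf("%d\n", fw_supports_fec_dis_auto(1, 6, 9, 9));	/* 0: too old  */
	return 0;
}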
/* AQ API version for FW health reports */
#define ICE_FW_API_HEALTH_REPORT_MAJ 1
#define ICE_FW_API_HEALTH_REPORT_MIN 7


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -32,6 +32,7 @@
#include "ice_common.h"
#include "ice_ddp_common.h"
/**
* ice_pkg_get_supported_vlan_mode - check if DDP supports Double VLAN mode (DVM)
* @hw: pointer to the HW struct


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -473,6 +473,8 @@ ice_if_attach_pre(if_ctx_t ctx)
/* Setup ControlQ lengths */
ice_set_ctrlq_len(hw);
reinit_hw:
fw_mode = ice_get_fw_mode(hw);
if (fw_mode == ICE_FW_MODE_REC) {
device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
@ -507,12 +509,22 @@ ice_if_attach_pre(if_ctx_t ctx)
goto free_pci_mapping;
}
ice_init_device_features(sc);
/* Notify firmware of the device driver version */
err = ice_send_version(sc);
if (err)
goto deinit_hw;
ice_load_pkg_file(sc);
/*
* Success indicates a change was made that requires a reinitialization
* of the hardware
*/
err = ice_load_pkg_file(sc);
if (err == ICE_SUCCESS) {
ice_deinit_hw(hw);
goto reinit_hw;
}
err = ice_init_link_events(sc);
if (err) {
@ -521,9 +533,19 @@ ice_if_attach_pre(if_ctx_t ctx)
goto deinit_hw;
}
ice_print_nvm_version(sc);
/* Initialize VLAN mode in FW; if dual VLAN mode is supported by the package
* and firmware, this will force them to use single VLAN mode.
*/
status = ice_set_vlan_mode(hw);
if (status) {
err = EIO;
device_printf(dev, "Unable to initialize VLAN mode, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
goto deinit_hw;
}
ice_init_device_features(sc);
ice_print_nvm_version(sc);
/* Setup the MAC address */
iflib_set_mac(ctx, hw->port_info->mac.lan_addr);
@ -971,7 +993,7 @@ ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
if (!(vsi->tx_queues =
(struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_WAITOK | M_ZERO))) {
(struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
return (ENOMEM);
}
@ -979,7 +1001,7 @@ ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate report status arrays */
for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
if (!(txq->tx_rsq =
(uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_WAITOK))) {
(uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) {
device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
err = ENOMEM;
goto free_tx_queues;
@ -1063,7 +1085,7 @@ ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
if (!(vsi->rx_queues =
(struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_WAITOK | M_ZERO))) {
(struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) {
device_printf(sc->dev, "Unable to allocate Rx queue memory\n");
return (ENOMEM);
}
@ -2296,7 +2318,7 @@ ice_prepare_for_reset(struct ice_softc *sc)
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
ice_shutdown_all_ctrlq(hw);
ice_shutdown_all_ctrlq(hw, false);
}
/**
@ -2403,6 +2425,7 @@ ice_rebuild(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_ddp_state pkg_state;
enum ice_status status;
int err;
@ -2497,10 +2520,9 @@ ice_rebuild(struct ice_softc *sc)
/* If we previously loaded the package, it needs to be reloaded now */
if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
if (status) {
ice_log_pkg_init(sc, &status);
pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
if (!ice_is_init_pkg_successful(pkg_state)) {
ice_log_pkg_init(sc, pkg_state);
ice_transition_safe_mode(sc);
}
}
@ -2576,7 +2598,8 @@ ice_rebuild(struct ice_softc *sc)
err_sched_cleanup:
ice_sched_cleanup_all(hw);
err_shutdown_ctrlq:
ice_shutdown_all_ctrlq(hw);
ice_shutdown_all_ctrlq(hw, false);
ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
device_printf(dev, "Driver rebuild failed, please reload the device driver\n");
}
@ -2688,13 +2711,6 @@ ice_handle_pf_reset_request(struct ice_softc *sc)
static void
ice_init_device_features(struct ice_softc *sc)
{
/*
* A failed pkg file download triggers safe mode, disabling advanced
* device feature support
*/
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE))
return;
/* Set capabilities that all devices support */
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
@ -2705,12 +2721,16 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap);
ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!sc->hw.func_caps.common_cap.rss_table_size)
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
if (!sc->hw.func_caps.common_cap.iwarp || !ice_enable_irdma)
ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
if (!sc->hw.func_caps.common_cap.dcb)
ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap);
/* Disable features due to firmware limitations... */
if (!ice_is_fw_health_report_supported(&sc->hw))
ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap);
@ -2729,6 +2749,10 @@ ice_init_device_features(struct ice_softc *sc)
/* RSS is always enabled for iflib */
if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
/* Disable features based on sysctl settings */
if (!ice_tx_balance_en)
ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
}
/**
@ -2992,6 +3016,8 @@ ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
switch (ifd->ifd_cmd) {
case ICE_NVM_ACCESS:
return ice_handle_nvm_access_ioctl(sc, ifd);
case ICE_DEBUG_DUMP:
return ice_handle_debug_dump_ioctl(sc, ifd);
default:
return EINVAL;
}


@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021, Intel Corporation
# Copyright (c) 2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021, Intel Corporation
# Copyright (c) 2022, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -196,10 +196,12 @@ enum virtchnl_ops {
/* opcodes 60 through 65 are reserved */
VIRTCHNL_OP_GET_QOS_CAPS = 66,
VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67,
/* opcode 68, 69 are reserved */
/* opcode 68 through 70 are reserved */
VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_MAX,
};
@ -274,12 +276,6 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_DEL_FDIR_FILTER";
case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
case VIRTCHNL_OP_DISABLE_QUEUES_V2:
return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS";
case VIRTCHNL_OP_ADD_VLAN_V2:
@ -298,6 +294,12 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2";
case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2:
return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2";
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
case VIRTCHNL_OP_DISABLE_QUEUES_V2:
return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_MAX:
return "VIRTCHNL_OP_MAX";
default:
@ -492,21 +494,14 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* RX descriptor IDs (range from 0 to 63) */
enum virtchnl_rx_desc_ids {
VIRTCHNL_RXDID_0_16B_BASE = 0,
/* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors
* because they can be differentiated based on queue model; e.g. single
* queue model can only use 32B_BASE and split queue model can only use
* FLEX_SPLITQ. Having these as 1 allows them to be used as default
* descriptors without negotiation.
*/
VIRTCHNL_RXDID_1_32B_BASE = 1,
VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1,
VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2,
VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3,
VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4,
VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5,
VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6,
VIRTCHNL_RXDID_7_HW_RSVD = 7,
/* 9 through 15 are reserved */
/* 8 through 15 are reserved */
VIRTCHNL_RXDID_16_COMMS_GENERIC = 16,
VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17,
VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18,
@ -520,7 +515,6 @@ enum virtchnl_rx_desc_ids {
enum virtchnl_rx_desc_id_bitmasks {
VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE),
VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE),
VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ),
VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC),
VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW),
VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB),
@ -1211,6 +1205,46 @@ struct virtchnl_rss_lut {
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
/* enum virthcnl_hash_filter
*
* Bits defining the hash filters in the hena field of the virtchnl_rss_hena
* structure. Each bit indicates a specific hash filter for RSS.
*
* Note that not all bits are supported on all hardware. The VF should use
* VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of
* before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters.
*/
enum virtchnl_hash_filter {
/* Bits 0 through 28 are reserved for future use */
/* Bits 29, 30, and 32 are not supported on XL710 and X710 */
VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP = 29,
VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP = 30,
VIRTCHNL_HASH_FILTER_IPV4_UDP = 31,
VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK = 32,
VIRTCHNL_HASH_FILTER_IPV4_TCP = 33,
VIRTCHNL_HASH_FILTER_IPV4_SCTP = 34,
VIRTCHNL_HASH_FILTER_IPV4_OTHER = 35,
VIRTCHNL_HASH_FILTER_FRAG_IPV4 = 36,
/* Bits 37 and 38 are reserved for future use */
/* Bits 39, 40, and 42 are not supported on XL710 and X710 */
VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP = 39,
VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP = 40,
VIRTCHNL_HASH_FILTER_IPV6_UDP = 41,
VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK = 42,
VIRTCHNL_HASH_FILTER_IPV6_TCP = 43,
VIRTCHNL_HASH_FILTER_IPV6_SCTP = 44,
VIRTCHNL_HASH_FILTER_IPV6_OTHER = 45,
VIRTCHNL_HASH_FILTER_FRAG_IPV6 = 46,
/* Bit 47 is reserved for future use */
VIRTCHNL_HASH_FILTER_FCOE_OX = 48,
VIRTCHNL_HASH_FILTER_FCOE_RX = 49,
VIRTCHNL_HASH_FILTER_FCOE_OTHER = 50,
/* Bits 51 through 62 are reserved for future use */
VIRTCHNL_HASH_FILTER_L2_PAYLOAD = 63,
};
#define VIRTCHNL_HASH_FILTER_INVALID (0)
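Each enumerator above is a bit position inside the 64-bit hena word carried by struct virtchnl_rss_hena, so a VF builds its request by OR-ing BIT_ULL() of each desired filter after checking the PF's capabilities. A standalone sketch using two of the values defined above:

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(a)	(1ULL << (a))

/* Values copied from enum virtchnl_hash_filter above. */
#define HASH_FILTER_IPV4_UDP	31
#define HASH_FILTER_IPV4_TCP	33

int main(void)
{
	uint64_t caps = ~0ULL;	/* pretend the PF reported full support */
	uint64_t hena = BIT_ULL(HASH_FILTER_IPV4_UDP) |
			BIT_ULL(HASH_FILTER_IPV4_TCP);

	hena &= caps;		/* never request unsupported bits */
	printf("hena = 0x%016llx\n", (unsigned long long)hena);
	return 0;
}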
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
@ -1219,6 +1253,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
/* see enum virtchnl_hash_filter */
u64 hena;
};
@ -1378,13 +1413,6 @@ struct virtchnl_pf_event {
u8 link_status;
u8 pad[3];
} link_event_adv;
struct {
/* link_speed provided in Mbps */
u32 link_speed;
u16 vport_id;
u8 link_status;
u8 pad;
} link_event_adv_vport;
} event_data;
s32 severity;
@ -1410,6 +1438,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
(proto_hdr_type << PROTO_HDR_SHIFT)
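PROTO_HDR_FIELD_START() packs a protocol header type and a per-header field index into one identifier: the type sits in the bits above PROTO_HDR_SHIFT and the field index in the five bits below it, which is how the field enumerators that follow are constructed. A standalone encode/decode sketch (the protocol type value is a placeholder, not the real enumerator):

#include <stdio.h>

#define PROTO_HDR_SHIFT	5
#define PROTO_HDR_FIELD_START(t)	((t) << PROTO_HDR_SHIFT)

int main(void)
{
	int ipv4 = 4;	/* placeholder for a VIRTCHNL_PROTO_HDR_* type value */
	int field = PROTO_HDR_FIELD_START(ipv4) + 2;	/* third field of that header */

	printf("field=%d -> type=%d index=%d\n", field,
	       field >> PROTO_HDR_SHIFT,
	       field & ((1 << PROTO_HDR_SHIFT) - 1));
	return 0;
}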
@ -1581,6 +1610,10 @@ enum virtchnl_proto_hdr_field {
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN),
VIRTCHNL_PROTO_HDR_GTPU_UP_QFI =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP),
/* L2TPv2 */
VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2),
VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID,
};
struct virtchnl_proto_hdr {
@ -1601,13 +1634,26 @@ struct virtchnl_proto_hdrs {
u8 tunnel_level;
/**
* specify where the protocol header starts from.
* must be 0 when sending a raw packet request.
* 0 - from the outer layer
* 1 - from the first inner layer
* 2 - from the second inner layer
* ....
**/
int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */
struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
*/
int count;
/**
* number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
* must be 0 for a raw packet request.
*/
union {
struct virtchnl_proto_hdr
proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
struct {
u16 pkt_len;
u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET];
} raw;
};
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
@ -1796,18 +1842,28 @@ struct virtchnl_queue_tc_mapping {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping);
/* TX and RX queue types are valid in legacy as well as split queue models.
* With Split Queue model, 2 additional types are introduced - TX_COMPLETION
* and RX_BUFFER. In split queue model, RX corresponds to the queue where HW
* posts completions.
*/
/* VIRTCHNL_OP_CONFIG_QUEUE_BW */
struct virtchnl_queue_bw {
u16 queue_id;
u8 tc;
u8 pad;
struct virtchnl_shaper_bw shaper;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw);
struct virtchnl_queues_bw_cfg {
u16 vsi_id;
u16 num_queues;
struct virtchnl_queue_bw cfg[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg);
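The trailing cfg[1] array makes virtchnl_queues_bw_cfg a variable-length message: a buffer describing N queues is the base structure plus N - 1 extra virtchnl_queue_bw entries, which is exactly the arithmetic the message validator applies further down. A standalone size check with stand-in types shaped to the 12- and 16-byte sizes asserted above:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins shaped to match the VIRTCHNL_CHECK_STRUCT_LEN sizes above. */
struct shaper_bw { uint32_t committed, peak; };			/* 8 bytes  */
struct queue_bw { uint16_t queue_id; uint8_t tc, pad;
		  struct shaper_bw shaper; };			/* 12 bytes */
struct queues_bw_cfg { uint16_t vsi_id, num_queues;
		       struct queue_bw cfg[1]; };		/* 16 bytes */

int main(void)
{
	uint16_t num_queues = 4;
	size_t valid_len = sizeof(struct queues_bw_cfg) +
	    (num_queues - 1) * sizeof(struct queue_bw);	/* 16 + 3 * 12 = 52 */

	printf("valid_len for %u queues = %zu\n", num_queues, valid_len);
	return 0;
}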
/* queue types */
enum virtchnl_queue_type {
VIRTCHNL_QUEUE_TYPE_TX = 0,
VIRTCHNL_QUEUE_TYPE_RX = 1,
VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2,
VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3,
VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4,
VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5
};
/* structure to specify a chunk of contiguous queues */
@ -1831,19 +1887,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
/* VIRTCHNL_OP_ENABLE_QUEUES_V2
* VIRTCHNL_OP_DISABLE_QUEUES_V2
* VIRTCHNL_OP_DEL_QUEUES
*
* If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0
* then all of these ops are available.
* These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in
* VIRTCHNL_OP_GET_VF_RESOURCES
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are
* available.
*
* PF sends these messages to enable, disable or delete queues specified in
* chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues
* to be enabled/disabled/deleted. Also applicable to single queue RX or
* TX. CP performs requested action and returns status.
* VF sends virtchnl_ena_dis_queues struct to specify the queues to be
* enabled/disabled in chunks. Also applicable to single queue RX or
* TX. PF performs requested action and returns status.
*/
struct virtchnl_del_ena_dis_queues {
u16 vport_id;
@ -1877,13 +1927,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
* This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated
* in VIRTCHNL_OP_GET_VF_RESOURCES
*
* PF sends this message to map or unmap queues to vectors and ITR index
* registers. External data buffer contains virtchnl_queue_vector_maps structure
* VF sends this message to map queues to vectors and ITR index registers.
* External data buffer contains virtchnl_queue_vector_maps structure
* that contains num_qv_maps of virtchnl_queue_vector structures.
* CP maps the requested queue vector maps after validating the queue and vector
* PF maps the requested queue vector maps after validating the queue and vector
* ids and returns a status code.
*/
struct virtchnl_queue_vector_maps {
@ -1895,6 +1945,13 @@ struct virtchnl_queue_vector_maps {
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
struct virtchnl_quanta_cfg {
u16 quanta_size;
struct virtchnl_queue_chunk queue_select;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
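virtchnl_quanta_cfg selects its targets with virtchnl_queue_chunk, the same contiguous-run descriptor the enable/disable ops use. A standalone sketch of expanding one chunk into individual queue IDs, with a stand-in mirroring the chunk layout defined earlier in this header:

#include <stdint.h>
#include <stdio.h>

struct queue_chunk {		/* mirrors virtchnl_queue_chunk */
	int type;		/* e.g. VIRTCHNL_QUEUE_TYPE_TX */
	uint16_t start_queue_id;
	uint16_t num_queues;
};

int main(void)
{
	struct queue_chunk c = { 0, 8, 4 };	/* queues 8..11 */

	for (uint16_t q = c.start_queue_id;
	     q < c.start_queue_id + c.num_queues; q++)
		printf("apply quanta to queue %u\n", q);
	return 0;
}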
/* Since VF messages are limited by u16 size, precalculate the maximum possible
* values of nested elements in virtchnl structures that virtual channel can
* possibly handle in a single message.
@ -2130,6 +2187,31 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
sizeof(q_tc->tc[0]);
}
break;
case VIRTCHNL_OP_CONFIG_QUEUE_BW:
valid_len = sizeof(struct virtchnl_queues_bw_cfg);
if (msglen >= valid_len) {
struct virtchnl_queues_bw_cfg *q_bw =
(struct virtchnl_queues_bw_cfg *)msg;
if (q_bw->num_queues == 0) {
err_msg_format = true;
break;
}
valid_len += (q_bw->num_queues - 1) *
sizeof(q_bw->cfg[0]);
}
break;
case VIRTCHNL_OP_CONFIG_QUANTA:
valid_len = sizeof(struct virtchnl_quanta_cfg);
if (msglen >= valid_len) {
struct virtchnl_quanta_cfg *q_quanta =
(struct virtchnl_quanta_cfg *)msg;
if (q_quanta->quanta_size == 0 ||
q_quanta->queue_select.num_queues == 0) {
err_msg_format = true;
break;
}
}
break;
case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
break;
case VIRTCHNL_OP_ADD_VLAN_V2:


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
/* Copyright (c) 2022, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -478,6 +478,15 @@ struct virtchnl_ipsec_sp_cfg {
/* Set TC (congestion domain) if true. For future use. */
u8 set_tc;
/* 0 for NAT-T unsupported, 1 for NAT-T supported */
u8 is_udp;
/* reserved */
u8 reserved;
/* NAT-T UDP port number. Only valid when NAT-T is supported */
u16 udp_port;
};
#pragma pack(1)


@ -1,550 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_LAN_DESC_H_
#define _VIRTCHNL_LAN_DESC_H_
/* Rx */
/* For splitq virtchnl_rx_flex_desc_adv desc members */
#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S 0
#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_M \
MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S 0
#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S 10
#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_M \
MAKEMASK(0x3UL, VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S 12
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_M \
MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S 0
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_M \
MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S 14
#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_M \
BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_M \
BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S 0
#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S 10
#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_M \
BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S 11
#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_M \
BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S 12
#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_M \
BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S)
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S 13
#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M \
MAKEMASK(0x7UL, VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S)
enum virtchnl_rx_flex_desc_adv_status_error_0_qw1_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_DD_S = 0,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_EOF_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_HBO_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S,
};
enum virtchnl_rx_flex_desc_adv_status_error_0_qw0_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LPBK_S = 0,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RXE_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_CRCP_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_flex_desc_adv_status_error_1_bits {
/* Note: These are predefined bit offsets */
/* 2 bits */
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_RSVD_S = 0,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S = 2,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S = 3,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S = 4,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S = 5,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S = 6,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S = 7,
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_LAST /* this entry must be last!!! */
};
/* for singleq (flex) virtchnl_rx_flex_desc fields */
/* for virtchnl_rx_flex_desc.ptype_flex_flags0 member */
#define VIRTCHNL_RX_FLEX_DESC_PTYPE_S 0
#define VIRTCHNL_RX_FLEX_DESC_PTYPE_M \
MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_PTYPE_S) /* 10-bits */
/* for virtchnl_rx_flex_desc.pkt_length member */
#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S 0
#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_M \
MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S) /* 14-bits */
enum virtchnl_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0,
VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
VIRTCHNL_RX_FLEX_DESC_STATUS1_NAT_S = 4,
VIRTCHNL_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
/* [10:6] reserved */
VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
/* For singleq (non flex) virtchnl_singleq_base_rx_desc legacy desc members */
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S 63
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_M \
BIT_ULL(VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S 52
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_M \
MAKEMASK(0x7FFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S 38
#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_M \
MAKEMASK(0x3FFFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S 30
#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_M \
MAKEMASK(0xFFULL, VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S 19
#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_M \
MAKEMASK(0xFFUL, VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S)
#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S 0
#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_M \
MAKEMASK(0x7FFFFUL, VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S)
enum virtchnl_rx_base_desc_status_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0,
VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1,
VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2,
VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3,
VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4,
VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8,
VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11,
VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14,
VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15,
VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */
VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18,
VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */
};
enum virtchnl_rx_base_desc_ext_status_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S = 0
};
enum virtchnl_rx_base_desc_error_bits {
/* Note: These are predefined bit offsets */
VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0,
VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1,
VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2,
VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */
VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3,
VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4,
VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5,
VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6,
VIRTCHNL_RX_BASE_DESC_ERROR_PPRS_S = 7
};
enum virtchnl_rx_base_desc_fltstat_values {
VIRTCHNL_RX_BASE_DESC_FLTSTAT_NO_DATA = 0,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_FD_ID = 1,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSV = 2,
VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSS_HASH = 3,
};
/* Receive Descriptors */
/* splitq buf
| 16| 0|
----------------------------------------------------------------
| RSV | Buffer ID |
----------------------------------------------------------------
| Rx packet buffer address |
----------------------------------------------------------------
| Rx header buffer address |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| 0|
*/
struct virtchnl_splitq_rx_buf_desc {
struct {
__le16 buf_id; /* Buffer Identifier */
__le16 rsvd0;
__le32 rsvd1;
} qword0;
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd2;
}; /* read used with buffer queues*/
/* singleq buf
| 0|
----------------------------------------------------------------
| Rx packet buffer address |
----------------------------------------------------------------
| Rx header buffer address |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| RSV |
----------------------------------------------------------------
| 0|
*/
struct virtchnl_singleq_rx_buf_desc {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 rsvd1;
__le64 rsvd2;
}; /* read used with buffer queues*/
union virtchnl_rx_buf_desc {
struct virtchnl_singleq_rx_buf_desc read;
struct virtchnl_splitq_rx_buf_desc split_rd;
};
/* (0x00) singleq wb(compl) */
struct virtchnl_singleq_base_rx_desc {
struct {
struct {
__le16 mirroring_status;
__le16 l2tag1;
} lo_dword;
union {
__le32 rss; /* RSS Hash */
__le32 fd_id; /* Flow Director filter id */
} hi_dword;
} qword0;
struct {
/* status/error/PTYPE/length */
__le64 status_error_ptype_len;
} qword1;
struct {
__le16 ext_status; /* extended status */
__le16 rsvd;
__le16 l2tag2_1;
__le16 l2tag2_2;
} qword2;
struct {
__le32 reserved;
__le32 fd_id;
} qword3;
}; /* writeback */
/* (0x01) singleq flex compl */
struct virtchnl_rx_flex_desc {
/* Qword 0 */
u8 rxdid; /* descriptor builder profile id */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
__le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
/* sph=[11:11] */
/* ff1/ext=[15:12] */
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 flex_meta0;
__le16 flex_meta1;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 time_stamp_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flex_meta2;
__le16 flex_meta3;
union {
struct {
__le16 flex_meta4;
__le16 flex_meta5;
} flex;
__le32 ts_high;
} flex_ts;
};
/* (0x02) */
struct virtchnl_rx_flex_desc_nic {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 flow_id;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
/* Rx Flex Descriptor Switch Profile
* RxDID Profile Id 3
* Flex-field 0: Source Vsi
*/
struct virtchnl_rx_flex_desc_sw {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le16 src_vsi; /* [15:10] are reserved */
__le16 flex_md1_rsvd;
/* Qword 2 */
__le16 status_error1;
u8 flex_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le32 rsvd; /* flex words 2-3 are reserved */
__le32 ts_high;
};
/* Rx Flex Descriptor NIC Profile
* RxDID Profile Id 6
* Flex-field 0: RSS hash lower 16-bits
* Flex-field 1: RSS hash upper 16-bits
* Flex-field 2: Flow Id lower 16-bits
* Flex-field 3: Source Vsi
* Flex-field 4: reserved, Vlan id taken from L2Tag
*/
struct virtchnl_rx_flex_desc_nic_2 {
/* Qword 0 */
u8 rxdid;
u8 mir_id_umb_cast;
__le16 ptype_flex_flags0;
__le16 pkt_len;
__le16 hdr_len_sph_flex_flags1;
/* Qword 1 */
__le16 status_error0;
__le16 l2tag1;
__le32 rss_hash;
/* Qword 2 */
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
__le16 l2tag2_1st;
__le16 l2tag2_2nd;
/* Qword 3 */
__le16 flow_id;
__le16 src_vsi;
union {
struct {
__le16 rsvd;
__le16 flow_id_ipv6;
} flex;
__le32 ts_high;
} flex_ts;
};
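/*
 * A sketch of reassembling the receive timestamp carried in the flex
 * formats, assuming ts_high holds the upper 32 bits and ts_low the
 * lowest 8 bits of a 40-bit hardware clock sample; the field names
 * suggest this split, but the total width is an assumption, not
 * something this header states. ts_high must already be in host order.
 */
static inline u64
virtchnl_rx_flex_tstamp(u32 ts_high, u8 ts_low)
{
	return ((u64)ts_high << 8) | ts_low;
}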
/* Rx Flex Descriptor Advanced (Split Queue Model)
* RxDID Profile Id 7
*/
struct virtchnl_rx_flex_desc_adv {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 fmd0;
__le16 fmd1;
/* Qword 2 */
__le16 fmd2;
u8 fflags2;
u8 hash3;
__le16 fmd3;
__le16 fmd4;
/* Qword 3 */
__le16 fmd5;
__le16 fmd6;
__le16 fmd7_0;
__le16 fmd7_1;
}; /* writeback */
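/*
 * A sketch of the completion test implied by the splitq-only gen bit:
 * software keeps an expected generation flag that flips on every ring
 * wrap, and a descriptor is owned by software only while its gen bit
 * matches. The masks follow the field comments above (plen=[13:0],
 * gen=[14:14], bufq_id=[15:15]); the ring bookkeeping around these
 * helpers is hypothetical.
 */
static inline bool
virtchnl_rx_adv_done(const struct virtchnl_rx_flex_desc_adv *desc,
    u8 expected_gen)
{
	u16 v = LE16_TO_CPU(desc->pktlen_gen_bufq_id);

	return ((v >> 14) & 0x1) == expected_gen;
}

static inline u16
virtchnl_rx_adv_pkt_len(const struct virtchnl_rx_flex_desc_adv *desc)
{
	return LE16_TO_CPU(desc->pktlen_gen_bufq_id) & 0x3FFF; /* plen=[13:0] */
}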
/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
* RxDID Profile Id 8
* Flex-field 0: BufferID
* Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
* Flex-field 2: Hash[15:0]
* Flex-flags 2: Hash[23:16]
* Flex-field 3: L2TAG2
* Flex-field 5: L2TAG1
* Flex-field 7: Timestamp (upper 32 bits)
*/
struct virtchnl_rx_flex_desc_adv_nic_3 {
/* Qword 0 */
u8 rxdid_ucast; /* profile_id=[3:0] */
/* rsvd=[5:4] */
/* ucast=[7:6] */
u8 status_err0_qw0;
__le16 ptype_err_fflags0; /* ptype=[9:0] */
/* ip_hdr_err=[10:10] */
/* udp_len_err=[11:11] */
/* ff0=[15:12] */
__le16 pktlen_gen_bufq_id; /* plen=[13:0] */
/* gen=[14:14] only in splitq */
/* bufq_id=[15:15] only in splitq */
__le16 hdrlen_flags; /* header=[9:0] */
/* rsc=[10:10] only in splitq */
/* sph=[11:11] only in splitq */
/* ext_udp_0=[12:12] */
/* int_udp_0=[13:13] */
/* trunc_mirr=[14:14] */
/* miss_prepend=[15:15] */
/* Qword 1 */
u8 status_err0_qw1;
u8 status_err1;
u8 fflags1;
u8 ts_low;
__le16 buf_id; /* only in splitq */
union {
__le16 raw_cs;
__le16 l2tag1;
__le16 rscseglen;
} misc;
/* Qword 2 */
__le16 hash1;
union {
u8 fflags2;
u8 mirrorid;
u8 hash2;
} ff2_mirrid_hash2;
u8 hash3;
__le16 l2tag2;
__le16 fmd4;
/* Qword 3 */
__le16 l2tag1;
__le16 fmd6;
__le32 ts_high;
}; /* writeback */
union virtchnl_rx_desc {
struct virtchnl_singleq_rx_buf_desc read;
struct virtchnl_singleq_base_rx_desc base_wb;
struct virtchnl_rx_flex_desc flex_wb;
struct virtchnl_rx_flex_desc_nic flex_nic_wb;
struct virtchnl_rx_flex_desc_sw flex_sw_wb;
struct virtchnl_rx_flex_desc_nic_2 flex_nic_2_wb;
struct virtchnl_rx_flex_desc_adv flex_adv_wb;
struct virtchnl_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb;
};
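/*
 * A sketch of dispatching a flex writeback by RxDID, using the profile
 * ids named in the comments above (1 = flex, 2 = NIC, 3 = switch,
 * 6 = NIC-2, 7 = advanced, 8 = advanced NIC). Only the flex formats
 * begin with an RxDID byte; the base writeback does not, so it must be
 * distinguished out of band. For the advanced formats the profile id
 * is the low nibble (profile_id=[3:0]), and masking is harmless for
 * the others since all named ids are below 16. The handler calls are
 * hypothetical placeholders.
 */
static inline void
virtchnl_rx_flex_dispatch(const union virtchnl_rx_desc *desc)
{
	u8 profile = desc->flex_wb.rxdid & 0x0F; /* profile_id=[3:0] */

	switch (profile) {
	case 2:
		/* handle_nic_wb(&desc->flex_nic_wb); */
		break;
	case 7:
		/* handle_adv_wb(&desc->flex_adv_wb); */
		break;
	case 8:
		/* handle_adv_nic_wb(&desc->flex_adv_nic_3_wb); */
		break;
	default:
		break;
	}
}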
#endif /* _VIRTCHNL_LAN_DESC_H_ */


@ -14,7 +14,7 @@ SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
# Core source
SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c
SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
SRCS += ice_fw_logging.c
SRCS += ice_fw_logging.c ice_ddp_common.c
# RDMA Client interface
# TODO: Is this the right way to compile this?