ice(4): Update to version 0.28.1-k

This updates the driver to align with the version included in
the "Intel Ethernet Adapter Complete Driver Pack", version 25.6.

There are no major functional changes; this mostly contains
bug fixes and changes to prepare for new features. This version
of the driver uses the previously committed ice_ddp package
1.3.19.0.

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Tested by:	jeffrey.e.pieper@intel.com
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D28640
Eric Joyner 2021-02-22 17:45:09 -08:00
parent e53138694a
commit d08b8680e1
54 changed files with 2904 additions and 740 deletions


@ -191,6 +191,8 @@ dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031300 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \


@ -205,6 +205,8 @@ dev/ice/ice_sriov.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031300 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -166,6 +166,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
#define ICE_AQC_CAPS_SKU 0x0074
#define ICE_AQC_CAPS_PORT_MAP 0x0075
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@ -1449,6 +1450,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2)
#define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
#define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@ -1496,7 +1498,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
/* External Device Power Ability */
u8 power_desc;
#define ICE_AQ_PWR_CLASS_M 0x3
#define ICE_AQ_PWR_CLASS_M 0x3F
#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
#define ICE_AQ_LINK_PWR_BASET_HIGH 1
#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
@ -1860,7 +1862,9 @@ struct ice_aqc_mdio {
#define ICE_AQC_MDIO_DEV_M (0x1F << ICE_AQC_MDIO_DEV_S)
#define ICE_AQC_MDIO_CLAUSE_22 BIT(5)
#define ICE_AQC_MDIO_CLAUSE_45 BIT(6)
u8 rsvd;
u8 mdio_bus_address;
#define ICE_AQC_MDIO_BUS_ADDR_S 0
#define ICE_AQC_MDIO_BUS_ADDR_M (0x1F << ICE_AQC_MDIO_BUS_ADDR_S)
__le16 offset;
__le16 data; /* Input in write cmd, output in read cmd. */
u8 rsvd1[4];
@ -2001,6 +2005,22 @@ struct ice_aqc_sff_eeprom {
__le32 addr_low;
};
/* SW Set GPIO command (indirect 0x6EF)
* SW Get GPIO command (indirect 0x6F0)
*/
struct ice_aqc_sw_gpio {
__le16 gpio_ctrl_handle;
#define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S 0
#define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_M (0x3FF << ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S)
u8 gpio_num;
#define ICE_AQC_SW_GPIO_NUMBER_S 0
#define ICE_AQC_SW_GPIO_NUMBER_M (0x1F << ICE_AQC_SW_GPIO_NUMBER_S)
u8 gpio_params;
#define ICE_AQC_SW_GPIO_PARAMS_DIRECTION BIT(1)
#define ICE_AQC_SW_GPIO_PARAMS_VALUE BIT(0)
u8 rsvd[12];
};
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Write commands (indirect 0x0703)
@ -2027,6 +2047,9 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
#define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
#define ICE_AQC_NVM_POR_FLAG 0 /* Used by NVM Write completion on ARQ */
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@ -2056,32 +2079,22 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
/* The result of netlist NVM read comes in a TLV format. The actual data
* (netlist header) starts from word offset 1 (byte 2). The FW strips
* out the type field from the TLV header so all the netlist fields
* should adjust their offset value by 1 word (2 bytes) in order to map
* their correct location.
*/
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M MAKEMASK(0x3FF, 0)
#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
/* netlist ID block field offsets (word offsets) */
#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
* type field is excluded from the section when reading and writing from
* a module using the module_typeid field with these AQ commands.
*/
struct ice_aqc_nvm_minsrev {
__le16 length;
__le16 validity;
#define ICE_AQC_NVM_MINSREV_NVM_VALID BIT(0)
#define ICE_AQC_NVM_MINSREV_OROM_VALID BIT(1)
__le16 nvm_minsrev_l;
__le16 nvm_minsrev_h;
__le16 orom_minsrev_l;
__le16 orom_minsrev_h;
};
/* Used for 0x0704 as well as for 0x0705 commands */
struct ice_aqc_nvm_cfg {
@ -2114,7 +2127,7 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
/**
/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
@ -2541,6 +2554,7 @@ struct ice_pkg_ver {
};
#define ICE_PKG_NAME_SIZE 32
#define ICE_SEG_ID_SIZE 28
#define ICE_SEG_NAME_SIZE 28
struct ice_aqc_get_pkg_info {
@ -2589,6 +2603,35 @@ struct ice_aqc_set_health_status_config {
u8 reserved[15];
};
#define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE 0x102
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL 0x103
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM 0x104
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
#define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
#define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
#define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
#define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
#define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
#define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
#define ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
#define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
#define ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
#define ICE_AQC_HEALTH_STATUS_ERR_NETLIST 0x113
#define ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
#define ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
#define ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
#define ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT 0x117
#define ICE_AQC_HEALTH_STATUS_INFO_RECOVERY 0x500
#define ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH 0x502
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH 0x503
#define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504
#define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
#define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
#define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509
/* Get Health Status codes (indirect 0xFF21) */
struct ice_aqc_get_supported_health_status_codes {
__le16 health_code_count;
@ -2630,8 +2673,8 @@ struct ice_aqc_clear_health_status {
* @opcode: AQ command opcode
* @datalen: length in bytes of indirect/external data buffer
* @retval: return value from firmware
* @cookie_h: opaque data high-half
* @cookie_l: opaque data low-half
* @cookie_high: opaque data high-half
* @cookie_low: opaque data low-half
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
@ -2920,6 +2963,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
ice_aqc_opc_sw_set_gpio = 0x06EF,
ice_aqc_opc_sw_get_gpio = 0x06F0,
/* NVM commands */
ice_aqc_opc_nvm_read = 0x0701,


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -423,7 +423,7 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
{
u16 i;
for (i = pos; i < num_bits; i++)
for (i = pos; i < pos + num_bits; i++)
ice_set_bit(i, dst);
}
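The one-character change above fixes an off-by-range bug: the old loop condition treated num_bits as an absolute end bit instead of a count, so ice_bitmap_set() could set too few bits, or none at all whenever pos >= num_bits. A minimal sketch of the corrected behavior (illustrative values only):

ice_declare_bitmap(dst, 64);	/* hypothetical 64-bit bitmap */

ice_zero_bitmap(dst, 64);
ice_bitmap_set(dst, 4, 3);	/* sets bits 4, 5 and 6 */
/* With the old bound (i < num_bits) this call set nothing at all,
 * because the very first test, 4 < 3, already fails.
 */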


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -265,7 +265,7 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
/*
/**
* ice_is_media_cage_present
* @pi: port information structure
*
@ -704,13 +704,14 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
void ice_print_rollback_msg(struct ice_hw *hw)
{
char nvm_str[ICE_NVM_VER_LEN] = { 0 };
struct ice_nvm_info *nvm = &hw->nvm;
struct ice_orom_info *orom;
struct ice_nvm_info *nvm;
orom = &nvm->orom;
orom = &hw->flash.orom;
nvm = &hw->flash.nvm;
SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
nvm->major, nvm->minor, nvm->eetrack, orom->major,
orom->build, orom->patch);
ice_warn(hw,
"Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
@ -805,8 +806,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
ice_free(hw, pcaps);
if (status)
ice_debug(hw, ICE_DBG_PHY, "%s: Get PHY capabilities failed, continuing anyway\n",
__func__);
ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n");
/* Initialize port_info struct with link information */
status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
@ -850,6 +850,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
ice_init_lock(&hw->tnl_lock);
ice_init_vlan_mode_ops(hw);
return ICE_SUCCESS;
err_unroll_fltr_mgmt_struct:
@ -1701,7 +1704,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
if (!buf)
return ICE_ERR_PARAM;
if (buf_size < (num_entries * sizeof(buf->elem[0])))
if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
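The size check above now goes through FLEX_ARRAY_SIZE() rather than an open-coded multiplication. Judging only from the expression it replaces, the helper computes the byte size of a flexible array member; a hypothetical sketch, not the driver's actual definition, would be:

/* Illustration only -- the real macro is defined elsewhere in the driver. */
#define FLEX_ARRAY_SIZE(_ptr, _mem, _count) ((_count) * sizeof((_ptr)->_mem[0]))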
@ -1982,6 +1985,16 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
case ICE_AQC_CAPS_NVM_VER:
break;
case ICE_AQC_CAPS_NVM_MGMT:
caps->sec_rev_disabled =
(number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
caps->sec_rev_disabled);
caps->update_disabled =
(number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
caps->update_disabled);
caps->nvm_unified_update =
(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
@ -2389,26 +2402,25 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
{
struct ice_hw_func_caps *func_caps = &hw->func_caps;
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
u32 valid_func, rxq_first_id, txq_first_id;
u32 msix_vector_first_id, max_mtu;
struct ice_hw_common_caps cached_caps;
u32 num_funcs;
/* cache some func_caps values that should be restored after memset */
valid_func = func_caps->common_cap.valid_functions;
txq_first_id = func_caps->common_cap.txq_first_id;
rxq_first_id = func_caps->common_cap.rxq_first_id;
msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
max_mtu = func_caps->common_cap.max_mtu;
cached_caps = func_caps->common_cap;
/* unset func capabilities */
memset(func_caps, 0, sizeof(*func_caps));
#define ICE_RESTORE_FUNC_CAP(name) \
func_caps->common_cap.name = cached_caps.name
/* restore cached values */
func_caps->common_cap.valid_functions = valid_func;
func_caps->common_cap.txq_first_id = txq_first_id;
func_caps->common_cap.rxq_first_id = rxq_first_id;
func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
func_caps->common_cap.max_mtu = max_mtu;
ICE_RESTORE_FUNC_CAP(valid_functions);
ICE_RESTORE_FUNC_CAP(txq_first_id);
ICE_RESTORE_FUNC_CAP(rxq_first_id);
ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
ICE_RESTORE_FUNC_CAP(max_mtu);
ICE_RESTORE_FUNC_CAP(nvm_unified_update);
/* one Tx and one Rx queue in safe mode */
func_caps->common_cap.num_rxq = 1;
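The ICE_RESTORE_FUNC_CAP() helper above (and the ICE_RESTORE_DEV_CAP() counterpart below) simply copies one field back from the cached capabilities, which is what makes it trivial to also restore the new nvm_unified_update flag. For example:

/* ICE_RESTORE_FUNC_CAP(max_mtu) expands to: */
func_caps->common_cap.max_mtu = cached_caps.max_mtu;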
@ -2419,22 +2431,22 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
func_caps->guar_num_vsi = 1;
/* cache some dev_caps values that should be restored after memset */
valid_func = dev_caps->common_cap.valid_functions;
txq_first_id = dev_caps->common_cap.txq_first_id;
rxq_first_id = dev_caps->common_cap.rxq_first_id;
msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
max_mtu = dev_caps->common_cap.max_mtu;
cached_caps = dev_caps->common_cap;
num_funcs = dev_caps->num_funcs;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
#define ICE_RESTORE_DEV_CAP(name) \
dev_caps->common_cap.name = cached_caps.name
/* restore cached values */
dev_caps->common_cap.valid_functions = valid_func;
dev_caps->common_cap.txq_first_id = txq_first_id;
dev_caps->common_cap.rxq_first_id = rxq_first_id;
dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
dev_caps->common_cap.max_mtu = max_mtu;
ICE_RESTORE_DEV_CAP(valid_functions);
ICE_RESTORE_DEV_CAP(txq_first_id);
ICE_RESTORE_DEV_CAP(rxq_first_id);
ICE_RESTORE_DEV_CAP(msix_vector_first_id);
ICE_RESTORE_DEV_CAP(max_mtu);
ICE_RESTORE_DEV_CAP(nvm_unified_update);
dev_caps->num_funcs = num_funcs;
/* one Tx and one Rx queue per function in safe mode */
@ -2480,7 +2492,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@ -2816,6 +2828,11 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (status == ICE_SUCCESS)
ice_memcpy(li->module_type, &pcaps->module_type,
sizeof(li->module_type),
ICE_NONDMA_TO_NONDMA);
ice_free(hw, pcaps);
}
@ -3379,7 +3396,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
@ -3399,23 +3416,33 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: VSI FW index
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
* @glob_lut_idx: global LUT index
* @params: RSS LUT parameters
* @set: set true to set the table, false to get the table
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
u16 lut_size, u8 glob_lut_idx, bool set)
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
enum ice_status status;
u16 flags = 0;
u8 *lut;
if (!params)
return ICE_ERR_PARAM;
vsi_handle = params->vsi_handle;
lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
return ICE_ERR_PARAM;
lut_size = params->lut_size;
lut_type = params->lut_type;
glob_lut_idx = params->global_lut_id;
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
cmd_resp = &desc.params.get_set_rss_lut;
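The RSS LUT helpers now take a single parameters structure instead of a long argument list. Based only on the fields referenced here and in ice_set_rss_lut() later in this diff, the structure looks roughly like the sketch below; exact types and field order are assumptions, and the real definition lives elsewhere in the driver:

/* Sketch only; types and ordering are assumptions. */
struct ice_aq_get_set_rss_lut_params {
	u16 vsi_handle;    /* software VSI handle */
	u16 lut_size;      /* size of the LUT buffer */
	u8 lut_type;       /* PF, VSI or global LUT */
	u8 *lut;           /* caller-provided LUT buffer */
	u8 global_lut_id;  /* index when a global LUT is selected */
};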
@ -3492,43 +3519,27 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
/**
* ice_aq_get_rss_lut
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
* @get_params: RSS LUT parameters used to specify which RSS LUT to get
*
* get the RSS lookup table, PF or VSI type
*/
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
u8 *lut, u16 lut_size)
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
return ICE_ERR_PARAM;
return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
lut_type, lut, lut_size, 0, false);
return __ice_aq_get_set_rss_lut(hw, get_params, false);
}
/**
* ice_aq_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @lut_type: LUT table type
* @lut: pointer to the LUT buffer provided by the caller
* @lut_size: size of the LUT buffer
* @set_params: RSS LUT parameters used to specify how to set the RSS LUT
*
* set the RSS lookup table, PF or VSI type
*/
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
u8 *lut, u16 lut_size)
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
return ICE_ERR_PARAM;
return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
lut_type, lut, lut_size, 0, true);
return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
/**


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -117,11 +117,9 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
u32 tx_drbell_q_index);
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
u16 lut_size);
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
@ -240,13 +238,6 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
@ -277,7 +268,7 @@ enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -60,8 +60,8 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
/* Control Queue timeout settings - max delay 1s */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
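The larger retry count quadruples the maximum time spent polling for a control queue command to complete: 10000 iterations * 100 usec = 1,000,000 usec = 1 s, up from the previous 2500 * 100 usec = 250 ms.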


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -994,22 +994,27 @@ ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
* @dcbcfg: DCB configuration struct
* @pi: port information structure
*
* Convert CEE configuration from firmware to DCB configuration
*/
static void
ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
struct ice_dcbx_cfg *dcbcfg)
struct ice_port_info *pi)
{
u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
u8 i, err, sync, oper, app_index, ice_app_sel_type;
u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
u16 ice_app_prot_id_type;
/* CEE PG data to ETS config */
dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbcfg->tlv_status = tlv_status;
/* CEE PG data */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
/* Note that the FW creates the oper_prio_tc nibbles reversed
@ -1036,10 +1041,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
}
}
/* CEE PFC data to ETS config */
/* CEE PFC data */
dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
/* CEE APP TLV data */
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
else
cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
app_index = 0;
for (i = 0; i < 3; i++) {
if (i == 0) {
@ -1058,6 +1069,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
ice_app_sel_type = ICE_APP_SEL_TCPIP;
ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
for (j = 0; j < cmp_dcbcfg->numapps; j++) {
u16 prot_id = cmp_dcbcfg->app[j].prot_id;
u8 sel = cmp_dcbcfg->app[j].selector;
if (sel == ICE_APP_SEL_TCPIP &&
(prot_id == ICE_APP_PROT_ID_ISCSI ||
prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
ice_app_prot_id_type = prot_id;
break;
}
}
} else {
/* FIP APP */
ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@ -1148,11 +1171,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (ret == ICE_SUCCESS) {
/* CEE mode */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status);
ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "0.26.16-k";
const char ice_driver_version[] = "0.28.1-k";
const uint8_t ice_major_version = 0;
const uint8_t ice_minor_version = 26;
const uint8_t ice_patch_version = 16;
const uint8_t ice_minor_version = 28;
const uint8_t ice_patch_version = 1;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 0.26.16-k")
PVID(vendor, devid, name " - 0.28.1-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.16-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.28.1-k")
/**
* @var ice_vendor_info_array
@ -113,20 +113,11 @@ static pci_vendor_info_t ice_vendor_info_array[] = {
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x000D, 0,
"Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
"Intel(R) Ethernet Controller E810-C for QSFP"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0001, 0,
"Intel(R) Ethernet Network Adapter E810-L-1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0002, 0,
"Intel(R) Ethernet Network Adapter E810-L-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0003, 0,
"Intel(R) Ethernet Network Adapter E810-L-1"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0004, 0,
"Intel(R) Ethernet Network Adapter E810-L-2"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
ICE_INTEL_VENDOR_ID, 0x0005, 0,
"Intel(R) Ethernet Network Adapter E810-XXV-4"),


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1057,6 +1057,13 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
break;
}
if (!status) {
status = ice_set_vlan_mode(hw);
if (status)
ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
status);
}
ice_release_global_cfg_lock(hw);
return status;
@ -1126,34 +1133,40 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
static enum ice_status
ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
struct ice_global_metadata_seg *meta_seg;
struct ice_generic_seg_hdr *seg_hdr;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (!pkg_hdr)
return ICE_ERR_PARAM;
meta_seg = (struct ice_global_metadata_seg *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
if (meta_seg) {
hw->pkg_ver = meta_seg->pkg_ver;
ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
seg_hdr = (struct ice_generic_seg_hdr *)
ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
struct ice_meta_sect *meta;
struct ice_pkg_enum state;
ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
/* Get package information from the Metadata Section */
meta = (struct ice_meta_sect *)
ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
ICE_SID_METADATA);
if (!meta) {
ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
return ICE_ERR_CFG;
}
hw->pkg_ver = meta->ver;
ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name),
ICE_NONDMA_TO_NONDMA);
ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
meta_seg->pkg_name);
} else {
ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
return ICE_ERR_CFG;
}
meta->ver.major, meta->ver.minor, meta->ver.update,
meta->ver.draft, meta->name);
seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
if (seg_hdr) {
hw->ice_pkg_ver = seg_hdr->seg_format_ver;
ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id,
sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA);
ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
seg_hdr->seg_format_ver.major,
@ -1909,7 +1922,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
bld->reserved_section_table_entries += count;
data_end = LE16_TO_CPU(buf->data_end) +
(count * sizeof(buf->section_entry[0]));
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
return ICE_SUCCESS;
@ -2041,7 +2054,7 @@ ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
bld->reserved_section_table_entries -= count;
data_end = LE16_TO_CPU(buf->data_end) -
(count * sizeof(buf->section_entry[0]));
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
return ICE_SUCCESS;


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -142,6 +142,7 @@ struct ice_buf_hdr {
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
@ -308,6 +309,13 @@ enum ice_sect {
#define ICE_PTYPE_IPV6_SCTP_PAY 93
#define ICE_PTYPE_IPV6_ICMP_PAY 94
struct ice_meta_sect {
struct ice_pkg_ver ver;
#define ICE_META_SECT_NAME_SIZE 28
char name[ICE_META_SECT_NAME_SIZE];
__le32 track_id;
};
/* Packet Type Groups (PTG) - Inner Most fields (IM) */
#define ICE_PTG_IM_IPV4_TCP 16
#define ICE_PTG_IM_IPV4_UDP 17
@ -473,6 +481,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GTP,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -240,7 +240,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
};
/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
static const u32 ice_ipv4_ofos_no_l4[] = {
static const u32 ice_ptypes_ipv4_ofos_no_l4[] = {
0x10C00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -252,7 +252,7 @@ static const u32 ice_ipv4_ofos_no_l4[] = {
};
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
static const u32 ice_ptypes_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
0x00000008, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -264,7 +264,7 @@ static const u32 ice_ipv4_il_no_l4[] = {
};
/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
static const u32 ice_ipv6_ofos_no_l4[] = {
static const u32 ice_ptypes_ipv6_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x43000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -276,7 +276,7 @@ static const u32 ice_ipv6_ofos_no_l4[] = {
};
/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
static const u32 ice_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_ipv6_il_no_l4[] = {
0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
0x00000430, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@ -493,8 +493,8 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
(const ice_bitmap_t *)ice_ipv4_il_no_l4;
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 :
(const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
@ -504,8 +504,8 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
(const ice_bitmap_t *)ice_ipv6_il_no_l4;
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 :
(const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
@ -596,7 +596,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
else
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
break;
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
@ -609,7 +609,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
else
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
break;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
@ -1269,36 +1269,48 @@ ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
/**
* ice_flow_set_rss_seg_info - setup packet segments for RSS
* @segs: pointer to the flow field segment(s)
* @hash_fields: fields to be hashed on for the segment(s)
* @flow_hdr: protocol header fields within a packet segment
* @seg_cnt: segment count
* @cfg: configure parameters
*
* Helper function to extract fields from hash bitmap and use flow
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
static enum ice_status
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_flow_seg_info *seg;
u64 val;
u8 i;
ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
/* set inner most segment */
seg = &segs[seg_cnt - 1];
ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds,
ICE_FLOW_FIELD_IDX_MAX)
ice_flow_set_fld(segs, (enum ice_flow_field)i,
ice_flow_set_fld(seg, (enum ice_flow_field)i,
ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
ICE_FLOW_SET_HDRS(segs, flow_hdr);
ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
/* set outer most header */
if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6)
segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER;
if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !ice_is_pow2(val))
return ICE_ERR_CFG;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !ice_is_pow2(val))
return ICE_ERR_CFG;
@ -1370,6 +1382,29 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
return status;
}
/**
* ice_get_rss_hdr_type - get a RSS profile's header type
* @prof: RSS flow profile
*/
static enum ice_rss_cfg_hdr_type
ice_get_rss_hdr_type(struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS;
if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) {
hdr_type = ICE_RSS_OUTER_HEADERS;
} else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) {
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE)
hdr_type = ICE_RSS_INNER_HEADERS;
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6)
hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
}
return hdr_type;
}
/**
* ice_rem_rss_list - remove RSS configuration from list
* @hw: pointer to the hardware structure
@ -1381,16 +1416,19 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
static void
ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *tmp;
/* Search for RSS hash fields associated to the VSI that match the
* hash configurations associated to the flow profile. If found
* remove from the RSS entry list of the VSI context and delete entry.
*/
hdr_type = ice_get_rss_hdr_type(prof);
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
ice_rss_cfg, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_clear_bit(vsi_handle, r->vsis);
if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
LIST_DEL(&r->l_entry);
@ -1411,12 +1449,15 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
static enum ice_status
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
struct ice_rss_cfg *r, *rss_cfg;
hdr_type = ice_get_rss_hdr_type(prof);
LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
ice_rss_cfg, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match &&
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_set_bit(vsi_handle, r->vsis);
return ICE_SUCCESS;
}
@ -1425,8 +1466,10 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
if (!rss_cfg)
return ICE_ERR_NO_MEMORY;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs;
rss_cfg->hash.hdr_type = hdr_type;
rss_cfg->hash.symm = prof->cfg.symm;
ice_set_bit(vsi_handle, rss_cfg->vsis);
LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
@ -1438,52 +1481,54 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
#define ICE_FLOW_PROF_HDR_S 32
#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
#define ICE_FLOW_PROF_ENCAP_S 63
#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
#define ICE_RSS_OUTER_HEADERS 1
#define ICE_RSS_INNER_HEADERS 2
#define ICE_FLOW_PROF_ENCAP_S 62
#define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S)
/* Flow profile ID format:
* [0:31] - Packet match fields
* [32:62] - Protocol header
* [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
* [32:61] - Protocol header
* [62:63] - Encapsulation flag:
* 0 if non-tunneled
* 1 if tunneled
* 2 for tunneled with outer ipv4
* 3 for tunneled with outer ipv6
*/
#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
#define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \
(u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
(((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
(((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))
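As a worked illustration of the new ID layout, using hypothetical field values rather than ones taken from the driver:

ICE_FLOW_GEN_PROFID(0x0000000C, 0x30, 2)
	= 0x000000000000000C		/* match fields, bits 0:31 */
	| (0x30ULL << 32)		/* protocol header, bits 32:61 */
	| (2ULL << 62)			/* encap = 2: tunneled, outer IPv4 */
	= 0x800000300000000C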
/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
* @segs_cnt: packet segment count
* @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static enum ice_status
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
enum ice_status status;
u8 segs_cnt;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
if (cfg->symm)
return ICE_ERR_PARAM;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
if (!segs)
return ICE_ERR_NO_MEMORY;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
addl_hdrs);
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto exit;
@ -1537,9 +1582,9 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* segment information.
*/
status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
ICE_FLOW_GEN_PROFID(hashed_flds,
ICE_FLOW_GEN_PROFID(cfg->hash_flds,
segs[segs_cnt - 1].hdrs,
segs_cnt),
cfg->hdr_type),
segs, segs_cnt, NULL, 0, &prof);
if (status)
goto exit;
@ -1555,6 +1600,8 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
status = ice_add_rss_list(hw, vsi_handle, prof);
prof->cfg.symm = cfg->symm;
exit:
ice_free(hw, segs);
return status;
@ -1564,30 +1611,40 @@ ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_add_rss_cfg - add an RSS configuration with specified hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
* @addl_hdrs: protocol header fields
* @cfg: configure parameters
*
* This function will generate a flow profile based on fields associated with
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
ice_acquire_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
ice_release_lock(&hw->rss_locks);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
ice_acquire_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
ice_release_lock(&hw->rss_locks);
} else {
ice_acquire_lock(&hw->rss_locks);
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_add_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
ice_release_lock(&hw->rss_locks);
}
return status;
}
@ -1596,29 +1653,29 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg_sync - remove an existing RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
* @segs_cnt: packet segment count
* @cfg: configure parameters
*
* Assumption: lock has already been acquired for RSS list
*/
static enum ice_status
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
enum ice_status status;
u8 segs_cnt;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX;
segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
sizeof(*segs));
if (!segs)
return ICE_ERR_NO_MEMORY;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
addl_hdrs);
status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg);
if (status)
goto out;
@ -1651,8 +1708,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
* @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
* @addl_hdrs: Protocol header fields within a packet segment
* @cfg: configure parameters
*
* This function will lookup the flow profile based on the input
* hash field bitmap, iterate through the profile entry list of
@ -1661,21 +1717,31 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* turn build or update buffers for RSS XLT1 section.
*/
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
enum ice_status status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
if (!ice_is_vsi_valid(hw, vsi_handle) ||
!cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
cfg->hash_flds == ICE_HASH_INVALID)
return ICE_ERR_PARAM;
ice_acquire_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
ICE_RSS_OUTER_HEADERS);
if (!status)
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
addl_hdrs, ICE_RSS_INNER_HEADERS);
local_cfg = *cfg;
if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) {
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
} else {
local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg);
if (!status) {
local_cfg.hdr_type = ICE_RSS_INNER_HEADERS;
status = ice_rem_rss_cfg_sync(hw, vsi_handle,
&local_cfg);
}
}
ice_release_lock(&hw->rss_locks);
return status;
@ -1727,6 +1793,7 @@ enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
enum ice_status status = ICE_SUCCESS;
struct ice_rss_hash_cfg hcfg;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
@ -1798,8 +1865,11 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
if (rss_hash == ICE_HASH_INVALID)
return ICE_ERR_OUT_OF_RANGE;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
hcfg.hash_flds = rss_hash;
hcfg.symm = false;
hcfg.hdr_type = ICE_RSS_ANY_HEADERS;
status = ice_add_rss_cfg(hw, vsi_handle, &hcfg);
if (status)
break;
}
@ -1824,16 +1894,7 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
ice_rss_cfg, l_entry) {
if (ice_is_bit_set(r->vsis, vsi_handle)) {
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
r->packet_hdr,
ICE_RSS_OUTER_HEADERS);
if (status)
break;
status = ice_add_rss_cfg_sync(hw, vsi_handle,
r->hashed_flds,
r->packet_hdr,
ICE_RSS_INNER_HEADERS);
status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash);
if (status)
break;
}
@ -1865,8 +1926,8 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
ice_rss_cfg, l_entry)
if (ice_is_bit_set(r->vsis, vsi_handle) &&
r->packet_hdr == hdrs) {
rss_hash = r->hashed_flds;
r->hash.addl_hdrs == hdrs) {
rss_hash = r->hash.hash_flds;
break;
}
ice_release_lock(&hw->rss_locks);


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -185,17 +185,22 @@ enum ice_flow_avf_hdr_field {
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
enum ice_rss_hash_func {
ICE_RSS_HASH_TOEPLITZ = 0,
ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1,
ICE_RSS_HASH_XOR = 2,
ICE_RSS_HASH_JHASH = 3,
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */
/* take inner headers as inputset for packet with outer ipv4. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV4,
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
ICE_RSS_ANY_HEADERS
};
struct ice_rss_hash_cfg {
u32 addl_hdrs;
u64 hash_flds;
enum ice_rss_hash_func hash_func;
u32 addl_hdrs; /* protocol header fields */
u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */
enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */
bool symm; /* symmetric or asymmetric hash */
};
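A minimal example of filling in the reworked configuration structure, mirroring how ice_set_rss_flow_flds() uses it later in this diff (values are illustrative; hw and vsi_handle are assumed to be in scope):

struct ice_rss_hash_cfg cfg = { 0 };
enum ice_status status;

cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
cfg.hash_flds = ICE_HASH_TCP_IPV4;
cfg.hdr_type = ICE_RSS_ANY_HEADERS;	/* program outer, then inner headers */
cfg.symm = false;			/* symmetric hash not supported here */
status = ice_add_rss_cfg(hw, vsi_handle, &cfg);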
enum ice_flow_dir {
@ -211,6 +216,7 @@ enum ice_flow_priority {
ICE_FLOW_PRIO_HIGH
};
#define ICE_FLOW_SEG_SINGLE 1
#define ICE_FLOW_SEG_MAX 2
#define ICE_FLOW_PROFILE_MAX 1024
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
@ -274,6 +280,7 @@ struct ice_flow_prof {
union {
/* struct sw_recipe */
bool symm; /* Symmetric Hash for RSS */
} cfg;
};
@ -281,8 +288,7 @@ struct ice_rss_cfg {
struct LIST_ENTRY_TYPE l_entry;
/* bitmap of VSIs added to the RSS entry */
ice_declare_bitmap(vsis, ICE_MAX_VSI);
u64 hashed_flds;
u32 packet_hdr;
struct ice_rss_hash_cfg hash;
};
enum ice_flow_action_type {
@ -333,10 +339,10 @@ enum ice_status
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
enum ice_status
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
#endif /* _ICE_FLOW_H_ */


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -2206,9 +2206,9 @@ ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf)
static void
ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf)
{
struct ice_nvm_info *nvm = &hw->nvm;
struct ice_orom_info *orom = &nvm->orom;
struct ice_netlist_ver_info *netlist_ver = &hw->netlist_ver;
struct ice_nvm_info *nvm = &hw->flash.nvm;
struct ice_orom_info *orom = &hw->flash.orom;
struct ice_netlist_info *netlist = &hw->flash.netlist;
/* Note that the netlist versions are stored in packed Binary Coded
* Decimal format. The use of '%x' will correctly display these as
@ -2220,10 +2220,10 @@ ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf)
"fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
hw->api_maj_ver, hw->api_min_ver,
nvm->major_ver, nvm->minor_ver, nvm->eetrack,
netlist_ver->major, netlist_ver->minor,
netlist_ver->type >> 16, netlist_ver->type & 0xFFFF,
netlist_ver->rev, netlist_ver->cust_ver, netlist_ver->hash,
nvm->major, nvm->minor, nvm->eetrack,
netlist->major, netlist->minor,
netlist->type >> 16, netlist->type & 0xFFFF,
netlist->rev, netlist->cust_ver, netlist->hash,
orom->major, orom->build, orom->patch);
}
@ -5892,6 +5892,7 @@ ice_set_rss_flow_flds(struct ice_vsi *vsi)
{
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false };
device_t dev = sc->dev;
enum ice_status status;
u_int rss_hash_config;
@ -5899,48 +5900,54 @@ ice_set_rss_flow_flds(struct ice_vsi *vsi)
rss_hash_config = rss_gethashconfig();
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4;
rss_cfg.hash_flds = ICE_FLOW_HASH_IPV4;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n",
vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
rss_cfg.hash_flds = ICE_HASH_TCP_IPV4;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n",
vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP;
rss_cfg.hash_flds = ICE_HASH_UDP_IPV4;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n",
vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6;
rss_cfg.hash_flds = ICE_FLOW_HASH_IPV6;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n",
vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP;
rss_cfg.hash_flds = ICE_HASH_TCP_IPV6;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n",
vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
}
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) {
status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP;
rss_cfg.hash_flds = ICE_HASH_UDP_IPV6;
status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
if (status)
device_printf(dev,
"ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n",
@ -5972,6 +5979,7 @@ ice_set_rss_lut(struct ice_vsi *vsi)
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct ice_aq_get_set_rss_lut_params lut_params;
enum ice_status status;
int i, err = 0;
u8 *lut;
@ -5992,8 +6000,12 @@ ice_set_rss_lut(struct ice_vsi *vsi)
lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues;
}
status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
lut, vsi->rss_table_size);
lut_params.vsi_handle = vsi->idx;
lut_params.lut_size = vsi->rss_table_size;
lut_params.lut_type = vsi->rss_lut_type;
lut_params.lut = lut;
lut_params.global_lut_id = 0;
status = ice_aq_set_rss_lut(hw, &lut_params);
if (status) {
device_printf(dev,
"Cannot set RSS lut, err %s aq_err %s\n",

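For reference, a minimal caller-side sketch of the calling conventions used above; it only rearranges what the hunks already show (struct ice_rss_hash_cfg and struct ice_aq_get_set_rss_lut_params), and the example_* helper names are illustrative:

/* Sketch only: mirrors the pattern in ice_set_rss_flow_flds() and
 * ice_set_rss_lut() after this change.
 */
static enum ice_status
example_add_tcp4_rss(struct ice_hw *hw, struct ice_vsi *vsi)
{
	struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false };

	rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP;
	rss_cfg.hash_flds = ICE_HASH_TCP_IPV4;

	/* The hash configuration is now passed by pointer instead of as two
	 * scalar arguments.
	 */
	return ice_add_rss_cfg(hw, vsi->idx, &rss_cfg);
}

static enum ice_status
example_set_lut(struct ice_hw *hw, struct ice_vsi *vsi, u8 *lut)
{
	struct ice_aq_get_set_rss_lut_params lut_params;

	lut_params.vsi_handle = vsi->idx;
	lut_params.lut_size = vsi->rss_table_size;
	lut_params.lut_type = vsi->rss_lut_type;
	lut_params.lut = lut;
	lut_params.global_lut_id = 0;

	return ice_aq_set_rss_lut(hw, &lut_params);
}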

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -54,6 +54,16 @@
#define ICE_NVM_REG_RW_MODULE 0x0
#define ICE_NVM_REG_RW_FLAGS 0x1
#pragma pack(1)
struct ice_orom_civd_info {
u8 signature[4]; /* Must match ASCII '$CIV' characters */
u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
__le32 combo_ver; /* Combo Image Version number */
u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
};
#pragma pack()
#define ICE_NVM_ACCESS_MAJOR_VER 0
#define ICE_NVM_ACCESS_MINOR_VER 5
@ -126,6 +136,16 @@ enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
enum ice_status
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
enum ice_status
ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
enum ice_status
ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
enum ice_status
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
@ -147,4 +167,6 @@ ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw);
enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
#endif /* _ICE_NVM_H_ */


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,6 +72,7 @@ enum ice_protocol_type {
ICE_GENEVE,
ICE_VXLAN_GPE,
ICE_NVGRE,
ICE_GTP,
ICE_PROTOCOL_LAST
};
@ -87,6 +88,14 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
* and GENEVE
*/
ICE_SW_TUN_IPV4_GTP_IPV4_TCP,
ICE_SW_TUN_IPV4_GTP_IPV4_UDP,
ICE_SW_TUN_IPV4_GTP_IPV6_TCP,
ICE_SW_TUN_IPV4_GTP_IPV6_UDP,
ICE_SW_TUN_IPV6_GTP_IPV4_TCP,
ICE_SW_TUN_IPV6_GTP_IPV4_UDP,
ICE_SW_TUN_IPV6_GTP_IPV6_TCP,
ICE_SW_TUN_IPV6_GTP_IPV6_UDP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@ -143,6 +152,7 @@ enum ice_prot_id {
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
#define ICE_IPV4_IL_HW 33
@ -248,6 +258,20 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */
};
struct ice_udp_gtp_hdr {
u8 flags;
u8 msg_type;
__be16 rsrvd_len;
__be32 teid;
__be16 rsrvd_seq_nbr;
u8 rsrvd_n_pdu_nbr;
u8 rsrvd_next_ext;
u8 rsvrd_ext_len;
u8 pdu_type;
u8 qfi;
u8 rsvrd;
};
struct ice_nvgre {
__be16 flags;
__be16 protocol;
@ -264,6 +288,7 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
};
/* This is mapping table entry that maps every word within a given protocol


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -755,15 +755,15 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
u16 ln;
struct ice_hw *hw = pi->hw;
for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln],
&hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
struct ice_hw *hw = pi->hw;
enum ice_status status;
rl_prof_elem->prof_id_ref = 0;
@ -1288,7 +1288,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
pi->port_state = ICE_SCHED_PORT_STATE_READY;
ice_init_lock(&pi->sched_lock);
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
INIT_LIST_HEAD(&pi->rl_prof_list[i]);
INIT_LIST_HEAD(&hw->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
@ -2277,9 +2277,9 @@ static enum ice_status
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
enum ice_status status = ICE_SUCCESS;
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
enum ice_status status = ICE_SUCCESS;
u16 i, grps_movd = 0;
struct ice_hw *hw;
u16 buf_len;
@ -2690,10 +2690,9 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
/* Create new entry for new aggregator ID */
agg_info = (struct ice_sched_agg_info *)
ice_malloc(hw, sizeof(*agg_info));
if (!agg_info) {
status = ICE_ERR_NO_MEMORY;
goto exit_reg_agg;
}
if (!agg_info)
return ICE_ERR_NO_MEMORY;
agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type;
agg_info->tc_bitmap[0] = 0;
@ -2726,7 +2725,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
/* Save aggregator node's TC information */
ice_set_bit(tc, agg_info->tc_bitmap);
}
exit_reg_agg:
return status;
}
@ -2880,40 +2879,31 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
if (status)
break;
if (agg_id != ICE_DFLT_AGG_ID)
ice_set_bit(tc, agg_vsi_info->tc_bitmap);
else
ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
}
/* If VSI moved back to default aggregator, delete agg_vsi_info. */
if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
ICE_MAX_TRAFFIC_CLASS)) {
LIST_DEL(&agg_vsi_info->list_entry);
ice_free(hw, agg_vsi_info);
ice_set_bit(tc, agg_vsi_info->tc_bitmap);
}
return status;
}
/**
* ice_sched_rm_unused_rl_prof - remove unused RL profile
* @pi: port information structure
* @hw: pointer to the hardware structure
*
* This function removes unused rate limit profiles from the HW and
* SW DB. The caller needs to hold scheduler lock.
*/
static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
{
u16 ln;
for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln],
&hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n");
if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
}
}
}
@ -3059,7 +3049,7 @@ enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
ice_free(pi->hw, agg_info);
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
ice_sched_rm_unused_rl_prof(pi->hw);
exit_ice_rm_agg_cfg:
ice_release_lock(&pi->sched_lock);
@ -3170,12 +3160,6 @@ static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
bw_t_info->eir_bw.bw = 0;
} else {
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element.
* First clear earlier saved shared BW information.
*/
ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
bw_t_info->shared_bw = 0;
/* save EIR BW information */
ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
bw_t_info->eir_bw.bw = bw;
@ -3195,12 +3179,6 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
bw_t_info->shared_bw = 0;
} else {
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element.
* First clear earlier saved EIR BW information.
*/
ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
bw_t_info->eir_bw.bw = 0;
/* save shared BW information */
ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
bw_t_info->shared_bw = bw;
@ -3473,15 +3451,19 @@ ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
* @pi: port information structure
* @vsi_handle: software VSI handle
* @bw: bandwidth in Kbps
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function Configures shared rate limiter(SRL) of all VSI type nodes
* across all traffic classes for VSI matching handle.
* Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
* classes for VSI matching handle.
*/
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw)
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, bw);
return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw,
shared_bw);
}
/**
@ -3496,6 +3478,8 @@ enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW);
}
@ -3503,15 +3487,19 @@ ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
* ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
* @pi: port information structure
* @agg_id: aggregator ID
* @bw: bandwidth in Kbps
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function configures the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, bw);
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw,
shared_bw);
}
/**
@ -3525,7 +3513,47 @@ ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW);
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW);
}
/**
* ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function configures the shared rate limiter (SRL) of aggregator type
* nodes for the given traffic class for aggregator matching agg_id.
*/
enum ice_status
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw,
max_bw, shared_bw);
}
/**
* ice_cfg_agg_bw_no_shared_lmt_per_tc - remove aggregator BW shared limit per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
*
* This function removes the shared rate limiter (SRL) of aggregator type
* nodes for the given traffic class for aggregator matching agg_id.
*/
enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW,
ICE_SCHED_DFLT_BW);
}
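A hedged usage sketch of the widened shared-limit API (a min/max/shared triplet instead of a single value); the aggregator ID, traffic class, and bandwidth numbers below are placeholders:

/* Sketch only: configure and later clear a per-TC shared limit on an
 * aggregator. Passing ICE_SCHED_DFLT_BW removes the corresponding limit,
 * which is what the "no shared limit" wrapper does for all three values.
 */
static enum ice_status
example_agg_srl(struct ice_port_info *pi)
{
	u32 agg_id = 1;		/* placeholder aggregator ID */
	u8 tc = 0;		/* traffic class 0 */
	enum ice_status status;

	/* min/max/shared limits of 10/100/50 Mbps, expressed in Kbps */
	status = ice_cfg_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
						  10000, 100000, 50000);
	if (status)
		return status;

	return ice_cfg_agg_bw_no_shared_lmt_per_tc(pi, agg_id, tc);
}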
/**
@ -3880,7 +3908,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
/**
* ice_sched_add_rl_profile - add RL profile
* @pi: port information structure
* @hw: pointer to the hardware structure
* @rl_type: type of rate limit BW - min, max, or shared
* @bw: bandwidth in Kbps - Kilo bits per sec
* @layer_num: specifies in which layer to create profile
@ -3892,14 +3920,13 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
* The caller needs to hold the scheduler lock.
*/
static struct ice_aqc_rl_profile_info *
ice_sched_add_rl_profile(struct ice_port_info *pi,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
@ -3918,10 +3945,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
if (!pi)
if (!hw)
return NULL;
hw = pi->hw;
LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type && rl_prof_elem->bw == bw)
@ -3954,7 +3980,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
/* Good entry - add in the list */
rl_prof_elem->prof_id_ref = 0;
LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
return rl_prof_elem;
exit_add_rl_prof:
@ -3986,37 +4012,10 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
break;
case ICE_MAX_BW:
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
*/
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
return ICE_ERR_CFG;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
break;
case ICE_SHARED_BW:
/* Check for removing shared BW */
if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
/* remove shared profile */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
data->srl_id = 0; /* clear SRL field */
/* enable back EIR to default profile */
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx =
CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
break;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
*/
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(LE16_TO_CPU(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID))
return ICE_ERR_CFG;
/* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */
data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
data->srl_id = CPU_TO_LE16(rl_prof_id);
break;
@ -4133,7 +4132,7 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
/**
* ice_sched_rm_rl_profile - remove RL profile ID
* @pi: port information structure
* @hw: pointer to the hardware structure
* @layer_num: layer number where profiles are saved
* @profile_type: profile type like EIR, CIR, or SRL
* @profile_id: profile ID to remove
@ -4143,7 +4142,7 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* scheduler lock.
*/
static enum ice_status
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
@ -4152,7 +4151,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type &&
@ -4162,9 +4161,9 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
rl_prof_elem->prof_id_ref--;
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
if (status == ICE_ERR_IN_USE)
@ -4224,52 +4223,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
old_id == ICE_SCHED_INVAL_PROF_ID)
return ICE_SUCCESS;
return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
}
/**
* ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
* @pi: port information structure
* @node: pointer to node structure
* @layer_num: layer number where rate limit profiles are saved
* @rl_type: rate limit type min, max, or shared
* @bw: bandwidth value
*
* This function prepares node element's bandwidth to SRL or EIR exclusively.
* EIR BW and Shared BW profiles are mutually exclusive and hence only one of
* them may be set for any given element. This function needs to be called
* with the scheduler lock held.
*/
static enum ice_status
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
struct ice_sched_node *node,
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
{
if (rl_type == ICE_SHARED_BW) {
/* SRL node passed in this case, it may be different node */
if (bw == ICE_SCHED_DFLT_BW)
/* SRL being removed, ice_sched_cfg_node_bw_lmt()
* enables EIR to default. EIR is not set in this
* case, so no additional action is required.
*/
return ICE_SUCCESS;
/* SRL being configured, set EIR to default here.
* ice_sched_cfg_node_bw_lmt() disables EIR when it
* configures SRL
*/
return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
layer_num);
} else if (rl_type == ICE_MAX_BW &&
node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
/* Remove Shared profile. Set default shared BW call
* removes shared profile for a node.
*/
return ice_sched_set_node_bw_dflt(pi, node,
ICE_SHARED_BW,
layer_num);
}
return ICE_SUCCESS;
return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
/**
@ -4293,7 +4247,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
if (!rl_prof_info)
return status;
@ -4315,7 +4269,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
return ICE_SUCCESS;
return ice_sched_rm_rl_profile(pi, layer_num,
return ice_sched_rm_rl_profile(hw, layer_num,
rl_prof_info->profile.flags &
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
@ -4329,14 +4283,14 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
*
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*
* NOTE: Caller provides the correct SRL node in case of shared profile
* settings.
*/
static enum ice_status
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_sched_node *cfg_node = node;
enum ice_status status;
struct ice_hw *hw;
u8 layer_num;
@ -4344,29 +4298,16 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
return ICE_ERR_PARAM;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
ice_sched_rm_unused_rl_prof(hw);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node)
return ICE_ERR_CFG;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
*/
status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
bw);
if (status)
return status;
if (bw == ICE_SCHED_DFLT_BW)
return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
layer_num);
return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num);
return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num);
}
/**
@ -4926,19 +4867,108 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
return ICE_SUCCESS;
}
/**
* ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: traffic class
* @srl_node: sched node to configure
* @rl_type: rate limit type minimum, maximum, or shared
* @bw: minimum, maximum, or shared bandwidth in Kbps
*
* Configure shared rate limiter(SRL) of VSI type nodes across given traffic
* class, and saves those values for later use for replay purposes. The
* caller holds the scheduler lock.
*/
static enum ice_status
ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
enum ice_status status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
} else {
status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
if (status)
return status;
status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
}
return status;
}
/**
* ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: traffic class
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* Configure shared rate limiter(SRL) of VSI type nodes across requested
* traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW
* is passed, it removes the corresponding bw from the node. The caller
* holds scheduler lock.
*/
static enum ice_status
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
enum ice_status status;
u8 layer_num;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_CFG;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
vsi_node->tx_sched_layer);
if (layer_num >= pi->hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(vsi_node, layer_num);
if (!cfg_node)
return ICE_ERR_CFG;
status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
cfg_node, ICE_MIN_BW,
min_bw);
if (status)
return status;
status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc,
cfg_node, ICE_MAX_BW,
max_bw);
if (status)
return status;
return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node,
ICE_SHARED_BW, shared_bw);
}
/**
* ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
* @pi: port information structure
* @vsi_handle: software VSI handle
* @bw: bandwidth in Kbps
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function Configures shared rate limiter(SRL) of all VSI type nodes
* across all traffic classes for VSI matching handle. When BW value of
* ICE_SCHED_DFLT_BW is passed, it removes the SRL from the node.
* Configure shared rate limiter(SRL) of all VSI type nodes across all traffic
* classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
* passed, it removes those value(s) from the node.
*/
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 bw)
u32 min_bw, u32 max_bw, u32 shared_bw)
{
enum ice_status status = ICE_SUCCESS;
u8 tc;
@ -4956,7 +4986,6 @@ ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
/* Return success if no nodes are present across TC */
ice_for_each_traffic_class(tc) {
struct ice_sched_node *tc_node, *vsi_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@ -4966,16 +4995,9 @@ ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_node)
continue;
if (bw == ICE_SCHED_DFLT_BW)
/* It removes existing SRL from the node */
status = ice_sched_set_node_bw_dflt_lmt(pi, vsi_node,
rl_type);
else
status = ice_sched_set_node_bw_lmt(pi, vsi_node,
rl_type, bw);
if (status)
break;
status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc,
min_bw, max_bw,
shared_bw);
if (status)
break;
}
@ -5043,32 +5065,23 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
}
/**
* ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
* ice_sched_validate_agg_id - Validate aggregator id
* @pi: port information structure
* @agg_id: aggregator ID
* @bw: bandwidth in Kbps
*
* This function configures the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id. When
* BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
* node(s).
* This function validates aggregator id. Caller holds the scheduler lock.
*/
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
static enum ice_status
ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *tmp;
bool agg_id_present = false;
enum ice_status status = ICE_SUCCESS;
u8 tc;
enum ice_status status;
if (!pi)
return ICE_ERR_PARAM;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_validate_agg_srl_node(pi, agg_id);
if (status)
goto exit_agg_bw_shared_lmt;
return status;
LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
ice_sched_agg_info, list_entry)
@ -5077,14 +5090,129 @@ ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
break;
}
if (!agg_id_present) {
status = ICE_ERR_PARAM;
goto exit_agg_bw_shared_lmt;
if (!agg_id_present)
return ICE_ERR_PARAM;
return ICE_SUCCESS;
}
/**
* ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
* @srl_node: sched node to configure
* @rl_type: rate limit type minimum, maximum, or shared
* @bw: minimum, maximum, or shared bandwidth in Kbps
*
* Configure shared rate limiter(SRL) of aggregator type nodes across
* requested traffic class, and saves those values for later use for
* replaying purposes. The caller holds the scheduler lock.
*/
static enum ice_status
ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
enum ice_status status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
} else {
status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw);
if (status)
return status;
status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
}
return status;
}
/**
* ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function configures the shared rate limiter(SRL) of aggregator type
* node for a given traffic class for aggregator matching agg_id. When BW
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller
* holds the scheduler lock.
*/
static enum ice_status
ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *agg_node, *cfg_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
enum ice_status status = ICE_ERR_CFG;
u8 layer_num;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_CFG;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
return ICE_ERR_CFG;
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
agg_node->tx_sched_layer);
if (layer_num >= pi->hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(agg_node, layer_num);
if (!cfg_node)
return ICE_ERR_CFG;
status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
ICE_MIN_BW, min_bw);
if (status)
return status;
status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
ICE_MAX_BW, max_bw);
if (status)
return status;
status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node,
ICE_SHARED_BW, shared_bw);
return status;
}
/**
* ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
* @pi: port information structure
* @agg_id: aggregator ID
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function configures the shared rate limiter(SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id. When
* BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
* node(s).
*/
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
enum ice_status status;
u8 tc;
if (!pi)
return ICE_ERR_PARAM;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_validate_agg_id(pi, agg_id);
if (status)
goto exit_agg_bw_shared_lmt;
/* Return success if no nodes are present across TC */
ice_for_each_traffic_class(tc) {
enum ice_rl_type rl_type = ICE_SHARED_BW;
struct ice_sched_node *tc_node, *agg_node;
tc_node = ice_sched_get_tc_node(pi, tc);
@ -5095,16 +5223,9 @@ ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
if (!agg_node)
continue;
if (bw == ICE_SCHED_DFLT_BW)
/* It removes existing SRL from the node */
status = ice_sched_set_node_bw_dflt_lmt(pi, agg_node,
rl_type);
else
status = ice_sched_set_node_bw_lmt(pi, agg_node,
rl_type, bw);
if (status)
break;
status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc,
min_bw, max_bw,
shared_bw);
if (status)
break;
}
@ -5114,6 +5235,41 @@ ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
return status;
}
/**
* ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc
* @pi: port information structure
* @agg_id: aggregator ID
* @tc: traffic class
* @min_bw: minimum bandwidth in Kbps
* @max_bw: maximum bandwidth in Kbps
* @shared_bw: shared bandwidth in Kbps
*
* This function configures the shared rate limiter(SRL) of aggregator type
* node for a given traffic class for aggregator matching agg_id. When BW
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node.
*/
enum ice_status
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw)
{
enum ice_status status;
if (!pi)
return ICE_ERR_PARAM;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_validate_agg_id(pi, agg_id);
if (status)
goto exit_agg_bw_shared_lmt_per_tc;
status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw,
max_bw, shared_bw);
exit_agg_bw_shared_lmt_per_tc:
ice_release_lock(&pi->sched_lock);
return status;
}
/**
* ice_sched_cfg_sibl_node_prio - configure node sibling priority
* @pi: port information structure


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -181,14 +181,22 @@ enum ice_status
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type);
enum ice_status
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw);
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw);
enum ice_status
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle);
enum ice_status
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
enum ice_status
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id);
enum ice_status
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw);
enum ice_status
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc);
enum ice_status
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio);
enum ice_status
@ -212,9 +220,14 @@ ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 bw);
u32 min_bw, u32 max_bw, u32 shared_bw);
enum ice_status
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
enum ice_status
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw);
enum ice_status
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority);
@ -222,4 +235,12 @@ enum ice_status
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,7 +31,6 @@
/*$FreeBSD$*/
#include "ice_common.h"
#include "ice_adminq_cmd.h"
#include "ice_sriov.h"
/**
@ -191,3 +190,407 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
return speed;
}
/* The mailbox overflow detection algorithm helps to check if there
* is a possibility of a malicious VF transmitting too many MBX messages to the
* PF.
* 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
* driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
* The struct ice_mbx_snapshot helps to track and traverse a static window of
* messages within the mailbox queue while looking for a malicious VF.
*
* 2. When the caller starts processing its mailbox queue in response to an
* interrupt, the structure ice_mbx_snapshot is expected to be cleared before
* the algorithm can be run for the first time for that interrupt. This can be
* done via ice_mbx_reset_snapshot().
*
* 3. For every message read by the caller from the MBX Queue, the caller must
* call the detection algorithm's entry function ice_mbx_vf_state_handler().
* Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
* filled as it is required to be passed to the algorithm.
*
* 4. Every time a message is read from the MBX queue, a VFId is received which
* is passed to the state handler. The boolean output is_malvf of the state
* handler ice_mbx_vf_state_handler() serves as an indicator to the caller
* whether this VF is malicious or not.
*
* 5. When a VF is identified to be malicious, the caller can send a message
* to the system administrator. The caller can invoke ice_mbx_report_malvf()
* to help determine if a malicious VF is to be reported or not. This function
* requires the caller to maintain a global bitmap to track all malicious VFs
* and pass that to ice_mbx_report_malvf() along with the VFID which was identified
* to be malicious by ice_mbx_vf_state_handler().
*
* 6. The global bitmap maintained by PF can be cleared completely if PF is in
* reset or the bit corresponding to a VF can be cleared if that VF is in reset.
* When a VF is shut down and brought back up, we assume that the new VF
* is not malicious, and so it will be reported again if it is later found
* to be malicious.
*
* 7. The function ice_mbx_reset_snapshot() is called to reset the information
* in ice_mbx_snapshot for every new mailbox interrupt handled.
*
* 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
* when driver is unloaded.
*/
#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
* the max messages check must be ignored in the algorithm
*/
#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
/**
* ice_mbx_traverse - Pass through mailbox snapshot
* @hw: pointer to the HW struct
* @new_state: new algorithm state
*
* Traversing the mailbox static snapshot without checking
* for malicious VFs.
*/
static void
ice_mbx_traverse(struct ice_hw *hw,
enum ice_mbx_snapshot_state *new_state)
{
struct ice_mbx_snap_buffer_data *snap_buf;
u32 num_iterations;
snap_buf = &hw->mbx_snapshot.mbx_buf;
/* As mailbox buffer is circular, applying a mask
* on the incremented iteration count.
*/
num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
/* Checking either of the below conditions to exit snapshot traversal:
* Condition-1: If the number of iterations in the mailbox is equal to
* the mailbox head which would indicate that we have reached the end
* of the static snapshot.
* Condition-2: If the maximum messages serviced in the mailbox for a
* given interrupt is the highest possible value then there is no need
* to check if the number of messages processed is equal to it. If not
* check if the number of messages processed is greater than or equal
* to the maximum number of mailbox entries serviced in current work item.
*/
if (num_iterations == snap_buf->head ||
(snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
*new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}
/**
* ice_mbx_detect_malvf - Detect malicious VF in snapshot
* @hw: pointer to the HW struct
* @vf_id: relative virtual function ID
* @new_state: new algorithm state
* @is_malvf: boolean output to indicate if VF is malicious
*
* This function tracks the number of asynchronous messages
* sent per VF and marks the VF as malicious if it exceeds
* the permissible number of messages to send.
*/
static enum ice_status
ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
enum ice_mbx_snapshot_state *new_state,
bool *is_malvf)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
if (vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE;
/* increment the message count in the VF array */
snap->mbx_vf.vf_cntr[vf_id]++;
if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
*is_malvf = true;
/* continue to iterate through the mailbox snapshot */
ice_mbx_traverse(hw, new_state);
return ICE_SUCCESS;
}
/**
* ice_mbx_reset_snapshot - Reset mailbox snapshot structure
* @snap: pointer to mailbox snapshot structure in the ice_hw struct
*
* Reset the mailbox snapshot structure and clear VF counter array.
*/
static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
{
u32 vfcntr_len;
if (!snap || !snap->mbx_vf.vf_cntr)
return;
/* Clear VF counters. */
vfcntr_len = snap->mbx_vf.vfcntr_len;
if (vfcntr_len)
ice_memset(snap->mbx_vf.vf_cntr, 0,
(vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)),
ICE_NONDMA_MEM);
/* Reset mailbox snapshot for a new capture. */
ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf),
ICE_NONDMA_MEM);
snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
}
/**
* ice_mbx_vf_state_handler - Handle states of the overflow algorithm
* @hw: pointer to the HW struct
* @mbx_data: pointer to structure containing mailbox data
* @vf_id: relative virtual function (VF) ID
* @is_malvf: boolean output to indicate if VF is malicious
*
* The function serves as an entry point for the malicious VF
* detection algorithm by handling the different states and state
* transitions of the algorithm:
* New snapshot: This state is entered when creating a new static
* snapshot. The data from any previous mailbox snapshot is
* cleared and a new capture of the mailbox head and tail is
* logged. This will be the new static snapshot to detect
* asynchronous messages sent by VFs. On capturing the snapshot
* and depending on whether the number of pending messages in that
* snapshot exceed the watermark value, the state machine enters
* traverse or detect states.
* Traverse: If pending message count is below watermark then iterate
* through the snapshot without any action on VF.
* Detect: If pending message count exceeds watermark traverse
* the static snapshot and look for a malicious VF.
*/
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw,
struct ice_mbx_data *mbx_data, u16 vf_id,
bool *is_malvf)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
struct ice_mbx_snap_buffer_data *snap_buf;
struct ice_ctl_q_info *cq = &hw->mailboxq;
enum ice_mbx_snapshot_state new_state;
enum ice_status status = ICE_SUCCESS;
if (!is_malvf || !mbx_data)
return ICE_ERR_BAD_PTR;
/* When entering the mailbox state machine assume that the VF
* is not malicious until detected.
*/
*is_malvf = false;
/* Check that the maximum number of messages that can be processed while
* servicing the current interrupt is greater than the defined AVF message
* threshold.
*/
if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
return ICE_ERR_INVAL_SIZE;
/* The watermark value should not be less than the threshold limit
* set for the number of asynchronous messages a VF can send to mailbox
* nor should it be greater than the maximum number of messages in the
* mailbox serviced in current interrupt.
*/
if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
return ICE_ERR_PARAM;
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
snap_buf = &snap->mbx_buf;
switch (snap_buf->state) {
case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
/* Clear any previously held data in mailbox snapshot structure. */
ice_mbx_reset_snapshot(snap);
/* Collect the pending ARQ count, number of messages processed and
* the maximum number of messages allowed to be processed from the
* Mailbox for current interrupt.
*/
snap_buf->num_pending_arq = mbx_data->num_pending_arq;
snap_buf->num_msg_proc = mbx_data->num_msg_proc;
snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
/* Capture a new static snapshot of the mailbox by logging the
* head and tail of snapshot and set num_iterations to the tail
* value to mark the start of the iteration through the snapshot.
*/
snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
mbx_data->num_pending_arq);
snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
snap_buf->num_iterations = snap_buf->tail;
/* Pending ARQ messages returned by ice_clean_rq_elem
* is the difference between the head and tail of the
* mailbox queue. Comparing this value against the watermark
* helps to check if we potentially have malicious VFs.
*/
if (snap_buf->num_pending_arq >=
mbx_data->async_watermark_val) {
new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
} else {
new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
ice_mbx_traverse(hw, &new_state);
}
break;
case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
ice_mbx_traverse(hw, &new_state);
break;
case ICE_MAL_VF_DETECT_STATE_DETECT:
new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
break;
default:
new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
status = ICE_ERR_CFG;
}
snap_buf->state = new_state;
return status;
}
/**
* ice_mbx_report_malvf - Track and note malicious VF
* @hw: pointer to the HW struct
* @all_malvfs: all malicious VFs tracked by PF
* @bitmap_len: length of bitmap in bits
* @vf_id: relative virtual function ID of the malicious VF
* @report_malvf: boolean to indicate if malicious VF must be reported
*
* This function will update a bitmap that keeps track of the malicious
* VFs attached to the PF. A malicious VF must be reported only once if
* discovered between VF resets or loading so the function checks
* the input vf_id against the bitmap to verify if the VF has been
* detected in any previous mailbox iterations.
*/
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf)
{
if (!all_malvfs || !report_malvf)
return ICE_ERR_PARAM;
*report_malvf = false;
if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE;
if (vf_id >= bitmap_len)
return ICE_ERR_OUT_OF_RANGE;
/* If the vf_id is found in the bitmap set bit and boolean to true */
if (!ice_is_bit_set(all_malvfs, vf_id)) {
ice_set_bit(vf_id, all_malvfs);
ice_debug(hw, ICE_DBG_TRACE, "Malicious VF=%d found\n", vf_id);
*report_malvf = true;
}
return ICE_SUCCESS;
}
/**
* ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
* @snap: pointer to the mailbox snapshot structure
* @all_malvfs: all malicious VFs tracked by PF
* @bitmap_len: length of bitmap in bits
* @vf_id: relative virtual function ID of the malicious VF
*
* In case of a VF reset, this function can be called to clear
* the bit corresponding to the VF ID in the bitmap tracking all
* malicious VFs attached to the PF. The function also clears the
* VF counter array at the index of the VF ID. This is to ensure
* that the new VF loaded is not considered malicious before going
* through the overflow detection algorithm.
*/
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id)
{
if (!snap || !all_malvfs)
return ICE_ERR_PARAM;
if (bitmap_len < snap->mbx_vf.vfcntr_len)
return ICE_ERR_INVAL_SIZE;
/* Ensure VF ID value is not larger than bitmap or VF counter length */
if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
return ICE_ERR_OUT_OF_RANGE;
/* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
ice_clear_bit(vf_id, all_malvfs);
/* Clear the VF counter in the mailbox snapshot structure for that VF ID.
* This is to ensure that if a VF is unloaded and a new one brought back
* up with the same VF ID for a snapshot currently in traversal or detect
* state the counter for that VF ID does not increment on top of existing
* values in the mailbox overflow detection algorithm.
*/
snap->mbx_vf.vf_cntr[vf_id] = 0;
return ICE_SUCCESS;
}
/**
* ice_mbx_init_snapshot - Initialize mailbox snapshot structure
* @hw: pointer to the hardware structure
* @vf_count: number of VFs allocated on a PF
*
* Clear the mailbox snapshot structure and allocate memory
* for the VF counter array based on the number of VFs allocated
* on that PF.
*
* Assumption: This function will assume ice_get_caps() has already been
* called to ensure that the vf_count can be compared against the number
* of VFs supported as defined in the functional capabilities of the device.
*/
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
/* Ensure that the number of VFs allocated is non-zero and
* is not greater than the number of supported VFs defined in
* the functional capabilities of the PF.
*/
if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
return ICE_ERR_INVAL_SIZE;
snap->mbx_vf.vf_cntr =
(u32 *)ice_calloc(hw, vf_count,
sizeof(*snap->mbx_vf.vf_cntr));
if (!snap->mbx_vf.vf_cntr)
return ICE_ERR_NO_MEMORY;
/* Setting the VF counter length to the number of allocated
* VFs for given PF's functional capabilities.
*/
snap->mbx_vf.vfcntr_len = vf_count;
/* Clear mbx_buf in the mailbox snapshot structure and set the
* mailbox snapshot state to a new capture.
*/
ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
return ICE_SUCCESS;
}
/**
* ice_mbx_deinit_snapshot - Free mailbox snapshot structure
* @hw: pointer to the hardware structure
*
* Clear the mailbox snapshot structure and free the VF counter array.
*/
void ice_mbx_deinit_snapshot(struct ice_hw *hw)
{
struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
/* Free VF counter array and reset vf counter length */
ice_free(hw, snap->mbx_vf.vf_cntr);
snap->mbx_vf.vfcntr_len = 0;
/* Clear mbx_buf in the mailbox snapshot structure */
ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
}
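To tie the exported pieces together, a hedged sketch of how a PF driver's mailbox interrupt path might drive this detection algorithm; the bitmap, bitmap length, and mbx_data contents are caller-maintained placeholders, as the overview comment above describes, and the example_* names are illustrative:

/* Sketch only: per-message handling inside a mailbox interrupt handler. */
static void
example_handle_mbx_msg(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
		       u16 vf_id, ice_bitmap_t *all_malvfs, u16 bitmap_len)
{
	bool is_malvf = false;
	bool report = false;

	if (ice_mbx_vf_state_handler(hw, mbx_data, vf_id, &is_malvf))
		return;

	if (is_malvf &&
	    !ice_mbx_report_malvf(hw, all_malvfs, bitmap_len, vf_id, &report) &&
	    report) {
		/* Caller-specific action, e.g. log the event or disable the VF. */
	}
}

/* Sketch only: on a VF reset, forget its history so the fresh VF is not
 * penalized for its predecessor's behavior.
 */
static void
example_vf_reset(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
		 u16 bitmap_len, u16 vf_id)
{
	ice_mbx_clear_malvf(&hw->mbx_snapshot, all_malvfs, bitmap_len, vf_id);
}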


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -33,16 +33,33 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
#include "ice_common.h"
#include "ice_type.h"
#include "ice_controlq.h"
/* Defining the mailbox message threshold as 63 asynchronous
* pending messages. Normal VF functionality does not require
* sending more than 63 asynchronous pending messages.
*/
#define ICE_ASYNC_VF_MSG_THRESHOLD 63
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
enum ice_status
ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
u16 vf_id, bool *is_mal_vf);
enum ice_status
ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id);
enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
void ice_mbx_deinit_snapshot(struct ice_hw *hw);
enum ice_status
ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
u16 bitmap_len, u16 vf_id, bool *report_malvf);
#endif /* _ICE_SRIOV_H_ */


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -137,6 +137,71 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
return status;
}
/**
* ice_alloc_rss_global_lut - allocate a RSS global LUT
* @hw: pointer to the HW struct
* @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
* @global_lut_id: output parameter for the RSS global LUT's ID
*/
enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
enum ice_status status;
u16 buf_len;
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(1);
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH |
(shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
ICE_AQC_RES_TYPE_FLAG_DEDICATED));
status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL);
if (status) {
ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n",
shared_res ? "shared" : "dedicated", status);
goto ice_alloc_global_lut_exit;
}
*global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
ice_alloc_global_lut_exit:
ice_free(hw, sw_buf);
return status;
}
/**
* ice_free_rss_global_lut - free a RSS global LUT
* @hw: pointer to the HW struct
* @global_lut_id: ID of the RSS global LUT to free
*/
enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len, num_elems = 1;
enum ice_status status;
buf_len = ice_struct_size(sw_buf, elem, num_elems);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(num_elems);
sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH);
sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id);
status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL);
if (status)
ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n",
global_lut_id, status);
ice_free(hw, sw_buf);
return status;
}
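For reference, a sketch of how a caller might pair the allocate and free helpers above; the function name and calling context are hypothetical and error handling is abbreviated.

/* Allocate a shared RSS global LUT, use its ID, then release it. */
static enum ice_status
example_use_global_lut(struct ice_hw *hw)
{
	enum ice_status status;
	u16 lut_id;

	status = ice_alloc_rss_global_lut(hw, true, &lut_id);
	if (status)
		return status;

	/* ... program the LUT contents using lut_id ... */

	return ice_free_rss_global_lut(hw, lut_id);
}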
/**
* ice_alloc_sw - allocate resources specific to switch
* @hw: pointer to the HW struct
@ -1425,8 +1490,7 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
struct ice_vsi_list_map_info *v_map;
int i;
v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
sizeof(*v_map));
v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map));
if (!v_map)
return NULL;

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -414,6 +414,8 @@ ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id);
/* Switch/bridge related commands */
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id);
enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id);
enum ice_status
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
u16 *counter_id);

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -66,6 +66,8 @@
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0]))
#include "ice_status.h"
#include "ice_hw_autogen.h"
#include "ice_devids.h"
@ -75,6 +77,7 @@
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_vlan_mode.h"
static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc)
{
@ -380,7 +383,11 @@ struct ice_hw_common_caps {
u8 apm_wol_support;
u8 acpi_prog_mthd;
u8 proxy_support;
bool sec_rev_disabled;
bool update_disabled;
bool nvm_unified_update;
#define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
#define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1)
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
@ -474,16 +481,74 @@ struct ice_orom_info {
u8 major; /* Major version of OROM */
u8 patch; /* Patch version of OROM */
u16 build; /* Build version of OROM */
u32 srev; /* Security revision */
};
/* NVM Information */
/* NVM version information */
struct ice_nvm_info {
u32 eetrack;
u32 srev;
u8 major;
u8 minor;
};
/* Minimum Security Revision information */
struct ice_minsrev_info {
u32 nvm;
u32 orom;
u8 nvm_valid : 1;
u8 orom_valid : 1;
};
/* netlist version information */
struct ice_netlist_info {
u32 major; /* major high/low */
u32 minor; /* minor high/low */
u32 type; /* type high/low */
u32 rev; /* revision high/low */
u32 hash; /* SHA-1 hash word */
u16 cust_ver; /* customer version */
};
/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
* of the flash image.
*/
enum ice_flash_bank {
ICE_INVALID_FLASH_BANK,
ICE_1ST_FLASH_BANK,
ICE_2ND_FLASH_BANK,
};
/* Enumeration of which flash bank is desired to read from, either the active
* bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
* code which just wants to read the active or inactive flash bank.
*/
enum ice_bank_select {
ICE_ACTIVE_FLASH_BANK,
ICE_INACTIVE_FLASH_BANK,
};
/* information for accessing NVM, OROM, and Netlist flash banks */
struct ice_bank_info {
u32 nvm_ptr; /* Pointer to 1st NVM bank */
u32 nvm_size; /* Size of NVM bank */
u32 orom_ptr; /* Pointer to 1st OROM bank */
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
};
/* Flash Chip Information */
struct ice_flash_info {
struct ice_orom_info orom; /* Option ROM version info */
u32 eetrack; /* NVM data version */
struct ice_nvm_info nvm; /* NVM version information */
struct ice_netlist_info netlist;/* Netlist version info */
struct ice_bank_info banks; /* Flash Bank information */
u16 sr_words; /* Shadow RAM size in words */
u32 flash_size; /* Size of available flash in bytes */
u8 major_ver; /* major version of dev starter */
u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
@ -511,16 +576,6 @@ struct ice_link_default_override_tlv {
#define ICE_NVM_VER_LEN 32
/* netlist version information */
struct ice_netlist_ver_info {
u32 major; /* major high/low */
u32 minor; /* minor high/low */
u32 type; /* type high/low */
u32 rev; /* revision high/low */
u32 hash; /* SHA-1 hash word */
u16 cust_ver; /* customer version */
};
/* Max number of port to queue branches w.r.t topology */
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@ -700,19 +755,20 @@ struct ice_dcb_app_priority_table {
u8 selector;
};
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 32
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
#define ICE_TLV_STATUS_ERR 0x4
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
#define ICE_CEE_APP_SEL_TCPIP 0x1
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 32
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
#define ICE_TLV_STATUS_ERR 0x4
#define ICE_APP_PROT_ID_FCOE 0x8906
#define ICE_APP_PROT_ID_ISCSI 0x0cbc
#define ICE_APP_PROT_ID_ISCSI_860 0x035c
#define ICE_APP_PROT_ID_FIP 0x8914
#define ICE_APP_SEL_ETHTYPE 0x1
#define ICE_APP_SEL_TCPIP 0x2
#define ICE_CEE_APP_SEL_ETHTYPE 0x0
#define ICE_CEE_APP_SEL_TCPIP 0x1
struct ice_dcbx_cfg {
u32 numapps;
@ -757,8 +813,6 @@ struct ice_port_info {
struct ice_lock sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* List containing profile ID(s) and other params per layer */
struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_bw_type_info root_node_bw_t_info;
struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
struct ice_qos_cfg qos_cfg;
@ -774,6 +828,80 @@ struct ice_switch_info {
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* Enum defining the different states of the mailbox snapshot in the
* PF-VF mailbox overflow detection algorithm. The snapshot can be in
* states:
* 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot
* within the mailbox buffer.
* 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snapshot
* 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the
* mailbox and mark any VFs sending more messages than the threshold limit set.
* 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF.
*/
enum ice_mbx_snapshot_state {
ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0,
ICE_MAL_VF_DETECT_STATE_TRAVERSE,
ICE_MAL_VF_DETECT_STATE_DETECT,
ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF,
};
/* Structure to hold information of the static snapshot and the mailbox
* buffer data used to generate and track the snapshot.
* 1. state: the state of the mailbox snapshot in the malicious VF
* detection state handler ice_mbx_vf_state_handler()
* 2. head : head of the mailbox snapshot in a circular mailbox buffer
* 3. tail : tail of the mailbox snapshot in a circular mailbox buffer
* 4. num_iterations: number of messages traversed in circular mailbox buffer
* 5. num_msg_proc: number of messages processed in mailbox
* 6. num_pending_arq: number of pending asynchronous messages
* 7. max_num_msgs_mbx: maximum messages in mailbox for currently
* serviced work item or interrupt.
*/
struct ice_mbx_snap_buffer_data {
enum ice_mbx_snapshot_state state;
u32 head;
u32 tail;
u32 num_iterations;
u16 num_msg_proc;
u16 num_pending_arq;
u16 max_num_msgs_mbx;
};
/* Structure to track messages sent by VFs on mailbox:
* 1. vf_cntr : a counter array of VFs to track the number of
* asynchronous messages sent by each VF
* 2. vfcntr_len : number of entries in VF counter array
*/
struct ice_mbx_vf_counter {
u32 *vf_cntr;
u32 vfcntr_len;
};
/* Structure to hold data relevant to the captured static snapshot
* of the PF-VF mailbox.
*/
struct ice_mbx_snapshot {
struct ice_mbx_snap_buffer_data mbx_buf;
struct ice_mbx_vf_counter mbx_vf;
};
/* Structure to hold data to be used for capturing or updating a
* static snapshot.
* 1. num_msg_proc: number of messages processed in mailbox
* 2. num_pending_arq: number of pending asynchronous messages
* 3. max_num_msgs_mbx: maximum messages in mailbox for currently
* serviced work item or interrupt.
* 4. async_watermark_val: An upper threshold set by caller to determine
* if the pending arq count is large enough to assume that there is
* the possibility of a malicious VF.
*/
struct ice_mbx_data {
u16 num_msg_proc;
u16 num_pending_arq;
u16 max_num_msgs_mbx;
u16 async_watermark_val;
};
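To tie these structures together, a rough sketch of how a PF mailbox service routine might fill in ice_mbx_data and feed it to the handlers declared in ice_sriov.h. How the queue counters are obtained, and the choice of ICE_ASYNC_VF_MSG_THRESHOLD as the watermark, are assumptions made for this example only.

/* Hypothetical per-message check in the PF mailbox service path. */
static void
example_check_mbx_msg(struct ice_hw *hw, u16 vf_id,
		      ice_bitmap_t *all_malvfs, u16 bitmap_len,
		      u16 num_msg_proc, u16 num_pending_arq, u16 mbx_size)
{
	struct ice_mbx_data mbx_data;
	bool is_mal_vf = false;

	mbx_data.num_msg_proc = num_msg_proc;
	mbx_data.num_pending_arq = num_pending_arq;
	mbx_data.max_num_msgs_mbx = mbx_size;
	mbx_data.async_watermark_val = ICE_ASYNC_VF_MSG_THRESHOLD;

	if (ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_mal_vf))
		return;

	if (is_mal_vf) {
		bool report = false;

		if (ice_mbx_report_malvf(hw, all_malvfs, bitmap_len,
					 vf_id, &report))
			return;
		if (report) {
			/* first detection for this VF: log it, and let the
			 * caller decide whether to reset or disable the VF
			 */
		}
	}
}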
/* Port hardware description */
struct ice_hw {
u8 *hw_addr;
@ -808,21 +936,21 @@ struct ice_hw {
u8 sw_entry_point_layer;
u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct LIST_HEAD_TYPE agg_list; /* lists all aggregators */
/* List containing profile ID(s) and other params per layer */
struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_flash_info flash;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
/* Control Queue info */
struct ice_ctl_q_info adminq;
struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
u8 api_min_ver; /* API minor version */
@ -870,13 +998,13 @@ struct ice_hw {
enum ice_aq_err pkg_dwnld_status;
/* Driver's package ver - (from the Metadata seg) */
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
/* Driver's Ice package version (from the Ice seg) */
struct ice_pkg_ver ice_pkg_ver;
u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
/* Driver's Ice segment format version and id (from the Ice seg) */
struct ice_pkg_ver ice_seg_fmt_ver;
u8 ice_seg_id[ICE_SEG_ID_SIZE];
/* Pointer to the ice segment */
struct ice_seg *seg;
@ -895,6 +1023,8 @@ struct ice_hw {
struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
struct ice_lock rss_locks; /* protect RSS configuration */
struct LIST_HEAD_TYPE rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
struct ice_vlan_mode_ops vlan_mode_ops;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@ -981,6 +1111,14 @@ enum ice_sw_fwd_act_type {
ICE_INVAL_ACT
};
struct ice_aq_get_set_rss_lut_params {
u16 vsi_handle; /* software VSI handle */
u16 lut_size; /* size of the LUT buffer */
u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
u8 *lut; /* input RSS LUT for set and output RSS LUT for get */
u8 global_lut_id; /* only valid when lut_type is global */
};
/* Checksum and Shadow RAM pointers */
#define ICE_SR_NVM_CTRL_WORD 0x00
#define ICE_SR_PHY_ANALOG_PTR 0x04
@ -1044,11 +1182,65 @@ enum ice_sw_fwd_act_type {
#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
/* CSS Header words */
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
/* Length of CSS header section in words */
#define ICE_CSS_HEADER_LENGTH 330
/* Offset of Shadow RAM copy in the NVM bank area. */
#define ICE_NVM_SR_COPY_WORD_OFFSET ROUND_UP(ICE_CSS_HEADER_LENGTH, 32)
/* Size in bytes of Option ROM trailer */
#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
* and length.
*/
#define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B
#define ICE_NETLIST_TYPE_OFFSET 0x0000
#define ICE_NETLIST_LEN_OFFSET 0x0001
/* The Link Topology section follows the TLV header. When reading the netlist
* using ice_read_netlist_module, we need to account for the 2-word TLV
* header.
*/
#define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
#define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000)
#define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001)
#define ICE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
/* The Netlist ID Block is located after all of the Link Topology nodes. */
#define ICE_NETLIST_ID_BLK_SIZE 0x30
#define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
/* netlist ID block field offsets (word offsets) */
#define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
#define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
#define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
#define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
#define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06
#define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07
#define ICE_NETLIST_ID_BLK_REV_LOW 0x08
#define ICE_NETLIST_ID_BLK_REV_HIGH 0x09
#define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
#define ICE_NETLIST_ID_BLK_CUST_VER 0x2F
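A small sketch showing how the major version would be assembled from the high/low words above, following the high/low packing implied by the ice_netlist_info comments. The id_blk buffer is assumed to already hold the ICE_NETLIST_ID_BLK_SIZE words starting at ICE_NETLIST_ID_BLK_OFFSET(node_count); no particular read routine is implied.

/* Combine the netlist ID block's major version high/low words. */
static u32
example_netlist_major_ver(const u16 *id_blk)
{
	return ((u32)id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
	       id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
}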
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define ICE_SR_VPD_SIZE_WORDS 512
#define ICE_SR_PCIE_ALT_SIZE_WORDS 512
#define ICE_SR_CTRL_WORD_1_S 0x06
#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
#define ICE_SR_CTRL_WORD_VALID 0x1
#define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
#define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
#define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
#define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
/* Shadow RAM related */
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800

View File

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#include "ice_vlan_mode.h"
#include "ice_common.h"
/**
* ice_set_svm_dflt - set single VLAN mode
* @hw: pointer to the HW structure
*/
static enum ice_status ice_set_svm_dflt(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
return ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL);
}
/**
* ice_init_vlan_mode_ops - initialize VLAN mode configuration ops
* @hw: pointer to the HW structure
*/
void ice_init_vlan_mode_ops(struct ice_hw *hw)
{
hw->vlan_mode_ops.set_dvm = NULL;
hw->vlan_mode_ops.set_svm = ice_set_svm_dflt;
}
/**
* ice_set_vlan_mode - configure the device's VLAN mode
* @hw: pointer to the HW structure
*/
enum ice_status ice_set_vlan_mode(struct ice_hw *hw)
{
enum ice_status status = ICE_ERR_NOT_IMPL;
if (hw->vlan_mode_ops.set_dvm)
status = hw->vlan_mode_ops.set_dvm(hw);
if (status)
return hw->vlan_mode_ops.set_svm(hw);
return status;
}

View File

@ -0,0 +1,60 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*$FreeBSD$*/
#ifndef _ICE_VLAN_MODE_H_
#define _ICE_VLAN_MODE_H_
struct ice_hw;
enum ice_status ice_set_vlan_mode(struct ice_hw *hw);
void ice_init_vlan_mode_ops(struct ice_hw *hw);
/* This structure defines the VLAN mode configuration interface. It is used to set the VLAN mode.
*
* Note: These operations will be called while the global configuration lock is held.
*
* enum ice_status (*set_svm)(struct ice_hw *hw);
* This function is called when the DDP and/or Firmware don't support double VLAN mode (DVM) or
* if the set_dvm op is not implemented and/or returns failure. It will set the device in
* single VLAN mode (SVM).
*
* enum ice_status (*set_dvm)(struct ice_hw *hw);
* This function is called when the DDP and Firmware support double VLAN mode (DVM). It should
* be implemented to set double VLAN mode. If it fails or remains unimplemented, set_svm will
* be called as a fallback.
*/
struct ice_vlan_mode_ops {
enum ice_status (*set_svm)(struct ice_hw *hw);
enum ice_status (*set_dvm)(struct ice_hw *hw);
};
#endif /* _ICE_VLAN_MODE_H_ */
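To make the ops interface above concrete, a sketch of how a caller could install its own set_dvm callback once DDP/firmware DVM support has been detected, letting ice_set_vlan_mode() fall back to set_svm on failure. my_set_dvm() and example_enable_dvm() are illustrative names only; how DVM is actually enabled is not shown here.

/* Hypothetical override installed after ice_init_vlan_mode_ops(). */
static enum ice_status
my_set_dvm(struct ice_hw *hw)
{
	/* issue whatever configuration enables double VLAN mode */
	return ICE_SUCCESS;
}

static void
example_enable_dvm(struct ice_hw *hw)
{
	ice_init_vlan_mode_ops(hw);	/* sets set_svm, leaves set_dvm NULL */
	hw->vlan_mode_ops.set_dvm = my_set_dvm;
}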

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -160,9 +160,89 @@ enum virtchnl_ops {
/* opcodes 39, 40, 41, 42 and 43 are reserved */
/* opcode 44 is reserved */
/* opcode 45, 46, 47, 48 and 49 are reserved */
VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50,
VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107,
VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108,
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
VIRTCHNL_OP_MAX,
};
static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
{
switch (v_opcode) {
case VIRTCHNL_OP_UNKNOWN:
return "VIRTCHNL_OP_UNKNOWN";
case VIRTCHNL_OP_VERSION:
return "VIRTCHNL_OP_VERSION";
case VIRTCHNL_OP_RESET_VF:
return "VIRTCHNL_OP_RESET_VF";
case VIRTCHNL_OP_GET_VF_RESOURCES:
return "VIRTCHNL_OP_GET_VF_RESOURCES";
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
return "VIRTCHNL_OP_CONFIG_TX_QUEUE";
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
return "VIRTCHNL_OP_CONFIG_RX_QUEUE";
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
return "VIRTCHNL_OP_CONFIG_VSI_QUEUES";
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
return "VIRTCHNL_OP_CONFIG_IRQ_MAP";
case VIRTCHNL_OP_ENABLE_QUEUES:
return "VIRTCHNL_OP_ENABLE_QUEUES";
case VIRTCHNL_OP_DISABLE_QUEUES:
return "VIRTCHNL_OP_DISABLE_QUEUES";
case VIRTCHNL_OP_ADD_ETH_ADDR:
return "VIRTCHNL_OP_ADD_ETH_ADDR";
case VIRTCHNL_OP_DEL_ETH_ADDR:
return "VIRTCHNL_OP_DEL_ETH_ADDR";
case VIRTCHNL_OP_ADD_VLAN:
return "VIRTCHNL_OP_ADD_VLAN";
case VIRTCHNL_OP_DEL_VLAN:
return "VIRTCHNL_OP_DEL_VLAN";
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE";
case VIRTCHNL_OP_GET_STATS:
return "VIRTCHNL_OP_GET_STATS";
case VIRTCHNL_OP_RSVD:
return "VIRTCHNL_OP_RSVD";
case VIRTCHNL_OP_EVENT:
return "VIRTCHNL_OP_EVENT";
case VIRTCHNL_OP_CONFIG_RSS_KEY:
return "VIRTCHNL_OP_CONFIG_RSS_KEY";
case VIRTCHNL_OP_CONFIG_RSS_LUT:
return "VIRTCHNL_OP_CONFIG_RSS_LUT";
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
return "VIRTCHNL_OP_GET_RSS_HENA_CAPS";
case VIRTCHNL_OP_SET_RSS_HENA:
return "VIRTCHNL_OP_SET_RSS_HENA";
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING";
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING";
case VIRTCHNL_OP_REQUEST_QUEUES:
return "VIRTCHNL_OP_REQUEST_QUEUES";
case VIRTCHNL_OP_ENABLE_CHANNELS:
return "VIRTCHNL_OP_ENABLE_CHANNELS";
case VIRTCHNL_OP_DISABLE_CHANNELS:
return "VIRTCHNL_OP_DISABLE_CHANNELS";
case VIRTCHNL_OP_ADD_CLOUD_FILTER:
return "VIRTCHNL_OP_ADD_CLOUD_FILTER";
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
return "VIRTCHNL_OP_DEL_CLOUD_FILTER";
case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
return "VIRTCHNL_OP_GET_MAX_RSS_QREGION";
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
return "VIRTCHNL_OP_ENABLE_QUEUES_V2";
case VIRTCHNL_OP_DISABLE_QUEUES_V2:
return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_MAX:
return "VIRTCHNL_OP_MAX";
default:
return "Unsupported (update virtchnl.h)";
}
}
/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
* structure/union is not of the correct size, otherwise it creates an enum
@ -265,6 +345,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
/* 0X00000100 is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
@ -279,7 +361,6 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* 0X08000000 and 0X10000000 are reserved */
/* 0X20000000 is reserved */
/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
@ -442,6 +523,35 @@ struct virtchnl_queue_select {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
/* VIRTCHNL_OP_GET_MAX_RSS_QREGION
*
* if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then this op must be supported.
*
* VF sends this message in order to query the max RSS queue region
* size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled.
* This information should be used when configuring the RSS LUT and/or
* configuring queue region based filters.
*
* The maximum RSS queue region is 2^qregion_width. So, a qregion_width
* of 6 would inform the VF that the PF supports a maximum RSS queue region
* of 64.
*
* A queue region represents a range of queues that can be used to configure
* a RSS LUT. For example, if a VF is given 64 queues, but only a max queue
* region size of 16 (i.e. 2^qregion_width = 16) then it will only be able
* to configure the RSS LUT with queue indices from 0 to 15. However, other
* filters can be used to direct packets to queues >15 via specifying a queue
* base/offset and queue region width.
*/
struct virtchnl_max_rss_qregion {
u16 vport_id;
u16 qregion_width;
u8 pad[4];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion);
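A small helper sketch, not part of virtchnl.h, shown only to make the width-to-size relationship above concrete (e.g. qregion_width 6 yields 64 queues).

/* Convert the reported qregion_width into the RSS queue region size. */
static inline u16
virtchnl_max_rss_queues(const struct virtchnl_max_rss_qregion *qr)
{
	return (u16)(1U << qr->qregion_width);
}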
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
@ -634,8 +744,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
*/
struct virtchnl_l4_spec {
u8 src_mac[ETH_ALEN];
u8 dst_mac[ETH_ALEN];
u8 src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
/* vlan_prio is part of this 16 bit field even from OS perspective
* vlan_id:12 is actual vlan_id, then vlanid:bit14..12 is vlan_prio
* in future, when decided to offload vlan_prio, pass that information
@ -732,6 +842,121 @@ struct virtchnl_pf_event {
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked
* will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
};
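A sketch of the VF-side poll described above. rd32() stands in for the VF driver's register read routine and VFR_STATE_MASK for the 2-bit state field implied by the "DEADBEEF, when masked, results in 3" note; both are assumptions, not definitions from this header.

#define VFR_STATE_MASK	0x3

/* Poll VFGEN_RSTAT and report whether the reset has finished. */
static bool
example_vf_reset_done(u32 (*rd32)(u32 reg), u32 vfgen_rstat_reg)
{
	u32 state = rd32(vfgen_rstat_reg) & VFR_STATE_MASK;

	return (state == VIRTCHNL_VFR_COMPLETED ||
		state == VIRTCHNL_VFR_VFACTIVE);
}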
/* TX and RX queue types are valid in legacy as well as split queue models.
* With Split Queue model, 2 additional types are introduced - TX_COMPLETION
* and RX_BUFFER. In split queue model, RX corresponds to the queue where HW
* posts completions.
*/
enum virtchnl_queue_type {
VIRTCHNL_QUEUE_TYPE_TX = 0,
VIRTCHNL_QUEUE_TYPE_RX = 1,
VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2,
VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3,
VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4,
VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5
};
/* structure to specify a chunk of contiguous queues */
struct virtchnl_queue_chunk {
enum virtchnl_queue_type type;
u16 start_queue_id;
u16 num_queues;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk);
/* structure to specify several chunks of contiguous queues */
struct virtchnl_queue_chunks {
u16 num_chunks;
u16 rsvd;
struct virtchnl_queue_chunk chunks[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks);
/* VIRTCHNL_OP_ENABLE_QUEUES_V2
* VIRTCHNL_OP_DISABLE_QUEUES_V2
* VIRTCHNL_OP_DEL_QUEUES
*
* If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then all of these ops are available.
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are
* available.
*
* PF sends these messages to enable, disable or delete queues specified in
* chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues
* to be enabled/disabled/deleted. Also applicable to single queue RX or
* TX. CP performs requested action and returns status.
*/
struct virtchnl_del_ena_dis_queues {
u16 vport_id;
u16 pad;
struct virtchnl_queue_chunks chunks;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues);
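As a sizing illustration, a sketch of the message length for a queues-v2 request carrying num_chunks chunks, mirroring the length check performed later in virtchnl_vc_validate_vf_msg() (one chunk is already part of the struct). The helper name is hypothetical.

/* Total message length for num_chunks queue chunks (num_chunks >= 1). */
static inline u16
example_ena_dis_queues_msglen(u16 num_chunks)
{
	return (u16)(sizeof(struct virtchnl_del_ena_dis_queues) +
		     (num_chunks - 1) * sizeof(struct virtchnl_queue_chunk));
}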
/* Virtchannel interrupt throttling rate index */
enum virtchnl_itr_idx {
VIRTCHNL_ITR_IDX_0 = 0,
VIRTCHNL_ITR_IDX_1 = 1,
VIRTCHNL_ITR_IDX_NO_ITR = 3,
};
/* Queue to vector mapping */
struct virtchnl_queue_vector {
u16 queue_id;
u16 vector_id;
u8 pad[4];
enum virtchnl_itr_idx itr_idx;
enum virtchnl_queue_type queue_type;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector);
/* VIRTCHNL_OP_MAP_QUEUE_VECTOR
* VIRTCHNL_OP_UNMAP_QUEUE_VECTOR
*
* If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then all of these ops are available.
*
* If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES
* then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available.
*
* PF sends this message to map or unmap queues to vectors and ITR index
* registers. External data buffer contains virtchnl_queue_vector_maps structure
* that contains num_qv_maps of virtchnl_queue_vector structures.
* CP maps the requested queue vector maps after validating the queue and vector
* ids and returns a status code.
*/
struct virtchnl_queue_vector_maps {
u16 vport_id;
u16 num_qv_maps;
u8 pad[4];
struct virtchnl_queue_vector qv_maps[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps);
/* Since VF messages are limited by u16 size, precalculate the maximum possible
* values of nested elements in virtchnl structures that virtual channel can
* possibly handle in a single message.
@ -756,23 +981,14 @@ enum virtchnl_vector_limits {
VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
sizeof(struct virtchnl_channel_info),
};
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked
* will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX =
((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) /
sizeof(struct virtchnl_queue_chunk),
VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX =
((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) /
sizeof(struct virtchnl_queue_vector),
};
/**
@ -845,6 +1061,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_GET_MAX_RSS_QREGION:
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
@ -945,6 +1163,35 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
case VIRTCHNL_OP_ENABLE_QUEUES_V2:
case VIRTCHNL_OP_DISABLE_QUEUES_V2:
valid_len = sizeof(struct virtchnl_del_ena_dis_queues);
if (msglen >= valid_len) {
struct virtchnl_del_ena_dis_queues *qs =
(struct virtchnl_del_ena_dis_queues *)msg;
if (qs->chunks.num_chunks == 0 ||
qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) {
err_msg_format = true;
break;
}
valid_len += (qs->chunks.num_chunks - 1) *
sizeof(struct virtchnl_queue_chunk);
}
break;
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
valid_len = sizeof(struct virtchnl_queue_vector_maps);
if (msglen >= valid_len) {
struct virtchnl_queue_vector_maps *v_qp =
(struct virtchnl_queue_vector_maps *)msg;
if (v_qp->num_qv_maps == 0 ||
v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
err_msg_format = true;
break;
}
valid_len += (v_qp->num_qv_maps - 1) *
sizeof(struct virtchnl_queue_vector);
}
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
/* Copyright (c) 2021, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -41,6 +41,9 @@
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
#define VIRTCHNL_IPSEC_SA_DESTROY 0
#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
@ -98,6 +101,17 @@
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
/* for virtchnl_ipsec_resp */
enum inline_ipsec_resp {
INLINE_IPSEC_SUCCESS = 0,
INLINE_IPSEC_FAIL = -1,
INLINE_IPSEC_ERR_FIFO_FULL = -2,
INLINE_IPSEC_ERR_NOT_READY = -3,
INLINE_IPSEC_ERR_VF_DOWN = -4,
INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
INLINE_IPSEC_ERR_NO_MEM = -6,
};
/* Detailed opcodes for DPDK and IPsec use */
enum inline_ipsec_ops {
INLINE_IPSEC_OP_GET_CAP = 0,

View File

@ -10,6 +10,6 @@ SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
# Shared source
SRCS += ice_common.c ice_controlq.c ice_dcb.c ice_flex_pipe.c ice_flow.c
SRCS += ice_nvm.c ice_sched.c ice_sriov.c ice_switch.c
SRCS += ice_nvm.c ice_sched.c ice_sriov.c ice_switch.c ice_vlan_mode.c
.include <bsd.kmod.mk>