ice(4): Update to 1.37.11-k

This driver update has no corresponding ice_ddp update and does not
contain many functional changes:
- Some refactoring for future SR-IOV PF support
- Various minor fixes

Signed-off-by: Eric Joyner <erj@FreeBSD.org>

Tested by:	jeffrey.e.pieper@intel.com
MFC after:	1 week
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D39821
Committed by Eric Joyner on 2023-05-24 16:38:02 -07:00
commit 9dc2f6e26f (parent 156424fce9)
60 changed files with 742 additions and 345 deletions

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1987,6 +1987,7 @@ struct ice_aqc_get_port_options {
u8 port_options_count;
#define ICE_AQC_PORT_OPT_COUNT_S 0
#define ICE_AQC_PORT_OPT_COUNT_M (0xF << ICE_AQC_PORT_OPT_COUNT_S)
#define ICE_AQC_PORT_OPT_MAX 16
u8 innermost_phy_index;
u8 port_options;
#define ICE_AQC_PORT_OPT_ACTIVE_S 0

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -461,7 +461,7 @@ ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
}
/**
* ice_cmp_bitmaps - compares two bitmaps.
* ice_cmp_bitmap - compares two bitmaps.
* @bmp1: the bitmap to compare
* @bmp2: the bitmap to compare with bmp1
* @size: Size of the bitmaps in bits

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -37,81 +37,81 @@
#include "ice_flow.h"
#include "ice_switch.h"
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_PF_RESET_WAIT_COUNT 500
static const char * const ice_link_mode_str_low[] = {
[0] = "100BASE_TX",
[1] = "100M_SGMII",
[2] = "1000BASE_T",
[3] = "1000BASE_SX",
[4] = "1000BASE_LX",
[5] = "1000BASE_KX",
[6] = "1G_SGMII",
[7] = "2500BASE_T",
[8] = "2500BASE_X",
[9] = "2500BASE_KX",
[10] = "5GBASE_T",
[11] = "5GBASE_KR",
[12] = "10GBASE_T",
[13] = "10G_SFI_DA",
[14] = "10GBASE_SR",
[15] = "10GBASE_LR",
[16] = "10GBASE_KR_CR1",
[17] = "10G_SFI_AOC_ACC",
[18] = "10G_SFI_C2C",
[19] = "25GBASE_T",
[20] = "25GBASE_CR",
[21] = "25GBASE_CR_S",
[22] = "25GBASE_CR1",
[23] = "25GBASE_SR",
[24] = "25GBASE_LR",
[25] = "25GBASE_KR",
[26] = "25GBASE_KR_S",
[27] = "25GBASE_KR1",
[28] = "25G_AUI_AOC_ACC",
[29] = "25G_AUI_C2C",
[30] = "40GBASE_CR4",
[31] = "40GBASE_SR4",
[32] = "40GBASE_LR4",
[33] = "40GBASE_KR4",
[34] = "40G_XLAUI_AOC_ACC",
[35] = "40G_XLAUI",
[36] = "50GBASE_CR2",
[37] = "50GBASE_SR2",
[38] = "50GBASE_LR2",
[39] = "50GBASE_KR2",
[40] = "50G_LAUI2_AOC_ACC",
[41] = "50G_LAUI2",
[42] = "50G_AUI2_AOC_ACC",
[43] = "50G_AUI2",
[44] = "50GBASE_CP",
[45] = "50GBASE_SR",
[46] = "50GBASE_FR",
[47] = "50GBASE_LR",
[48] = "50GBASE_KR_PAM4",
[49] = "50G_AUI1_AOC_ACC",
[50] = "50G_AUI1",
[51] = "100GBASE_CR4",
[52] = "100GBASE_SR4",
[53] = "100GBASE_LR4",
[54] = "100GBASE_KR4",
[55] = "100G_CAUI4_AOC_ACC",
[56] = "100G_CAUI4",
[57] = "100G_AUI4_AOC_ACC",
[58] = "100G_AUI4",
[59] = "100GBASE_CR_PAM4",
[60] = "100GBASE_KR_PAM4",
[61] = "100GBASE_CP2",
[62] = "100GBASE_SR2",
[63] = "100GBASE_DR",
ice_arr_elem_idx(0, "100BASE_TX"),
ice_arr_elem_idx(1, "100M_SGMII"),
ice_arr_elem_idx(2, "1000BASE_T"),
ice_arr_elem_idx(3, "1000BASE_SX"),
ice_arr_elem_idx(4, "1000BASE_LX"),
ice_arr_elem_idx(5, "1000BASE_KX"),
ice_arr_elem_idx(6, "1G_SGMII"),
ice_arr_elem_idx(7, "2500BASE_T"),
ice_arr_elem_idx(8, "2500BASE_X"),
ice_arr_elem_idx(9, "2500BASE_KX"),
ice_arr_elem_idx(10, "5GBASE_T"),
ice_arr_elem_idx(11, "5GBASE_KR"),
ice_arr_elem_idx(12, "10GBASE_T"),
ice_arr_elem_idx(13, "10G_SFI_DA"),
ice_arr_elem_idx(14, "10GBASE_SR"),
ice_arr_elem_idx(15, "10GBASE_LR"),
ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
ice_arr_elem_idx(18, "10G_SFI_C2C"),
ice_arr_elem_idx(19, "25GBASE_T"),
ice_arr_elem_idx(20, "25GBASE_CR"),
ice_arr_elem_idx(21, "25GBASE_CR_S"),
ice_arr_elem_idx(22, "25GBASE_CR1"),
ice_arr_elem_idx(23, "25GBASE_SR"),
ice_arr_elem_idx(24, "25GBASE_LR"),
ice_arr_elem_idx(25, "25GBASE_KR"),
ice_arr_elem_idx(26, "25GBASE_KR_S"),
ice_arr_elem_idx(27, "25GBASE_KR1"),
ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
ice_arr_elem_idx(29, "25G_AUI_C2C"),
ice_arr_elem_idx(30, "40GBASE_CR4"),
ice_arr_elem_idx(31, "40GBASE_SR4"),
ice_arr_elem_idx(32, "40GBASE_LR4"),
ice_arr_elem_idx(33, "40GBASE_KR4"),
ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
ice_arr_elem_idx(35, "40G_XLAUI"),
ice_arr_elem_idx(36, "50GBASE_CR2"),
ice_arr_elem_idx(37, "50GBASE_SR2"),
ice_arr_elem_idx(38, "50GBASE_LR2"),
ice_arr_elem_idx(39, "50GBASE_KR2"),
ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
ice_arr_elem_idx(41, "50G_LAUI2"),
ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
ice_arr_elem_idx(43, "50G_AUI2"),
ice_arr_elem_idx(44, "50GBASE_CP"),
ice_arr_elem_idx(45, "50GBASE_SR"),
ice_arr_elem_idx(46, "50GBASE_FR"),
ice_arr_elem_idx(47, "50GBASE_LR"),
ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
ice_arr_elem_idx(50, "50G_AUI1"),
ice_arr_elem_idx(51, "100GBASE_CR4"),
ice_arr_elem_idx(52, "100GBASE_SR4"),
ice_arr_elem_idx(53, "100GBASE_LR4"),
ice_arr_elem_idx(54, "100GBASE_KR4"),
ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
ice_arr_elem_idx(56, "100G_CAUI4"),
ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
ice_arr_elem_idx(58, "100G_AUI4"),
ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
ice_arr_elem_idx(61, "100GBASE_CP2"),
ice_arr_elem_idx(62, "100GBASE_SR2"),
ice_arr_elem_idx(63, "100GBASE_DR"),
};
static const char * const ice_link_mode_str_high[] = {
[0] = "100GBASE_KR2_PAM4",
[1] = "100G_CAUI2_AOC_ACC",
[2] = "100G_CAUI2",
[3] = "100G_AUI2_AOC_ACC",
[4] = "100G_AUI2",
ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
ice_arr_elem_idx(2, "100G_CAUI2"),
ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
ice_arr_elem_idx(4, "100G_AUI2"),
};
/**
@ -1292,7 +1292,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* that is occurring during a download package operation.
*/
for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
ICE_PF_RESET_WAIT_COUNT; cnt++) {
ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@ -1378,6 +1378,37 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
return ICE_SUCCESS;
}
/**
* ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
* Copies rxq context from HW register space to dense structure
*/
static enum ice_status
ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
if (!ice_rxq_ctx)
return ICE_ERR_BAD_PTR;
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
/* Copy each dword separately from HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));
*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
}
return ICE_SUCCESS;
}
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
/* Field Width LSB */
@ -1429,6 +1460,32 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
/**
* ice_read_rxq_ctx - Read rxq context from HW
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
* Read rxq context from HW register space and then converts it from dense
* structure to sparse
*/
enum ice_status
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
enum ice_status status;
if (!rlan_ctx)
return ICE_ERR_BAD_PTR;
status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
if (status)
return status;
return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
}
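The read path mirrors ice_write_rxq_ctx() in reverse. A minimal caller sketch (ice_dump_rxq_ctx is a hypothetical helper; the base and qlen fields come from the existing ice_rlan_ctx definition):

static enum ice_status
ice_dump_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
        struct ice_rlan_ctx rlan_ctx = { 0 };
        enum ice_status status;

        /* Fetch the dense HW context and unpack it into the sparse struct */
        status = ice_read_rxq_ctx(hw, &rlan_ctx, rxq_index);
        if (status)
                return status;

        ice_debug(hw, ICE_DBG_QCTX, "rxq %u: base 0x%016llx qlen %u\n",
                  rxq_index, (unsigned long long)rlan_ctx.base, rlan_ctx.qlen);
        return ICE_SUCCESS;
}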
/**
* ice_clear_rxq_ctx
* @hw: pointer to the hardware structure
@ -1450,7 +1507,9 @@ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
return ICE_SUCCESS;
}
/* LAN Tx Queue Context */
/* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
* Bit[0-175] is valid
*/
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
@ -3041,7 +3100,6 @@ enum ice_status
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_port_params *cmd;
struct ice_hw *hw = pi->hw;
@ -3093,8 +3151,8 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
@ -4772,7 +4830,7 @@ ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id,
* @ce_info: a description of the struct to be filled
*/
static void
ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 dest_byte, mask;
u8 *src, *target;
@ -4790,7 +4848,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
dest_byte &= ~(mask);
dest_byte &= mask;
dest_byte >>= shift_width;
@ -4808,7 +4866,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
* @ce_info: a description of the struct to be filled
*/
static void
ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 dest_word, mask;
u8 *src, *target;
@ -4830,7 +4888,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
/* the data in the memory is stored as little endian so mask it
* correctly
*/
src_word &= ~(CPU_TO_LE16(mask));
src_word &= CPU_TO_LE16(mask);
/* get the data back into host order before shifting */
dest_word = LE16_TO_CPU(src_word);
@ -4851,7 +4909,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
* @ce_info: a description of the struct to be filled
*/
static void
ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 dest_dword, mask;
__le32 src_dword;
@ -4881,7 +4939,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
/* the data in the memory is stored as little endian so mask it
* correctly
*/
src_dword &= ~(CPU_TO_LE32(mask));
src_dword &= CPU_TO_LE32(mask);
/* get the data back into host order before shifting */
dest_dword = LE32_TO_CPU(src_dword);
@ -4902,7 +4960,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
* @ce_info: a description of the struct to be filled
*/
static void
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 dest_qword, mask;
__le64 src_qword;
@ -4932,7 +4990,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
/* the data in the memory is stored as little endian so mask it
* correctly
*/
src_qword &= ~(CPU_TO_LE64(mask));
src_qword &= CPU_TO_LE64(mask);
/* get the data back into host order before shifting */
dest_qword = LE64_TO_CPU(src_qword);
@ -4953,7 +5011,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
* @ce_info: a description of the structure to be read from
*/
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
int f;
@ -5789,7 +5847,7 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
}
/**
* ice_cfg_get_cur_lldp_persist_status
* ice_get_cur_lldp_persist_status
* @hw: pointer to the HW struct
* @lldp_status: return value of LLDP persistent status
*
@ -6266,6 +6324,8 @@ ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
* when PF owns more than 1 port it must be true
* @active_option_idx: index of active port option in returned buffer
* @active_option_valid: active option in returned buffer is valid
* @pending_option_idx: index of pending port option in returned buffer
* @pending_option_valid: pending option in returned buffer is valid
*
* Calls Get Port Options AQC (0x06ea) and verifies result.
*/
@ -6273,17 +6333,14 @@ enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid)
u8 *active_option_idx, bool *active_option_valid,
u8 *pending_option_idx, bool *pending_option_valid)
{
struct ice_aqc_get_port_options *cmd;
struct ice_aq_desc desc;
enum ice_status status;
u8 pmd_count;
u8 max_speed;
u8 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* options buffer shall be able to hold max returned options */
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return ICE_ERR_PARAM;
@ -6291,8 +6348,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
cmd = &desc.params.get_port_options;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
cmd->lport_num = lport;
cmd->lport_num = lport;
cmd->lport_num_valid = lport_valid;
status = ice_aq_send_cmd(hw, &desc, options,
@ -6313,34 +6369,59 @@ ice_aq_get_port_options(struct ice_hw *hw,
*active_option_idx);
}
/* verify indirect FW response & mask output options fields */
*pending_option_valid = cmd->pending_port_option_status &
ICE_AQC_PENDING_PORT_OPT_VALID;
if (*pending_option_valid) {
*pending_option_idx = cmd->pending_port_option_status &
ICE_AQC_PENDING_PORT_OPT_IDX_M;
if (*pending_option_idx > (*option_count - 1))
return ICE_ERR_OUT_OF_RANGE;
ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
*pending_option_idx);
}
/* mask output options fields */
for (i = 0; i < *option_count; i++) {
options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
pmd_count = options[i].pmd;
max_speed = options[i].max_lane_speed;
ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
pmd_count, max_speed);
/* check only entries containing valid max pmd speed values,
* other reserved values may be returned, when logical port
* used is unrelated to specific option
*/
if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) {
if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV)
return ICE_ERR_OUT_OF_RANGE;
if (pmd_count > 2 &&
max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G)
return ICE_ERR_CFG;
if (pmd_count > 7 &&
max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G)
return ICE_ERR_CFG;
}
options[i].pmd, options[i].max_lane_speed);
}
return ICE_SUCCESS;
}
/**
* ice_aq_set_port_option
* @hw: pointer to the hw struct
* @lport: logical port to call the command with
* @lport_valid: when false, FW uses port owned by the PF instead of lport,
* when PF owns more than 1 port it must be true
* @new_option: new port option to be written
*
* Calls Set Port Options AQC (0x06eb).
*/
enum ice_status
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
struct ice_aqc_set_port_option *cmd;
struct ice_aq_desc desc;
if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
return ICE_ERR_PARAM;
cmd = &desc.params.set_port_option;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
cmd->lport_num = lport;
cmd->lport_num_valid = lport_valid;
cmd->selected_port_option = new_option;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
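Together, the extended get and the new set command let a caller inspect the available options and switch to a new one. A sketch, assuming the PF owns the port (ice_select_port_option is hypothetical):

enum ice_status
ice_select_port_option(struct ice_hw *hw, u8 lport, u8 new_option)
{
        struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = { 0 };
        u8 option_count = ICE_AQC_PORT_OPT_MAX;
        u8 active_idx = 0, pending_idx = 0;
        bool active_valid = false, pending_valid = false;
        enum ice_status status;

        status = ice_aq_get_port_options(hw, options, &option_count, lport,
                                         true, &active_idx, &active_valid,
                                         &pending_idx, &pending_valid);
        if (status)
                return status;

        if (new_option >= option_count)
                return ICE_ERR_PARAM;

        /* Skip the AQ call if this option is already active or pending */
        if ((active_valid && active_idx == new_option) ||
            (pending_valid && pending_idx == new_option))
                return ICE_SUCCESS;

        return ice_aq_set_port_option(hw, lport, true, new_option);
}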
/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
@ -6440,6 +6521,42 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
/* each of the indexes into the following array match the speed of a return
* value from the list of AQ returned speeds like the range:
* ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding
* ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) The array is defined as 15
* elements long because the link_speed returned by the firmware is a 16 bit
* value, but is indexed by [fls(speed) - 1]
*/
static const u32 ice_aq_to_link_speed[15] = {
ICE_LINK_SPEED_10MBPS, /* BIT(0) */
ICE_LINK_SPEED_100MBPS,
ICE_LINK_SPEED_1000MBPS,
ICE_LINK_SPEED_2500MBPS,
ICE_LINK_SPEED_5000MBPS,
ICE_LINK_SPEED_10000MBPS,
ICE_LINK_SPEED_20000MBPS,
ICE_LINK_SPEED_25000MBPS,
ICE_LINK_SPEED_40000MBPS,
ICE_LINK_SPEED_50000MBPS,
ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN,
ICE_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
* ice_get_link_speed - get integer speed from table
* @index: array index from fls(aq speed) - 1
*
* Returns: u32 value containing integer speed
*/
u32 ice_get_link_speed(u16 index)
{
return ice_aq_to_link_speed[index];
}
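For example, a caller holding the 16-bit link_speed reported by firmware would index the table through ice_fls() (mapped to flsl() elsewhere in this update); a sketch:

u16 link_speed = pi->phy.link_info.link_speed;  /* e.g. ICE_AQ_LINK_SPEED_25GB */
u32 mbps = 0;

if (link_speed && !(link_speed & ICE_AQ_LINK_SPEED_UNKNOWN))
        mbps = ice_get_link_speed(ice_fls(link_speed) - 1);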
/**
* ice_fw_supports_fec_dis_auto
* @hw: pointer to the hardware structure

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -57,7 +57,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
@ -85,7 +84,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
@ -109,9 +107,14 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw);
*/
#define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1))
#define ice_arr_elem_idx(idx, val) [(idx)] = (val)
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
@ -159,6 +162,8 @@ extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
@ -240,6 +245,7 @@ enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
u32 ice_get_link_speed(u16 index);
enum ice_status
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
@ -255,9 +261,11 @@ enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid);
u8 *active_option_idx, bool *active_option_valid,
u8 *pending_option_idx, bool *pending_option_valid);
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
enum ice_status
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
enum ice_status

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1470,14 +1470,14 @@ struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
return bld;
}
static bool ice_is_gtp_u_profile(u16 prof_idx)
static bool ice_is_gtp_u_profile(u32 prof_idx)
{
return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) ||
prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
}
static bool ice_is_gtp_c_profile(u16 prof_idx)
static bool ice_is_gtp_c_profile(u32 prof_idx)
{
switch (prof_idx) {
case ICE_PROFID_IPV4_GTPC_TEID:

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -30,8 +30,8 @@
*/
/*$FreeBSD$*/
#ifndef _ICE_DDP_H_
#define _ICE_DDP_H_
#ifndef _ICE_DDP_COMMON_H_
#define _ICE_DDP_COMMON_H_
#include "ice_osdep.h"
#include "ice_adminq_cmd.h"
@ -475,4 +475,4 @@ void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
#endif /* _ICE_DDP_H_ */
#endif /* _ICE_DDP_COMMON_H_ */

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
const char ice_driver_version[] = "1.37.7-k";
const char ice_driver_version[] = "1.37.11-k";
const uint8_t ice_major_version = 1;
const uint8_t ice_minor_version = 37;
const uint8_t ice_patch_version = 7;
const uint8_t ice_patch_version = 11;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
PVID(vendor, devid, name " - 1.37.7-k")
PVID(vendor, devid, name " - 1.37.11-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.7-k")
PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.11-k")
/**
* @var ice_vendor_info_array

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -1368,6 +1368,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
enum ice_status status = ICE_SUCCESS;
u16 vsig;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@ -1377,7 +1378,16 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
l_entry)
l_entry) {
int ret;
/* check if vsig is already removed */
ret = ice_vsig_find_vsi(hw, blk,
ice_get_hw_vsi_num(hw, vsi_handle),
&vsig);
if (!ret && !vsig)
break;
if (ice_is_bit_set(p->vsis, vsi_handle)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
if (status)
@ -1389,6 +1399,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
break;
}
}
}
ice_release_lock(&hw->rss_locks);
return status;

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -804,6 +804,8 @@ enum ice_rx_flex_desc_exstat_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TXQ_CTX_SIZE_DWORDS 10
#define ICE_TXQ_CTX_SZ (ICE_TXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
@ -1065,6 +1067,7 @@ struct ice_tlan_ctx {
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
u16 tail;
};
/* LAN Tx Completion Queue data */

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -580,16 +580,23 @@ ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
MPASS(vsi->rx_qmap != NULL);
/* TODO:
* Handle scattered queues (for VFs)
*/
if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS)
switch (vsi->qmap_type) {
case ICE_RESMGR_ALLOC_CONTIGUOUS:
ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG);
ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
break;
case ICE_RESMGR_ALLOC_SCATTERED:
ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_NONCONTIG);
for (int i = 0; i < vsi->num_rx_queues; i++)
ctx->info.q_mapping[i] = CPU_TO_LE16(vsi->rx_qmap[i]);
break;
default:
return (EOPNOTSUPP);
ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG);
ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
}
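/* Example (hypothetical values): with ICE_RESMGR_ALLOC_SCATTERED and
 * rx_qmap = { 5, 9, 12 }, q_mapping[0..2] is programmed as { 5, 9, 12 },
 * one entry per queue; the contiguous case instead stores only the first
 * PF queue in q_mapping[0] and the queue count in q_mapping[1].
 */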
/* Calculate the next power-of-2 of number of queues */
if (vsi->num_rx_queues)
@ -1219,50 +1226,88 @@ ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
}
/**
* ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts
* ice_configure_rxq_interrupt - Configure HW Rx queue for an MSI-X interrupt
* @hw: ice hw structure
* @rxqid: Rx queue index in PF space
* @vector: MSI-X vector index in PF/VF space
* @itr_idx: ITR index to use for interrupt
*
* @remark ice_flush() may need to be called after this
*/
void
ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx)
{
u32 val;
MPASS(itr_idx <= ICE_ITR_NONE);
val = (QINT_RQCTL_CAUSE_ENA_M |
(itr_idx << QINT_RQCTL_ITR_INDX_S) |
(vector << QINT_RQCTL_MSIX_INDX_S));
wr32(hw, QINT_RQCTL(rxqid), val);
}
/**
* ice_configure_all_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts
* @vsi: the VSI to configure
*
* Called when setting up MSI-X interrupts to configure the Rx hardware queues.
*/
void
ice_configure_rxq_interrupts(struct ice_vsi *vsi)
ice_configure_all_rxq_interrupts(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->sc->hw;
int i;
for (i = 0; i < vsi->num_rx_queues; i++) {
struct ice_rx_queue *rxq = &vsi->rx_queues[i];
u32 val;
val = (QINT_RQCTL_CAUSE_ENA_M |
(ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) |
(rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S));
wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val);
ice_configure_rxq_interrupt(hw, vsi->rx_qmap[rxq->me],
rxq->irqv->me, ICE_RX_ITR);
}
ice_flush(hw);
}
/**
* ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts
* ice_configure_txq_interrupt - Configure HW Tx queue for an MSI-X interrupt
* @hw: ice hw structure
* @txqid: Tx queue index in PF space
* @vector: MSI-X vector index in PF/VF space
* @itr_idx: ITR index to use for interrupt
*
* @remark ice_flush() may need to be called after this
*/
void
ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx)
{
u32 val;
MPASS(itr_idx <= ICE_ITR_NONE);
val = (QINT_TQCTL_CAUSE_ENA_M |
(itr_idx << QINT_TQCTL_ITR_INDX_S) |
(vector << QINT_TQCTL_MSIX_INDX_S));
wr32(hw, QINT_TQCTL(txqid), val);
}
/**
* ice_configure_all_txq_interrupts - Configure HW Tx queues for MSI-X interrupts
* @vsi: the VSI to configure
*
* Called when setting up MSI-X interrupts to configure the Tx hardware queues.
*/
void
ice_configure_txq_interrupts(struct ice_vsi *vsi)
ice_configure_all_txq_interrupts(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->sc->hw;
int i;
for (i = 0; i < vsi->num_tx_queues; i++) {
struct ice_tx_queue *txq = &vsi->tx_queues[i];
u32 val;
val = (QINT_TQCTL_CAUSE_ENA_M |
(ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
(txq->irqv->me << QINT_TQCTL_MSIX_INDX_S));
wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val);
ice_configure_txq_interrupt(hw, vsi->tx_qmap[txq->me],
txq->irqv->me, ICE_TX_ITR);
}
ice_flush(hw);
@ -1277,7 +1322,7 @@ ice_configure_txq_interrupts(struct ice_vsi *vsi)
* queue disable logic to dissociate the Rx queue from the interrupt.
*
* Note: this function must be called prior to disabling Rx queues with
* ice_control_rx_queues, otherwise the Rx queue may not be disabled properly.
* ice_control_all_rx_queues, otherwise the Rx queue may not be disabled properly.
*/
void
ice_flush_rxq_interrupts(struct ice_vsi *vsi)
@ -1413,7 +1458,6 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->pf_num = hw->pf_id;
/* For now, we only have code supporting PF VSIs */
switch (vsi->type) {
case ICE_VSI_PF:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
@ -1648,7 +1692,66 @@ ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg)
}
/**
* ice_control_rx_queues - Configure hardware to start or stop the Rx queues
* ice_control_rx_queue - Configure hardware to start or stop an Rx queue
* @vsi: VSI containing queue to enable/disable
* @qidx: Queue index in VSI space
* @enable: true to enable queue, false to disable
*
* Control the Rx queue through the QRX_CTRL register, enabling or disabling
* it. Wait for the appropriate time to ensure that the queue has actually
* reached the expected state.
*/
int
ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable)
{
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
u32 qrx_ctrl = 0;
int err;
struct ice_rx_queue *rxq = &vsi->rx_queues[qidx];
int pf_q = vsi->rx_qmap[rxq->me];
err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
if (err) {
device_printf(dev,
"Rx queue %d is not ready\n",
pf_q);
return err;
}
/* Skip if the queue is already in correct state */
if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M))
return (0);
if (enable)
qrx_ctrl |= QRX_CTRL_QENA_REQ_M;
else
qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), qrx_ctrl);
/* wait for the queue to finalize the request */
err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
if (err) {
device_printf(dev,
"Rx queue %d %sable timeout\n",
pf_q, (enable ? "en" : "dis"));
return err;
}
/* this should never happen */
if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) {
device_printf(dev,
"Rx queue %d invalid state\n",
pf_q);
return (EDOOFUS);
}
return (0);
}
/**
* ice_control_all_rx_queues - Configure hardware to start or stop the Rx queues
* @vsi: VSI to enable/disable queues
* @enable: true to enable queues, false to disable
*
@ -1657,11 +1760,8 @@ ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg)
* reached the expected state.
*/
int
ice_control_rx_queues(struct ice_vsi *vsi, bool enable)
ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable)
{
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
u32 qrx_ctrl = 0;
int i, err;
/* TODO: amortize waits by changing all queues up front and then
@ -1669,43 +1769,9 @@ ice_control_rx_queues(struct ice_vsi *vsi, bool enable)
* when we have a large number of queues.
*/
for (i = 0; i < vsi->num_rx_queues; i++) {
struct ice_rx_queue *rxq = &vsi->rx_queues[i];
int pf_q = vsi->rx_qmap[rxq->me];
err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
if (err) {
device_printf(dev,
"Rx queue %d is not ready\n",
pf_q);
return err;
}
/* Skip if the queue is already in correct state */
if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M))
continue;
if (enable)
qrx_ctrl |= QRX_CTRL_QENA_REQ_M;
else
qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M;
wr32(hw, QRX_CTRL(pf_q), qrx_ctrl);
/* wait for the queue to finalize the request */
err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
if (err) {
device_printf(dev,
"Rx queue %d %sable timeout\n",
pf_q, (enable ? "en" : "dis"));
return err;
}
/* this should never happen */
if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) {
device_printf(dev,
"Rx queue %d invalid state\n",
pf_q);
return (EDOOFUS);
}
err = ice_control_rx_queue(vsi, i, enable);
if (err)
break;
}
return (0);
@ -4745,7 +4811,7 @@ ice_add_sysctls_mac_pfc_one_stat(struct sysctl_ctx_list *ctx,
namebuf = sbuf_new_auto();
descbuf = sbuf_new_auto();
for (int i = 0; i < ICE_MAX_DCB_TCS; i++) {
for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
sbuf_clear(namebuf);
sbuf_clear(descbuf);
@ -5133,6 +5199,58 @@ ice_sync_multicast_filters(struct ice_softc *sc)
return (err);
}
/**
* ice_add_vlan_hw_filters - Add multiple VLAN filters for a given VSI
* @vsi: The VSI to add the filter for
* @vid: array of VLAN ids to add
* @length: length of vid array
*
* Programs HW filters so that the given VSI will receive the specified VLANs.
*/
enum ice_status
ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry *vlan_entries;
enum ice_status status;
MPASS(length > 0);
INIT_LIST_HEAD(&vlan_list);
vlan_entries = (struct ice_fltr_list_entry *)
malloc(sizeof(*vlan_entries) * length, M_ICE, M_NOWAIT | M_ZERO);
if (!vlan_entries)
return (ICE_ERR_NO_MEMORY);
for (u16 i = 0; i < length; i++) {
vlan_entries[i].fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
vlan_entries[i].fltr_info.fltr_act = ICE_FWD_TO_VSI;
vlan_entries[i].fltr_info.flag = ICE_FLTR_TX;
vlan_entries[i].fltr_info.src_id = ICE_SRC_ID_VSI;
vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
vlan_entries[i].fltr_info.l_data.vlan.vlan_id = vid[i];
LIST_ADD(&vlan_entries[i].list_entry, &vlan_list);
}
status = ice_add_vlan(hw, &vlan_list);
if (!status)
goto done;
device_printf(vsi->sc->dev, "Failed to add VLAN filters:\n");
for (u16 i = 0; i < length; i++) {
device_printf(vsi->sc->dev,
"- vlan %d, status %d\n",
vlan_entries[i].fltr_info.l_data.vlan.vlan_id,
vlan_entries[i].status);
}
done:
free(vlan_entries, M_ICE);
return (status);
}
/**
* ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI
* @vsi: The VSI to add the filter for
@ -5142,29 +5260,65 @@ ice_sync_multicast_filters(struct ice_softc *sc)
*/
enum ice_status
ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
return ice_add_vlan_hw_filters(vsi, &vid, 1);
}
/**
* ice_remove_vlan_hw_filters - Remove multiple VLAN filters for a given VSI
* @vsi: The VSI to remove the filters from
* @vid: array of VLAN ids to remove
* @length: length of vid array
*
* Removes previously programmed HW filters for the specified VSI.
*/
enum ice_status
ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry vlan_entry;
struct ice_fltr_list_entry *vlan_entries;
enum ice_status status;
MPASS(length > 0);
INIT_LIST_HEAD(&vlan_list);
memset(&vlan_entry, 0, sizeof(vlan_entry));
vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
vlan_entry.fltr_info.flag = ICE_FLTR_TX;
vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI;
vlan_entry.fltr_info.vsi_handle = vsi->idx;
vlan_entry.fltr_info.l_data.vlan.vlan_id = vid;
vlan_entries = (struct ice_fltr_list_entry *)
malloc(sizeof(*vlan_entries) * length, M_ICE, M_NOWAIT | M_ZERO);
if (!vlan_entries)
return (ICE_ERR_NO_MEMORY);
LIST_ADD(&vlan_entry.list_entry, &vlan_list);
for (u16 i = 0; i < length; i++) {
vlan_entries[i].fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
vlan_entries[i].fltr_info.fltr_act = ICE_FWD_TO_VSI;
vlan_entries[i].fltr_info.flag = ICE_FLTR_TX;
vlan_entries[i].fltr_info.src_id = ICE_SRC_ID_VSI;
vlan_entries[i].fltr_info.vsi_handle = vsi->idx;
vlan_entries[i].fltr_info.l_data.vlan.vlan_id = vid[i];
return ice_add_vlan(hw, &vlan_list);
LIST_ADD(&vlan_entries[i].list_entry, &vlan_list);
}
status = ice_remove_vlan(hw, &vlan_list);
if (!status)
goto done;
device_printf(vsi->sc->dev, "Failed to remove VLAN filters:\n");
for (u16 i = 0; i < length; i++) {
device_printf(vsi->sc->dev,
"- vlan %d, status %d\n",
vlan_entries[i].fltr_info.l_data.vlan.vlan_id,
vlan_entries[i].status);
}
done:
free(vlan_entries, M_ICE);
return (status);
}
/**
* ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI
* @vsi: The VSI to add the filter for
* @vsi: The VSI to remove the filter from
* @vid: VLAN to remove
*
* Removes a previously programmed HW filter for the specified VSI.
@ -5172,23 +5326,7 @@ ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
enum ice_status
ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry vlan_entry;
INIT_LIST_HEAD(&vlan_list);
memset(&vlan_entry, 0, sizeof(vlan_entry));
vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
vlan_entry.fltr_info.flag = ICE_FLTR_TX;
vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI;
vlan_entry.fltr_info.vsi_handle = vsi->idx;
vlan_entry.fltr_info.l_data.vlan.vlan_id = vid;
LIST_ADD(&vlan_entry.list_entry, &vlan_list);
return ice_remove_vlan(hw, &vlan_list);
return ice_remove_vlan_hw_filters(vsi, &vid, 1);
}
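A sketch of how the batched variants replace a per-VID loop, e.g. when restoring a VSI's VLAN filters (ice_restore_vsi_vlans, vlan_ids, and num_vlans are hypothetical caller state):

enum ice_status
ice_restore_vsi_vlans(struct ice_vsi *vsi, u16 *vlan_ids, u16 num_vlans)
{
        /* The batched helpers MPASS() on a non-zero length */
        if (num_vlans == 0)
                return ICE_SUCCESS;

        /* One call programs every filter; on failure the helper has
         * already printed the per-VLAN status codes.
         */
        return ice_add_vlan_hw_filters(vsi, vlan_ids, num_vlans);
}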
#define ICE_SYSCTL_HELP_RX_ITR \
@ -8203,7 +8341,7 @@ ice_stop_pf_vsi(struct ice_softc *sc)
/* Disable the Tx and Rx queues */
ice_vsi_disable_tx(&sc->pf_vsi);
ice_control_rx_queues(&sc->pf_vsi, false);
ice_control_all_rx_queues(&sc->pf_vsi, false);
}
/**

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -156,8 +156,6 @@ struct ice_bar_info {
#define ICE_MSIX_BAR 3
#define ICE_MAX_DCB_TCS 8
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
@ -261,6 +259,10 @@ struct ice_bar_info {
*/
#define ICE_MIN_MTU 112
/*
* The default number of queues reserved for a VF is 4, according to the
* AVF Base Mode specification.
*/
#define ICE_DEFAULT_VF_QUEUES 4
/*
@ -826,13 +828,16 @@ uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupts(struct ice_vsi *vsi);
void ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx);
void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx);
void ice_configure_all_txq_interrupts(struct ice_vsi *vsi);
void ice_flush_rxq_interrupts(struct ice_vsi *vsi);
void ice_flush_txq_interrupts(struct ice_vsi *vsi);
int ice_cfg_vsi_for_tx(struct ice_vsi *vsi);
int ice_cfg_vsi_for_rx(struct ice_vsi *vsi);
int ice_control_rx_queues(struct ice_vsi *vsi, bool enable);
int ice_control_rx_queue(struct ice_vsi *vsi, u16 qidx, bool enable);
int ice_control_all_rx_queues(struct ice_vsi *vsi, bool enable);
int ice_cfg_pf_default_mac_filters(struct ice_softc *sc);
int ice_rm_pf_default_mac_filters(struct ice_softc *sc);
void ice_print_nvm_version(struct ice_softc *sc);
@ -851,7 +856,11 @@ void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct ice_hw_port_stats *stats);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
enum ice_status ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
enum ice_status ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -982,43 +982,67 @@ static enum ice_status
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
struct ice_orom_civd_info tmp;
u8 *orom_data;
enum ice_status status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
*
* The exact location is unknown and varies between images but is
* usually somewhere in the middle of the bank. We need to scan the
* Option ROM bank to locate it.
*
* It's significantly faster to read the entire Option ROM up front
* using the maximum page size, than to read each possible location
* with a separate firmware command.
*/
orom_data = (u8 *)ice_calloc(hw, hw->flash.banks.orom_size, sizeof(u8));
if (!orom_data)
return ICE_ERR_NO_MEMORY;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
orom_data, hw->flash.banks.orom_size);
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
return status;
}
/* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
enum ice_status status;
struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
offset, (u8 *)&tmp, sizeof(tmp));
if (status) {
ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
return status;
}
tmp = (struct ice_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
continue;
ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
offset);
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(tmp); i++)
sum += ((u8 *)&tmp)[i];
for (i = 0; i < sizeof(*tmp); i++)
sum += ((u8 *)tmp)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
return ICE_ERR_NVM;
goto err_invalid_checksum;
}
*civd = tmp;
*civd = *tmp;
ice_free(hw, orom_data);
return ICE_SUCCESS;
}
ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
err_invalid_checksum:
ice_free(hw, orom_data);
return ICE_ERR_NVM;
}
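The "simple modulo 256 sum" described in the comment above can be checked in isolation: a valid CIVD section, signature bytes included, sums to zero. A standalone sketch, not driver code:

static bool
ice_civd_checksum_ok(const u8 *buf, size_t len)
{
        u8 sum = 0;     /* u8 arithmetic wraps, i.e. the sum is taken mod 256 */

        while (len--)
                sum += *buf++;

        return sum == 0;
}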

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -72,6 +72,8 @@ void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
void ice_info_fwlog(struct ice_hw *hw, uint32_t rowsize, uint32_t groupsize,
uint8_t *buf, size_t len);
#define ice_fls(_n) flsl(_n)
#define ice_info(_hw, _fmt, args...) \
device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -76,6 +76,7 @@ enum ice_protocol_type {
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
ICE_L2TPV3,
ICE_PROTOCOL_LAST
};
@ -91,30 +92,8 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
* and GENEVE
*/
ICE_SW_TUN_IPV4_GTP_IPV4_TCP,
ICE_SW_TUN_IPV4_GTP_IPV4_UDP,
ICE_SW_TUN_IPV4_GTP_IPV6_TCP,
ICE_SW_TUN_IPV4_GTP_IPV6_UDP,
ICE_SW_TUN_IPV6_GTP_IPV4_TCP,
ICE_SW_TUN_IPV6_GTP_IPV4_UDP,
ICE_SW_TUN_IPV6_GTP_IPV6_TCP,
ICE_SW_TUN_IPV6_GTP_IPV6_UDP,
/* following adds support for GTP, just using inner protocols,
* outer L3 and L4 protocols can be anything
*/
ICE_SW_TUN_GTP_IPV4_TCP,
ICE_SW_TUN_GTP_IPV4_UDP,
ICE_SW_TUN_GTP_IPV6_TCP,
ICE_SW_TUN_GTP_IPV6_UDP,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
ICE_SW_TUN_IPV4_GTPU_IPV4,
ICE_SW_TUN_IPV4_GTPU_IPV6,
ICE_SW_TUN_IPV6_GTPU_IPV4,
ICE_SW_TUN_IPV6_GTPU_IPV6,
ICE_SW_TUN_GTP_IPV4,
ICE_SW_TUN_GTP_IPV6,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@ -185,6 +164,8 @@ enum ice_prot_id {
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_SCTP_IL_HW 96
#define ICE_PPPOE_HW 103
#define ICE_L2TPV3_HW 104
/* ICE_UDP_OF is used to identify all 3 tunnel types
* VXLAN, GENEVE and VXLAN_GPE. To differentiate further
@ -192,8 +173,7 @@ enum ice_prot_id {
*/
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_GRE_OF_HW 64 /* NVGRE */
#define ICE_PPPOE_HW 103
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 20
@ -201,9 +181,14 @@ enum ice_prot_id {
(ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word)))
#define ICE_TUN_FLAG_MASK 0xFF
#define ICE_DIR_FLAG_MASK 0x10
#define ICE_TUN_FLAG_IN_VLAN_MASK 0x80 /* VLAN inside tunneled header */
#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
#define ICE_VLAN_FLAG_MDID 20
#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
#define ICE_PROTOCOL_MAX_ENTRIES 16
/* Mapping of software defined protocol ID to hardware defined protocol ID */
@ -306,6 +291,11 @@ struct ice_pppoe_hdr {
__be16 ppp_prot_id; /* control and data only */
};
struct ice_l2tpv3_sess_hdr {
__be32 session_id;
__be64 cookie;
};
struct ice_nvgre {
__be16 flags;
__be16 protocol;
@ -324,6 +314,7 @@ union ice_prot_hdr {
struct ice_nvgre nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -239,8 +239,10 @@ struct ice_adv_rule_info {
struct ice_sw_act_ctrl sw_act;
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u8 add_dir_lkup;
u16 fltr_rule_id;
u16 lg_id;
u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
@ -490,8 +492,7 @@ ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
enum ice_status
ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
void ice_rem_all_sw_rules_info(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -945,11 +945,11 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
/* PHY configuration */
enum ice_phy_cfg {
ICE_PHY_E810 = 1,
/* PHY model */
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
ICE_PHY_E822,
ICE_PHY_ETH56G,
};
/* Port hardware description */
@ -975,7 +975,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
enum ice_phy_cfg phy_cfg;
enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
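The phy_cfg to phy_model rename above also introduces ICE_PHY_UNSUP as an explicit "unsupported" sentinel. A minimal sketch of the kind of guard callers could now write against hw->phy_model follows; the helper name is hypothetical and not part of this commit:

/* Hypothetical guard, assuming the ice_type.h definitions above. */
static inline bool
ice_phy_model_is_supported(struct ice_hw *hw)
{
	return (hw->phy_model != ICE_PHY_UNSUP);
}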


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -700,14 +700,11 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
ice_rdma_link_change(sc, LINK_STATE_UP, baudrate);
ice_link_up_msg(sc);
update_media = true;
} else { /* link is down */
iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
ice_rdma_link_change(sc, LINK_STATE_DOWN, 0);
update_media = true;
}
update_media = true;
}
/* Update the supported media types */
@ -718,8 +715,6 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
/* TODO: notify VFs of link state change */
}
/**
@ -1965,7 +1960,7 @@ ice_if_init(if_ctx_t ctx)
goto err_cleanup_tx;
}
err = ice_control_rx_queues(&sc->pf_vsi, true);
err = ice_control_all_rx_queues(&sc->pf_vsi, true);
if (err) {
device_printf(dev,
"Unable to enable Rx rings for transmit: %s\n",
@ -1984,7 +1979,7 @@ ice_if_init(if_ctx_t ctx)
/* We use software interrupts for Tx, so we only program the hardware
* interrupts for Rx.
*/
ice_configure_rxq_interrupts(&sc->pf_vsi);
ice_configure_all_rxq_interrupts(&sc->pf_vsi);
ice_configure_rx_itr(&sc->pf_vsi);
/* Configure promiscuous mode */
@ -1996,7 +1991,7 @@ ice_if_init(if_ctx_t ctx)
return;
err_stop_rx:
ice_control_rx_queues(&sc->pf_vsi, false);
ice_control_all_rx_queues(&sc->pf_vsi, false);
err_cleanup_tx:
ice_vsi_disable_tx(&sc->pf_vsi);
}
@ -2902,7 +2897,7 @@ ice_if_stop(if_ctx_t ctx)
/* Disable the Tx and Rx queues */
ice_vsi_disable_tx(&sc->pf_vsi);
ice_control_rx_queues(&sc->pf_vsi, false);
ice_control_all_rx_queues(&sc->pf_vsi, false);
}
/**


@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022, Intel Corporation
# Copyright (c) 2023, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022, Intel Corporation
# Copyright (c) 2023, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -202,6 +202,9 @@ enum virtchnl_ops {
VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111,
VIRTCHNL_OP_CONFIG_QUEUE_BW = 112,
VIRTCHNL_OP_CONFIG_QUANTA = 113,
VIRTCHNL_OP_FLOW_SUBSCRIBE = 114,
VIRTCHNL_OP_FLOW_UNSUBSCRIBE = 115,
/* opcode 116 through 128 are reserved */
VIRTCHNL_OP_MAX,
};
@ -300,6 +303,10 @@ static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode)
return "VIRTCHNL_OP_DISABLE_QUEUES_V2";
case VIRTCHNL_OP_MAP_QUEUE_VECTOR:
return "VIRTCHNL_OP_MAP_QUEUE_VECTOR";
case VIRTCHNL_OP_FLOW_SUBSCRIBE:
return "VIRTCHNL_OP_FLOW_SUBSCRIBE";
case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
return "VIRTCHNL_OP_FLOW_UNSUBSCRIBE";
case VIRTCHNL_OP_MAX:
return "VIRTCHNL_OP_MAX";
default:
@ -436,6 +443,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* BIT(8) is reserved */
#define VIRTCHNL_VF_LARGE_NUM_QPAIRS BIT(9)
#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_FSUB_PF BIT(14)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@ -1438,6 +1446,7 @@ enum virtchnl_vfr_states {
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK 16
#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) \
@ -1630,6 +1639,22 @@ struct virtchnl_proto_hdr {
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
struct virtchnl_proto_hdr_w_msk {
/* see enum virtchnl_proto_hdr_type */
s32 type;
u32 pad;
/**
 * binary buffer in network order for a specific header type.
 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
 * header is expected to be copied into the buffer.
*/
u8 buffer_spec[64];
/* binary buffer for bit-mask applied to specific header type */
u8 buffer_mask[64];
};
VIRTCHNL_CHECK_STRUCT_LEN(136, virtchnl_proto_hdr_w_msk);
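To make the spec/mask pairing concrete, here is a hedged sketch of how a VF might populate one masked IPv4 header. It assumes the virtchnl.h definitions above are in scope; the helper name and the choice to match only on the destination address are illustrative, not mandated by the protocol:

#include <sys/types.h>
#include <stddef.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/ip.h>

/* Illustrative only: match on the IPv4 destination address, ignore the rest. */
static void
fill_ipv4_dst_w_msk(struct virtchnl_proto_hdr_w_msk *h, uint32_t dst_be)
{
	struct ip ip4;

	memset(h, 0, sizeof(*h));
	h->type = VIRTCHNL_PROTO_HDR_IPV4;

	memset(&ip4, 0, sizeof(ip4));
	ip4.ip_dst.s_addr = dst_be;		/* network byte order */
	memcpy(h->buffer_spec, &ip4, sizeof(ip4));

	/* An all-ones mask over ip_dst limits matching to that field. */
	memset(h->buffer_mask + offsetof(struct ip, ip_dst), 0xff,
	    sizeof(ip4.ip_dst));
}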
struct virtchnl_proto_hdrs {
u8 tunnel_level;
/**
@ -1642,12 +1667,18 @@ struct virtchnl_proto_hdrs {
*/
int count;
/**
* number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS
* must be 0 for a raw packet request.
 * count must be <=
 * VIRTCHNL_MAX_NUM_PROTO_HDRS + VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK
 * count = 0 : select raw
 * 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr
 * count > VIRTCHNL_MAX_NUM_PROTO_HDRS : select proto_hdr_w_msk
 * last valid index = count - VIRTCHNL_MAX_NUM_PROTO_HDRS
*/
union {
struct virtchnl_proto_hdr
proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
struct virtchnl_proto_hdr_w_msk
proto_hdr_w_msk[VIRTCHNL_MAX_NUM_PROTO_HDRS_W_MSK];
struct {
u16 pkt_len;
u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET];
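The count convention above packs three cases into one field. A short sketch of how a sender might encode each case; the helper names are assumptions for illustration, not part of virtchnl:

/* Illustrative encodings of the count field described above. */
static void
proto_hdrs_select_raw(struct virtchnl_proto_hdrs *hdrs)
{
	hdrs->count = 0;	/* raw packet bytes in the raw member */
}

static void
proto_hdrs_select_plain(struct virtchnl_proto_hdrs *hdrs, int nhdrs)
{
	/* 0 < count <= VIRTCHNL_MAX_NUM_PROTO_HDRS selects proto_hdr[]. */
	hdrs->count = nhdrs;
}

static void
proto_hdrs_select_masked(struct virtchnl_proto_hdrs *hdrs, int nhdrs)
{
	/* count > VIRTCHNL_MAX_NUM_PROTO_HDRS selects proto_hdr_w_msk[];
	 * the receiver recovers the index range as
	 * count - VIRTCHNL_MAX_NUM_PROTO_HDRS.
	 */
	hdrs->count = VIRTCHNL_MAX_NUM_PROTO_HDRS + nhdrs;
}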
@ -1668,7 +1699,7 @@ struct virtchnl_rss_cfg {
VIRTCHNL_CHECK_STRUCT_LEN(2444, virtchnl_rss_cfg);
/* action configuration for FDIR */
/* action configuration for FDIR and FSUB */
struct virtchnl_filter_action {
/* see enum virtchnl_action type */
s32 type;
@ -1786,6 +1817,66 @@ struct virtchnl_fdir_del {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
/* Status returned to the VF after it issues FSUB commands
 * VIRTCHNL_FSUB_SUCCESS
 * The VF's flow-related request was completed successfully by the PF.
 * The request can be OP_FLOW_SUBSCRIBE/UNSUBSCRIBE.
 *
 * VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE
 * The OP_FLOW_SUBSCRIBE request failed because no hardware resource
 * is available.
 *
 * VIRTCHNL_FSUB_FAILURE_RULE_EXIST
 * The OP_FLOW_SUBSCRIBE request failed because the rule already exists.
 *
 * VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST
 * The OP_FLOW_UNSUBSCRIBE request failed because the rule does not exist.
 *
 * VIRTCHNL_FSUB_FAILURE_RULE_INVALID
 * The OP_FLOW_SUBSCRIBE request failed because parameter validation
 * failed or the hardware does not support the rule.
 */
enum virtchnl_fsub_prgm_status {
VIRTCHNL_FSUB_SUCCESS = 0,
VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE,
VIRTCHNL_FSUB_FAILURE_RULE_EXIST,
VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST,
VIRTCHNL_FSUB_FAILURE_RULE_INVALID,
};
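A consumer typically folds these statuses into errno values. The mapping below is a hedged sketch; the helper and the specific errno choices are assumptions, not defined by virtchnl:

#include <errno.h>

/* Illustrative status-to-errno mapping for the enum above. */
static int
virtchnl_fsub_status_to_errno(enum virtchnl_fsub_prgm_status status)
{
	switch (status) {
	case VIRTCHNL_FSUB_SUCCESS:
		return (0);
	case VIRTCHNL_FSUB_FAILURE_RULE_NORESOURCE:
		return (ENOSPC);	/* no hardware resource left */
	case VIRTCHNL_FSUB_FAILURE_RULE_EXIST:
		return (EEXIST);	/* rule already programmed */
	case VIRTCHNL_FSUB_FAILURE_RULE_NONEXIST:
		return (ENOENT);	/* nothing to unsubscribe */
	case VIRTCHNL_FSUB_FAILURE_RULE_INVALID:
	default:
		return (EINVAL);	/* bad parameters or unsupported */
	}
}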
/* VIRTCHNL_OP_FLOW_SUBSCRIBE
 * The VF sends this request to the PF by filling out vsi_id,
 * validate_only, priority, proto_hdrs and actions.
 * On success, the PF returns the assigned flow_id and a status
 * to the VF.
*/
struct virtchnl_flow_sub {
u16 vsi_id; /* INPUT */
u8 validate_only; /* INPUT */
/* 0 is the highest priority; INPUT */
u8 priority;
u32 flow_id; /* OUTPUT */
struct virtchnl_proto_hdrs proto_hdrs; /* INPUT */
struct virtchnl_filter_action_set actions; /* INPUT */
/* see enum virtchnl_fsub_prgm_status; OUTPUT */
s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_flow_sub);
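Putting the pieces together, a subscribe request might be assembled as below. This is a sketch under the assumption that the proto_hdrs and actions fill routines live elsewhere; the helper name is illustrative:

#include <string.h>

/* Illustrative assembly of an OP_FLOW_SUBSCRIBE message. */
static void
vf_build_flow_sub(struct virtchnl_flow_sub *msg, uint16_t vsi_id)
{
	memset(msg, 0, sizeof(*msg));
	msg->vsi_id = vsi_id;		/* INPUT */
	msg->validate_only = 0;		/* program the rule, not a dry run */
	msg->priority = 0;		/* 0 is the highest priority */
	/* msg->proto_hdrs and msg->actions are filled next (INPUT);
	 * msg->flow_id and msg->status are written back by the PF (OUTPUT).
	 */
}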
/* VIRTCHNL_OP_FLOW_UNSUBSCRIBE
 * The VF sends this request to the PF by filling out vsi_id
 * and flow_id. The PF returns a status to the VF.
*/
struct virtchnl_flow_unsub {
u16 vsi_id; /* INPUT */
u16 pad;
u32 flow_id; /* INPUT */
/* see enum virtchnl_fsub_prgm_status; OUTPUT */
s32 status;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_flow_unsub);
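Teardown mirrors setup: the VF echoes back the flow_id the PF assigned. A matching sketch, again with an illustrative helper name and the same <string.h> dependency as the previous example:

/* Illustrative assembly of an OP_FLOW_UNSUBSCRIBE message. */
static void
vf_build_flow_unsub(struct virtchnl_flow_unsub *msg, uint16_t vsi_id,
    uint32_t flow_id)
{
	memset(msg, 0, sizeof(*msg));
	msg->vsi_id = vsi_id;	/* INPUT */
	msg->flow_id = flow_id;	/* INPUT: id returned by OP_FLOW_SUBSCRIBE */
	/* msg->status is written back by the PF (OUTPUT). */
}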
/* VIRTCHNL_OP_GET_QOS_CAPS
* VF sends this message to get its QoS Caps, such as
* TC number, Arbiter and Bandwidth.
@ -2172,6 +2263,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_del);
break;
case VIRTCHNL_OP_FLOW_SUBSCRIBE:
valid_len = sizeof(struct virtchnl_flow_sub);
break;
case VIRTCHNL_OP_FLOW_UNSUBSCRIBE:
valid_len = sizeof(struct virtchnl_flow_unsub);
break;
case VIRTCHNL_OP_GET_QOS_CAPS:
break;
case VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP:


@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2022, Intel Corporation
/* Copyright (c) 2023, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without