ixl(4): Update to 1.9.9-k

Refresh upstream driver before impending conversion to iflib.

Major changes:

- Support for descriptor writeback mode (required by ixlv(4) for AVF support)
- Ability for the user to disable the firmware LLDP agent (PR 221530; see the sketch after this list)
- Fix for TX queue hang when using TSO (PR 221919)
- Separate descriptor ring sizes for TX and RX rings
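
The LLDP change is user-visible; below is a minimal sketch of disabling the
agent from a userland C program, assuming the driver exposes the knob as a
dev.ixl.<unit>.fw_lldp sysctl (that name is an assumption, not taken from
this commit):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int off = 0;	/* 0 = firmware LLDP agent disabled */

		/* Hypothetical sysctl name; adjust the unit number. */
		if (sysctlbyname("dev.ixl.0.fw_lldp", NULL, NULL,
		    &off, sizeof(off)) == -1) {
			perror("sysctlbyname");
			return (1);
		}
		return (0);
	}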

PR:		221530, 221919
Submitted by:	Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by:	#IntelNetworking
MFC after:	1 day
Relnotes:	Yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D14985
Committed by:	Eric Joyner, 2018-05-01 18:50:12 +00:00
Parent:		7631477269
Commit:		ceebc2f348
Notes:		svn2git 2020-12-20 02:59:44 +00:00
		svn path=/head/; revision=333149
40 changed files with 5493 additions and 1890 deletions


@ -290,6 +290,8 @@ dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_dcb.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/fdc/fdc.c optional fdc
dev/fdc/fdc_acpi.c optional fdc
dev/fdc/fdc_isa.c optional fdc isa


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -644,6 +644,24 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if ((hw->aq.api_maj_ver > 1) ||
((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver >= 7)))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
if (hw->mac.type == I40E_MAC_XL710 &&
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
if ((hw->aq.api_maj_ver > 1) ||
((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver >= 5)))
hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
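/* Illustration, not part of this commit: the three capability checks
 * above share the idiom "AQ API version >= 1.x", which a hypothetical
 * helper could name: */
static inline bool
i40e_aq_api_ver_ge(struct i40e_hw *hw, u16 maj, u16 min)
{
	return (hw->aq.api_maj_ver > maj ||
	    (hw->aq.api_maj_ver == maj && hw->aq.api_min_ver >= min));
}
/* e.g. i40e_aq_api_ver_ge(hw, 1, 7) is the 802.1ad check and
 * i40e_aq_api_ver_ge(hw, 1, 5) is the NVM-lock check above. */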
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@ -899,8 +917,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
i40e_msec_delay(1);
total_delay++;
i40e_usec_delay(50);
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@ -941,10 +959,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: AQ Critical error.\n");
status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
} else {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Writeback timeout.\n");
status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
}
}
asq_send_command_error:
@ -1007,9 +1030,9 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
/* set next_to_use to head */
if (!i40e_is_vf(hw))
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (i40e_is_vf(hw))
ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
else
ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@ -1067,7 +1090,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -159,7 +159,7 @@ static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
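/* The overall budget is unchanged at 250 ms: i40e_asq_send_command (see
 * the hunk above) now polls in 50 usec steps, so 250000 / 50 = 5000 polls
 * replace the previous 250 x 1 msec polls, and commands that finish
 * quickly are detected with much finer granularity. */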
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -41,8 +41,17 @@
* This file needs to comply with the Linux Kernel coding style.
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR 0x0005
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
I40E_FW_API_VERSION_MINOR_X722)
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
struct i40e_aq_desc {
__le16 flags;
@ -202,6 +211,7 @@ enum i40e_admin_queue_opc {
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
i40e_aqc_opc_set_dcb_parameters = 0x0303,
/* TX scheduler */
i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
@ -241,6 +251,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
i40e_aqc_opc_run_phy_activity = 0x0626,
i40e_aqc_opc_set_phy_register = 0x0628,
i40e_aqc_opc_get_phy_register = 0x0629,
/* NVM commands */
i40e_aqc_opc_nvm_read = 0x0701,
@ -248,6 +260,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_nvm_update = 0x0703,
i40e_aqc_opc_nvm_config_read = 0x0704,
i40e_aqc_opc_nvm_config_write = 0x0705,
i40e_aqc_opc_nvm_progress = 0x0706,
i40e_aqc_opc_oem_post_update = 0x0720,
i40e_aqc_opc_thermal_sensor = 0x0721,
@ -771,8 +784,52 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
u8 reserved[12];
/* The ethertype in switch_tag is dropped on ingress and used
* internally by the switch. Set this to zero for the default
* of 0x88a8 (802.1ad). Should be zero for firmware API
* versions lower than 1.7.
*/
__le16 switch_tag;
/* The ethertypes in first_tag and second_tag are used to
* match the outer and inner VLAN tags (respectively) when HW
* double VLAN tagging is enabled via the set port parameters
* AQ command. Otherwise these are both ignored. Set them to
* zero for their defaults of 0x8100 (802.1Q). Should be zero
* for firmware API versions lower than 1.7.
*/
__le16 first_tag;
__le16 second_tag;
/* Next byte is split into following:
* Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0
* Bit 6 : 0 : Destination Port, 1: source port
* Bit 5..4 : L4 type
* 0: rsvd
* 1: TCP
* 2: UDP
* 3: Both TCP and UDP
* Bits 3:0 Mode
* 0: default mode
* 1: L4 port only mode
* 2: non-tunneled mode
* 3: tunneled mode
*/
#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80
#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40
#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00
#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10
#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20
#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30
#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00
#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01
#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02
#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03
u8 mode;
u8 rsvd5[5];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
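/* Illustration with hypothetical values, not from this commit: composing
 * the mode byte for L4-port cloud-filter mode keyed on UDP source port. */
u8 mode = I40E_AQ_SET_SWITCH_BIT7_VALID |	/* apply bits 6:0 */
    I40E_AQ_SET_SWITCH_L4_SRC_PORT |		/* match on source port */
    I40E_AQ_SET_SWITCH_L4_TYPE_UDP |		/* L4 type: UDP */
    I40E_AQ_SET_SWITCH_MODE_L4_PORT;		/* L4 port only mode */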
@ -1703,6 +1760,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
I40E_PHY_TYPE_10GBASE_AOC = 0xC,
I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_UNRECOGNIZED = 0xE,
I40E_PHY_TYPE_UNSUPPORTED = 0xF,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@ -1721,9 +1780,51 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
I40E_PHY_TYPE_DEFAULT = 0xFF,
};
#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
BIT_ULL(I40E_PHY_TYPE_XAUI) | \
BIT_ULL(I40E_PHY_TYPE_XFI) | \
BIT_ULL(I40E_PHY_TYPE_SFI) | \
BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
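/* Sketch, not part of this commit: membership test against the 64-bit
 * set above. The range guard matters because sentinel types such as
 * 0xFD-0xFF lie past bit 63 and cannot be represented in the mask. */
static inline bool
i40e_phy_type_in_bitmask(u8 phy_type)
{
	return (phy_type < 64 &&
	    (BIT_ULL(phy_type) & I40E_PHY_TYPES_BITMASK) != 0);
}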
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
@ -1778,6 +1879,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@ -1907,19 +2010,31 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
/* Since firmware API 1.7 loopback field keeps power class info as well */
#define I40E_AQ_LOOPBACK_MASK 0x07
#define I40E_AQ_PWR_CLASS_SHIFT_LB 6
#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 power_desc;
union {
struct {
u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
u8 reserved[4];
};
struct {
u8 link_type[4];
u8 link_type_ext;
};
};
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@ -1956,11 +2071,28 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
__le16 lb_mode;
u8 lb_level;
#define I40E_AQ_LB_NONE 0
#define I40E_AQ_LB_MAC 1
#define I40E_AQ_LB_SERDES 2
#define I40E_AQ_LB_PHY_INT 3
#define I40E_AQ_LB_PHY_EXT 4
#define I40E_AQ_LB_CPVL_PCS 5
#define I40E_AQ_LB_CPVL_EXT 6
#define I40E_AQ_LB_PHY_LOCAL 0x01
#define I40E_AQ_LB_PHY_REMOTE 0x02
#define I40E_AQ_LB_MAC_LOCAL 0x04
u8 reserved[14];
u8 lb_type;
#define I40E_AQ_LB_LOCAL 0
#define I40E_AQ_LB_FAR 0x01
u8 speed;
#define I40E_AQ_LB_SPEED_NONE 0
#define I40E_AQ_LB_SPEED_1G 1
#define I40E_AQ_LB_SPEED_10G 2
#define I40E_AQ_LB_SPEED_40G 3
#define I40E_AQ_LB_SPEED_20G 4
u8 force_speed;
u8 reserved[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
@ -2002,14 +2134,34 @@ struct i40e_aqc_run_phy_activity {
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
/* Set PHY Register command (0x0628) */
/* Get PHY Register command (0x0629) */
struct i40e_aqc_phy_register_access {
u8 phy_interface;
#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_addres;
u8 reserved1[2];
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
/* NVM Read command (indirect 0x0701)
* NVM Erase commands (direct 0x0702)
* NVM Update commands (indirect 0x0703)
*/
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_LAST_CMD 0x01
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03
#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01
u8 module_pointer;
__le16 length;
__le32 offset;
@ -2269,6 +2421,17 @@ struct i40e_aqc_lldp_start {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Set DCB (direct 0x0303) */
struct i40e_aqc_set_dcb_parameters {
u8 command;
#define I40E_AQ_DCB_SET_AGENT 0x1
#define I40E_DCB_VALID 0x1
u8 valid_flags;
u8 reserved[14];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters);
/* Get CEE DCBX Oper Config (0x0A07)
* uses the generic descriptor struct
* returns below as indirect response


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -35,7 +35,7 @@
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include "i40e_virtchnl.h"
#include "virtchnl.h"
/**
@ -68,7 +68,6 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_X722_A0:
case I40E_DEV_ID_KX_X722:
case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
@ -78,11 +77,11 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
case I40E_DEV_ID_X722_A0_VF:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
case I40E_DEV_ID_ADAPTIVE_VF:
hw->mac.type = I40E_MAC_VF;
break;
default:
@ -298,6 +297,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return "I40E_NOT_SUPPORTED";
case I40E_ERR_FIRMWARE_API_VERSION:
return "I40E_ERR_FIRMWARE_API_VERSION";
case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@ -318,13 +319,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
u16 len = LE16_TO_CPU(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
u16 len;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
len = LE16_TO_CPU(aq_desc->datalen);
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
@ -1008,7 +1011,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
@ -1242,6 +1246,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@ -1324,6 +1330,8 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
* we don't need to do the PF Reset
*/
if (!cnt) {
u32 reg2 = 0;
reg = rd32(hw, I40E_PFGEN_CTRL);
wr32(hw, I40E_PFGEN_CTRL,
(reg | I40E_PFGEN_CTRL_PFSWR_MASK));
@ -1331,6 +1339,12 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
return I40E_ERR_NOT_READY;
}
i40e_msec_delay(1);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
@ -1519,6 +1533,7 @@ u32 i40e_led_get(struct i40e_hw *hw)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
case I40E_LINK_ACTIVITY:
continue;
default:
break;
@ -1567,6 +1582,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
case I40E_COMBINED_ACTIVITY:
case I40E_FILTER_ACTIVITY:
case I40E_MAC_ACTIVITY:
case I40E_LINK_ACTIVITY:
continue;
default:
break;
@ -1577,9 +1593,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
if (mode == I40E_LINK_ACTIVITY)
blink = FALSE;
if (blink)
gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
else
@ -1609,35 +1622,58 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
if (!abilities)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
do {
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_abilities);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
if (abilities_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
if (qualified_modules)
desc.params.external.param0 |=
if (qualified_modules)
desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
if (report_init)
desc.params.external.param0 |=
if (report_init)
desc.params.external.param0 |=
CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
cmd_details);
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
if (status != I40E_SUCCESS)
break;
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
status = I40E_ERR_UNKNOWN_PHY;
break;
} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
i40e_msec_delay(1);
total_delay++;
status = I40E_ERR_TIMEOUT;
}
} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
(total_delay < max_delay));
if (status != I40E_SUCCESS)
return status;
if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
if (hw->mac.type == I40E_MAC_XL710 &&
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
} else {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
hw->phy.phy_types |=
((u64)abilities->phy_type_ext << 32);
}
}
return status;
@ -1680,6 +1716,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
/**
* i40e_set_fc
* @hw: pointer to the hw struct
* @aq_failures: buffer to return AdminQ failure information
* @atomic_restart: whether to enable atomic link restart
*
* Set the requested flow control mode using set_phy_config.
**/
@ -1899,7 +1937,7 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
@ -1930,6 +1968,16 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= 7) {
__le32 tmp;
i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
I40E_NONDMA_TO_NONDMA);
hw->phy.phy_types = LE32_TO_CPU(tmp);
hw->phy.phy_types |= ((u64)resp->link_type_ext << 32);
}
/* save link status information */
if (link)
i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
@ -2069,9 +2117,9 @@ enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
*
* Sets loopback modes.
**/
enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
u16 lb_modes,
struct i40e_asq_cmd_details *cmd_details)
enum i40e_status_code
i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_lb_mode *cmd =
@ -2081,7 +2129,11 @@ enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_lb_modes);
cmd->lb_mode = CPU_TO_LE16(lb_modes);
cmd->lb_level = lb_level;
cmd->lb_type = lb_type;
cmd->speed = speed;
if (speed)
cmd->force_speed = 1;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@ -2607,13 +2659,14 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
* i40e_aq_set_switch_config
* @hw: pointer to the hardware structure
* @flags: bit flag values to set
* @mode: cloud filter mode
* @valid_flags: which bit flags to set
* @cmd_details: pointer to command details structure or NULL
*
* Set switch configuration bits
**/
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags, u16 valid_flags,
u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@ -2625,7 +2678,12 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
i40e_aqc_opc_set_switch_config);
scfg->flags = CPU_TO_LE16(flags);
scfg->valid_flags = CPU_TO_LE16(valid_flags);
scfg->mode = mode;
if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) {
scfg->switch_tag = CPU_TO_LE16(hw->switch_tag);
scfg->first_tag = CPU_TO_LE16(hw->first_tag);
scfg->second_tag = CPU_TO_LE16(hw->second_tag);
}
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@ -2771,6 +2829,10 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
hw->phy.link_info.req_fec_info =
abilities.fec_cfg_curr_mod_ext_info &
(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
@ -3019,8 +3081,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
* @rule_used: Number of rules used in internal switch
* @rule_free: Number of rules free in internal switch
* @rules_used: Number of rules used in internal switch
* @rules_free: Number of rules free in internal switch
*
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
@ -3080,8 +3142,8 @@ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw,
* @mr_list: list of mirrored VSI SEIDs or VLAN IDs
* @cmd_details: pointer to command details structure or NULL
* @rule_id: Rule ID returned from FW
* @rule_used: Number of rules used in internal switch
* @rule_free: Number of rules free in internal switch
* @rules_used: Number of rules used in internal switch
* @rules_free: Number of rules free in internal switch
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
@ -3111,8 +3173,8 @@ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
* add_mirrorrule.
* @mr_list: list of mirrored VLAN IDs to be removed
* @cmd_details: pointer to command details structure or NULL
* @rule_used: Number of rules used in internal switch
* @rule_free: Number of rules free in internal switch
* @rules_used: Number of rules used in internal switch
* @rules_free: Number of rules free in internal switch
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
@ -3515,6 +3577,8 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
/**
* i40e_aq_oem_post_update - triggers an OEM specific flow after update
* @hw: pointer to the hw struct
* @buff: buffer for result
* @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
@ -3593,9 +3657,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 valid_functions, num_functions;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
enum i40e_status_code status;
u16 id, ocp_cfg_word0;
u8 major_rev;
u32 i = 0;
u16 id;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@ -3887,6 +3952,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
hw->num_ports++;
}
/* OCP cards case: if a mezz is removed the ethernet port is at
* disabled state in PRTGEN_CNF register. Additional NVM read is
* needed in order to check if we are dealing with OCP card.
* Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting
* physical ports results in wrong partition id calculation and thus
* not supporting WoL.
*/
if (hw->mac.type == I40E_MAC_X722) {
if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR,
2 * I40E_SR_OCP_CFG_WORD0,
sizeof(ocp_cfg_word0),
&ocp_cfg_word0, TRUE, NULL);
if (status == I40E_SUCCESS &&
(ocp_cfg_word0 & I40E_SR_OCP_ENABLED))
hw->num_ports = 4;
i40e_release_nvm(hw);
}
}
valid_functions = p->valid_functions;
num_functions = 0;
while (valid_functions) {
@ -3964,13 +4049,14 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
* @length: length of the section to be written (in bytes from the offset)
* @data: command buffer (size [bytes] = length)
* @last_command: tells if this is the last command in a series
* @preservation_flags: Preservation mode flags
* @cmd_details: pointer to command details structure or NULL
*
* Update the NVM using the admin queue commands
**/
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@ -3991,6 +4077,16 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
/* If this is the last command in a series, set the proper flag. */
if (last_command)
cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
if (hw->mac.type == I40E_MAC_X722) {
if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED)
cmd->command_flags |=
(I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED <<
I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL)
cmd->command_flags |=
(I40E_AQ_NVM_PRESERVATION_FLAGS_ALL <<
I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT);
}
cmd->module_pointer = module_pointer;
cmd->offset = CPU_TO_LE32(offset);
cmd->length = CPU_TO_LE16(length);
@ -4005,6 +4101,28 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
return status;
}
/**
* i40e_aq_nvm_progress
* @hw: pointer to the hw struct
* @progress: pointer to progress returned from AQ
* @cmd_details: pointer to command details structure or NULL
*
* Gets progress of flash rearrangement process
**/
enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details)
{
enum i40e_status_code status;
struct i40e_aq_desc desc;
DEBUGFUNC("i40e_aq_nvm_progress");
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
*progress = desc.params.raw[0];
return status;
}
/**
* i40e_aq_get_lldp_mib
* @hw: pointer to the hw struct
@ -4319,7 +4437,39 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_set_dcb_parameters
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
* @dcb_enable: True if DCB configuration needs to be applied
*
**/
enum i40e_status_code
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
enum i40e_status_code status;
if ((hw->mac.type != I40E_MAC_XL710) ||
((hw->aq.api_maj_ver < 1) ||
((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
if (dcb_enable) {
cmd->valid_flags = I40E_DCB_VALID;
cmd->command = I40E_AQ_DCB_SET_AGENT;
}
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@ -4387,7 +4537,6 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
@ -5434,7 +5583,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
cmd->etype = CPU_TO_LE16(ethtype);
@ -5458,10 +5607,10 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @seid: VSI seid to add ethertype filter from
**/
#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 seid)
{
#define I40E_FLOW_CONTROL_ETHTYPE 0x8808
u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
@ -5892,6 +6041,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* @ret_buff_size: actual buffer size returned
* @ret_next_table: next block to read
* @ret_next_index: next index to read
* @cmd_details: pointer to command details structure or NULL
*
* Dump internal FW/HW data for debug purposes.
*
@ -6014,7 +6164,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@ -6059,7 +6209,7 @@ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
@ -6100,7 +6250,7 @@ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@ -6174,7 +6324,7 @@ enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@ -6241,7 +6391,7 @@ enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
@ -6277,7 +6427,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @phy_addr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
@ -6312,7 +6462,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
* @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
@ -6398,6 +6547,64 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
return status;
}
/**
* i40e_led_get_reg - read LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
* @reg_val: read register value
**/
static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
u32 *reg_val)
{
enum i40e_status_code status;
u8 phy_addr = 0;
*reg_val = 0;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
I40E_PHY_COM_REG_PAGE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
phy_addr = i40e_get_phy_address(hw, hw->port);
status = i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
(u16 *)reg_val);
}
return status;
}
/**
* i40e_led_set_reg - write LED register
* @hw: pointer to the HW structure
* @led_addr: LED register address
* @reg_val: register value to write
**/
static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
u32 reg_val)
{
enum i40e_status_code status;
u8 phy_addr = 0;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
I40E_PHY_COM_REG_PAGE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
phy_addr = i40e_get_phy_address(hw, hw->port);
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
(u16)reg_val);
}
return status;
}
/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
@ -6410,17 +6617,23 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
{
enum i40e_status_code status = I40E_SUCCESS;
u16 gpio_led_port;
u32 reg_val_aq;
u16 temp_addr;
u8 phy_addr = 0;
u16 reg_val;
u16 temp_addr;
u8 port_num;
u32 i;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
I40E_PHY_COM_REG_PAGE,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
*val = (u16)reg_val_aq;
return status;
}
temp_addr = I40E_PHY_LED_PROV_REG_1;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
phy_addr = i40e_get_phy_address(hw, hw->port);
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
status = i40e_read_phy_register_clause45(hw,
@ -6442,7 +6655,9 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: TRUE or FALSE
* @led_addr: address of led register to use
* @mode: original val plus bit for set or ignore
*
* Set led's on or off when controlled by the PHY
*
**/
@ -6450,51 +6665,37 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
enum i40e_status_code status = I40E_SUCCESS;
u16 led_ctl = 0;
u16 led_reg = 0;
u8 phy_addr = 0;
u8 port_num;
u32 i;
u32 led_ctl = 0;
u32 led_reg = 0;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
return status;
}
status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
status = i40e_led_set_reg(hw, led_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
status = i40e_led_set_reg(hw, led_addr, led_ctl);
}
return status;
restore_config:
status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
status = i40e_led_set_reg(hw, led_addr, led_ctl);
return status;
}
@ -6544,7 +6745,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
use_register = (((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver < 5)) ||
(hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@ -6603,7 +6806,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
use_register = (((hw->aq.api_maj_ver == 1) &&
(hw->aq.api_min_ver < 5)) ||
(hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@ -6620,6 +6825,76 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
wr32(hw, reg_addr, reg_val);
}
/**
* i40e_aq_set_phy_register
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
*
* Write the external PHY register.
**/
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
cmd->phy_interface = phy_select;
cmd->dev_addres = dev_addr;
cmd->reg_address = CPU_TO_LE32(reg_addr);
cmd->reg_value = CPU_TO_LE32(reg_val);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_get_phy_register
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the external PHY register.
**/
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
cmd->phy_interface = phy_select;
cmd->dev_addres = dev_addr;
cmd->reg_address = CPU_TO_LE32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = LE32_TO_CPU(cmd->reg_value);
return status;
}
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
@ -6634,7 +6909,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
* completion before returning.
**/
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
@ -6673,9 +6948,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg)
struct virtchnl_vf_resource *msg)
{
struct i40e_virtchnl_vsi_resource *vsi_res;
struct virtchnl_vsi_resource *vsi_res;
int i;
vsi_res = &msg->vsi_res[0];
@ -6684,20 +6959,18 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
hw->dev_caps.dcb = msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.fcoe = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
hw->dev_caps.iwarp = (msg->vf_offload_flags &
I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
hw->dev_caps.dcb = msg->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_L2;
hw->dev_caps.iwarp = (msg->vf_cap_flags &
VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
i40e_memcpy(hw->mac.perm_addr,
vsi_res->default_mac_addr,
I40E_ETH_LENGTH_OF_ADDRESS,
ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
I40E_ETH_LENGTH_OF_ADDRESS,
ETH_ALEN,
I40E_NONDMA_TO_NONDMA);
}
vsi_res++;
@ -6714,14 +6987,14 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
**/
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
{
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
/**
* i40e_aq_set_arp_proxy_config
* @hw: pointer to the HW structure
* @proxy_config - pointer to proxy config command table struct
* @proxy_config: pointer to proxy config command table struct
* @cmd_details: pointer to command details
*
* Set ARP offload parameters from pre-populated

sys/dev/ixl/i40e_dcb.c (new file, 1385 lines)

File diff suppressed because it is too large.

sys/dev/ixl/i40e_dcb.h (new file, 224 lines)

@ -0,0 +1,224 @@
/******************************************************************************
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
#include "i40e_type.h"
#define I40E_DCBX_OFFLOAD_DISABLED 0
#define I40E_DCBX_OFFLOAD_ENABLED 1
#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
#define I40E_DCBX_STATUS_DISABLED 7
#define I40E_TLV_TYPE_END 0
#define I40E_TLV_TYPE_ORG 127
#define I40E_IEEE_8021QAZ_OUI 0x0080C2
#define I40E_IEEE_SUBTYPE_ETS_CFG 9
#define I40E_IEEE_SUBTYPE_ETS_REC 10
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
#define I40E_CEE_DCBX_OUI 0x001b21
#define I40E_CEE_DCBX_TYPE 2
#define I40E_CEE_SUBTYPE_CTRL 1
#define I40E_CEE_SUBTYPE_PG_CFG 2
#define I40E_CEE_SUBTYPE_PFC_CFG 3
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
#define I40E_LLDP_ADMINSTATUS_DISABLED 0
#define I40E_LLDP_ADMINSTATUS_ENABLED_RX 1
#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
/* Defines for LLDP TLV header */
#define I40E_LLDP_MIB_HLEN 14
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
#define I40E_LLDP_TLV_TYPE_SHIFT 9
#define I40E_LLDP_TLV_TYPE_MASK (0x7F << I40E_LLDP_TLV_TYPE_SHIFT)
#define I40E_LLDP_TLV_SUBTYPE_SHIFT 0
#define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT)
#define I40E_LLDP_TLV_OUI_SHIFT 8
#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT)
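/* Sketch, not part of this commit: unpacking a TLV header word with the
 * masks above. The typelength field is big-endian on the wire, and
 * I40E_NTOHS is assumed to come from the driver's osdep layer. */
static inline void
i40e_lldp_tlv_unpack(__be16 typelength_be, u16 *type, u16 *length)
{
	u16 typelength = I40E_NTOHS(typelength_be);

	*type = (typelength & I40E_LLDP_TLV_TYPE_MASK) >>
	    I40E_LLDP_TLV_TYPE_SHIFT;
	*length = (typelength & I40E_LLDP_TLV_LEN_MASK) >>
	    I40E_LLDP_TLV_LEN_SHIFT;
}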
/* Defines for IEEE ETS TLV */
#define I40E_IEEE_ETS_MAXTC_SHIFT 0
#define I40E_IEEE_ETS_MAXTC_MASK (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
#define I40E_IEEE_ETS_CBS_SHIFT 6
#define I40E_IEEE_ETS_CBS_MASK BIT(I40E_IEEE_ETS_CBS_SHIFT)
#define I40E_IEEE_ETS_WILLING_SHIFT 7
#define I40E_IEEE_ETS_WILLING_MASK BIT(I40E_IEEE_ETS_WILLING_SHIFT)
#define I40E_IEEE_ETS_PRIO_0_SHIFT 0
#define I40E_IEEE_ETS_PRIO_0_MASK (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
#define I40E_IEEE_ETS_PRIO_1_SHIFT 4
#define I40E_IEEE_ETS_PRIO_1_MASK (0x7 << I40E_IEEE_ETS_PRIO_1_SHIFT)
#define I40E_CEE_PGID_PRIO_0_SHIFT 0
#define I40E_CEE_PGID_PRIO_0_MASK (0xF << I40E_CEE_PGID_PRIO_0_SHIFT)
#define I40E_CEE_PGID_PRIO_1_SHIFT 4
#define I40E_CEE_PGID_PRIO_1_MASK (0xF << I40E_CEE_PGID_PRIO_1_SHIFT)
#define I40E_CEE_PGID_STRICT 15
/* Defines for IEEE TSA types */
#define I40E_IEEE_TSA_STRICT 0
#define I40E_IEEE_TSA_CBS 1
#define I40E_IEEE_TSA_ETS 2
#define I40E_IEEE_TSA_VENDOR 255
/* Defines for IEEE PFC TLV */
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
#define I40E_IEEE_PFC_MBC_MASK BIT(I40E_IEEE_PFC_MBC_SHIFT)
#define I40E_IEEE_PFC_WILLING_SHIFT 7
#define I40E_IEEE_PFC_WILLING_MASK BIT(I40E_IEEE_PFC_WILLING_SHIFT)
/* Defines for IEEE APP TLV */
#define I40E_IEEE_APP_SEL_SHIFT 0
#define I40E_IEEE_APP_SEL_MASK (0x7 << I40E_IEEE_APP_SEL_SHIFT)
#define I40E_IEEE_APP_PRIO_SHIFT 5
#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
/* TLV definitions for preparing MIB */
#define I40E_TLV_ID_CHASSIS_ID 0
#define I40E_TLV_ID_PORT_ID 1
#define I40E_TLV_ID_TIME_TO_LIVE 2
#define I40E_IEEE_TLV_ID_ETS_CFG 3
#define I40E_IEEE_TLV_ID_ETS_REC 4
#define I40E_IEEE_TLV_ID_PFC_CFG 5
#define I40E_IEEE_TLV_ID_APP_PRI 6
#define I40E_TLV_ID_END_OF_LLDPPDU 7
#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
#define I40E_IEEE_ETS_TLV_LENGTH 25
#define I40E_IEEE_PFC_TLV_LENGTH 6
#define I40E_IEEE_APP_TLV_LENGTH 11
#pragma pack(1)
/* IEEE 802.1AB LLDP TLV structure */
struct i40e_lldp_generic_tlv {
__be16 typelength;
u8 tlvinfo[1];
};
/* IEEE 802.1AB LLDP Organization specific TLV */
struct i40e_lldp_org_tlv {
__be16 typelength;
__be32 ouisubtype;
u8 tlvinfo[1];
};
struct i40e_cee_tlv_hdr {
__be16 typelen;
u8 operver;
u8 maxver;
};
struct i40e_cee_ctrl_tlv {
struct i40e_cee_tlv_hdr hdr;
__be32 seqno;
__be32 ackno;
};
struct i40e_cee_feat_tlv {
struct i40e_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
u8 subtype;
u8 tlvinfo[1];
};
struct i40e_cee_app_prio {
__be16 protocol;
u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
#define I40E_CEE_APP_SELECTOR_MASK 0x03
__be16 lower_oui;
u8 prio_map;
};
#pragma pack()
/*
* TODO: The below structures related LLDP/DCBX variables
* and statistics are defined but need to find how to get
* the required information from the Firmware to use them
*/
/* IEEE 802.1AB LLDP Agent Statistics */
struct i40e_lldp_stats {
u64 remtablelastchangetime;
u64 remtableinserts;
u64 remtabledeletes;
u64 remtabledrops;
u64 remtableageouts;
u64 txframestotal;
u64 rxframesdiscarded;
u64 rxportframeerrors;
u64 rxportframestotal;
u64 rxporttlvsdiscardedtotal;
u64 rxporttlvsunrecognizedtotal;
u64 remtoomanyneighbors;
};
/* IEEE 802.1Qaz DCBX variables */
struct i40e_dcbx_variables {
u32 defmaxtrafficclasses;
u32 defprioritytcmapping;
u32 deftcbandwidth;
u32 deftsaassignment;
};
enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
u16 *status);
enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
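/* Sketch of a hypothetical consumer flow (assumed, not from this commit),
 * using only the prototypes and defines declared above: */
static enum i40e_status_code
i40e_dcb_refresh_example(struct i40e_hw *hw)
{
	u16 dcbx_status;
	enum i40e_status_code status;

	status = i40e_get_dcbx_status(hw, &dcbx_status);
	if (status != I40E_SUCCESS)
		return status;
	if (dcbx_status == I40E_DCBX_STATUS_DONE)
		status = i40e_get_dcb_config(hw);
	return status;
}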


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -54,8 +54,7 @@
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_X722_A0 0x374C
#define I40E_DEV_ID_X722_A0_VF 0x374D
#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
@ -68,4 +67,7 @@
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \
(d) == I40E_DEV_ID_25G_SFP28)
#endif /* _I40E_DEVIDS_H_ */


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -211,7 +211,6 @@ enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
* @hw: pointer to our HW structure
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
* @is_pf: distinguishes a VF from a PF
*
* This function:
* 1. Marks the entry in pd tabe (for paged address mode) or in sd table


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -34,18 +34,6 @@
#include "i40e_prototype.h"
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 words, void *data,
bool last_command);
/**
* i40e_init_nvm_ops - Initialize NVM function pointers
* @hw: pointer to the HW structure
@ -207,52 +195,6 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
return ret_code;
}
/**
* i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
i40e_release_nvm(hw);
}
return ret_code;
}
/**
* __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
return ret_code;
}
/**
* i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
* @hw: pointer to the HW structure
@ -303,16 +245,69 @@ enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
return ret_code;
}
/**
* i40e_read_nvm_aq - Read Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to read
* @data: buffer for the words read from the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
*
* Reads a 16 bit words buffer from the Shadow RAM using the admin command.
**/
static enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
u8 module_pointer, u32 offset,
u16 words, void *data,
bool last_command)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
DEBUGFUNC("i40e_read_nvm_aq");
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
* Firmware will check the module-based model.
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can read only up to 4KB (one sector), in one AQ read */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read fail error: tried to read %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single read cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
data, last_command, &cmd_details);
return ret_code;
}
/**
* i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
* Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data)
static enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
@ -325,55 +320,49 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
* __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
* __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
* Reads one 16 bit word from the Shadow RAM.
*
* Do not use this function except in cases where the nvm lock is already
* taken via i40e_acquire_nvm().
**/
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset,
u16 *words, u16 *data)
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
return ret_code;
return i40e_read_nvm_word_aq(hw, offset, data);
return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
* i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
* Reads one 16 bit word from the Shadow RAM.
**/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
i40e_release_nvm(hw);
return ret_code;
}
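The new lock handling splits word reads into a locked wrapper and a lockless core; a minimal sketch of batching two reads under a single acquisition (the helper name is hypothetical, the flow mirrors i40e_read_nvm_word() above):
/*
 * Illustrative only: read two Shadow RAM words under one NVM lock
 * acquisition by calling the double-underscore variant directly.
 */
static enum i40e_status_code
example_read_two_words(struct i40e_hw *hw, u16 offset, u16 *a, u16 *b)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_code)
		return ret_code;
	ret_code = __i40e_read_nvm_word(hw, offset, a);
	if (!ret_code)
		ret_code = __i40e_read_nvm_word(hw, offset + 1, b);
	i40e_release_nvm(hw);
	return ret_code;
}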
@ -388,8 +377,8 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
static enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
u16 index, word;
@ -421,8 +410,8 @@ enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
static enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code;
u16 read_size = *words;
@ -470,57 +459,55 @@ enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
}
/**
* i40e_read_nvm_aq - Read Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
* @offset: offset in words from module start
* @words: number of words to write
* @data: buffer with words to write to the Shadow RAM
* @last_command: tells the AdminQ that this is the last command
* __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method.
**/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 words, void *data,
bool last_command)
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
DEBUGFUNC("i40e_read_nvm_aq");
return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
/* Here we are checking the SR limit only for the flat memory model.
* We cannot do it for the module-based model, as we did not acquire
* the NVM resource yet (we cannot get the module pointer value).
* Firmware will check the module-based model.
*/
if ((offset + words) > hw->nvm.sr_size)
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: offset %d beyond Shadow RAM limit %d\n",
(offset + words), hw->nvm.sr_size);
else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
/* We can write only up to 4KB (one sector), in one AQ write */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write fail error: tried to write %d words, limit is %d.\n",
words, I40E_SR_SECTOR_SIZE_IN_WORDS);
else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
!= (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
/* A single write cannot spread over two sectors */
i40e_debug(hw, I40E_DEBUG_NVM,
"NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
offset, words);
else
ret_code = i40e_aq_read_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
data, last_command, &cmd_details);
/**
* i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
* @words: (in) number of words to read; (out) number of words actually read
* @data: words read from the Shadow RAM
*
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data)
{
enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
return ret_code;
}
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@ -562,7 +549,8 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
ret_code = i40e_aq_update_nvm(hw, module_pointer,
2 * offset, /*bytes*/
2 * words, /*bytes*/
data, last_command, &cmd_details);
data, last_command, 0,
&cmd_details);
return ret_code;
}
@ -651,16 +639,14 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
data = (u16 *)vmem.va;
/* read pointer to VPD area */
ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
&vpd_module);
ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
goto i40e_calc_nvm_checksum_exit;
}
/* read pointer to PCIe Alt Auto-load module */
ret_code = __i40e_read_nvm_word(hw,
I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
&pcie_alt_module);
if (ret_code != I40E_SUCCESS) {
ret_code = I40E_ERR_NVM_CHECKSUM;
@ -750,19 +736,19 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
DEBUGFUNC("i40e_validate_nvm_checksum");
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
i40e_release_nvm(hw);
if (ret_code != I40E_SUCCESS)
goto i40e_validate_nvm_checksum_exit;
} else {
goto i40e_validate_nvm_checksum_exit;
}
i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
/* We must acquire the NVM lock in order to correctly synchronize the
* NVM accesses across multiple PFs. Without doing so it is possible
* for one of the PFs to read invalid data potentially indicating that
* the checksum is invalid.
*/
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
__i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
i40e_release_nvm(hw);
if (ret_code)
return ret_code;
/* Verify read checksum from EEPROM is the same as
* calculated checksum
@ -774,7 +760,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
if (checksum)
*checksum = checksum_local;
i40e_validate_nvm_checksum_exit:
return ret_code;
}
@ -805,6 +790,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno);
static INLINE u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@ -814,6 +802,12 @@ static INLINE u8 i40e_nvmupd_get_transaction(u32 val)
return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
static INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
{
return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
I40E_NVM_PRESERVATION_FLAGS_SHIFT);
}
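A quick illustration of how a request's config word decomposes under these helpers; the value is made up, while the masks and shifts are the ones added to i40e_type.h later in this diff:
/* Hypothetical config word: module 0x0F, start-and-LCB transaction,
 * "preserve selected" flags; decoded with the inline helpers above. */
static void
example_decode_config(void)
{
	u32 config = 0x0F | (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) |
	    (I40E_NVM_PRESERVATION_FLAGS_SELECTED <<
	     I40E_NVM_PRESERVATION_FLAGS_SHIFT);
	u8 module = i40e_nvmupd_get_module(config);	/* 0x0F */
	u8 trans = i40e_nvmupd_get_transaction(config);	/* I40E_NVM_SA */
	u8 pflags = i40e_nvmupd_get_preservation_flags(config);	/* 0x01 */
}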
static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_INVALID",
"I40E_NVMUPD_READ_CON",
@ -831,6 +825,7 @@ static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_STATUS",
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
"I40E_NVMUPD_GET_AQ_EVENT",
};
/**
@ -900,6 +895,15 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
/* Acquire lock to prevent race condition where adminq_task
* can execute after i40e_nvmupd_nvm_read/write but before state
* variables (nvm_wait_opcode, nvm_release_on_done) are updated.
*
* During NVMUpdate, the lock has been observed to be held for
* ~5ms for most commands; however, it is held for ~60ms for the
* NVMUPD_CSUM_LCB command.
*/
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@ -919,8 +923,9 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
return I40E_SUCCESS;
i40e_nvmupd_clear_wait_state(hw);
status = I40E_SUCCESS;
break;
}
status = I40E_ERR_NOT_READY;
@ -935,6 +940,8 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*perrno = -ESRCH;
break;
}
i40e_release_spinlock(&hw->aq.arq_spinlock);
return status;
}
@ -1064,6 +1071,10 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
break;
case I40E_NVMUPD_GET_AQ_EVENT:
status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
break;
default:
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: bad cmd %s in init state\n",
@ -1241,40 +1252,56 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
return status;
}
/**
* i40e_nvmupd_clear_wait_state - clear wait state on hw
* @hw: pointer to the hardware structure
**/
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n",
hw->nvm_wait_opcode);
if (hw->nvm_release_on_done) {
i40e_release_nvm(hw);
hw->nvm_release_on_done = FALSE;
}
hw->nvm_wait_opcode = 0;
if (hw->aq.arq_last_status) {
hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
return;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
case I40E_NVMUPD_STATE_WRITE_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
break;
default:
break;
}
}
/**
* i40e_nvmupd_check_wait_event - handle NVM update operation events
* @hw: pointer to the hardware structure
* @opcode: the event that just happened
* @desc: AdminQ descriptor
**/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc)
{
u32 aq_desc_len = sizeof(struct i40e_aq_desc);
if (opcode == hw->nvm_wait_opcode) {
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
if (hw->nvm_release_on_done) {
i40e_release_nvm(hw);
hw->nvm_release_on_done = FALSE;
}
hw->nvm_wait_opcode = 0;
if (hw->aq.arq_last_status) {
hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
return;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
case I40E_NVMUPD_STATE_WRITE_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
break;
default:
break;
}
i40e_memcpy(&hw->nvm_aq_event_desc, desc,
aq_desc_len, I40E_NONDMA_TO_NONDMA);
i40e_nvmupd_clear_wait_state(hw);
}
}
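For context, callers are now expected to pass the whole received descriptor along with the opcode; a sketch of a hypothetical adminq-task call site, assuming the usual struct i40e_arq_event_info filled in by i40e_clean_arq_element():
/* Hypothetical call site in the adminq task: */
static void
example_handle_arq_event(struct i40e_hw *hw,
    struct i40e_arq_event_info *event)
{
	u16 opcode = LE16_TO_CPU(event->desc.opcode);

	i40e_nvmupd_check_wait_event(hw, opcode, &event->desc);
}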
@ -1332,6 +1359,9 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
else if (module == 0)
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
break;
case I40E_NVM_AQE:
upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
break;
}
break;
@ -1394,6 +1424,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
u32 aq_data_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
if (cmd->offset == 0xffff)
return I40E_SUCCESS;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@ -1430,6 +1463,9 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
}
}
if (cmd->offset)
memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
/* and away we go! */
status = i40e_asq_send_command(hw, aq_desc, buff,
buff_size, &cmd_details);
@ -1439,6 +1475,7 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
}
/* should we wait for a followup event? */
@ -1519,6 +1556,41 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
return I40E_SUCCESS;
}
/**
* i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
* @bytes: pointer to the data buffer
* @perrno: pointer to return error code
*
* cmd structure contains identifiers and data buffer
**/
static enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
aq_desc_len = sizeof(struct i40e_aq_desc);
aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
/* check copylength range */
if (cmd->data_size > aq_total_len) {
i40e_debug(hw, I40E_DEBUG_NVM,
"%s: copy length %d too big, trimming to %d\n",
__func__, cmd->data_size, aq_total_len);
cmd->data_size = aq_total_len;
}
i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
I40E_NONDMA_TO_NONDMA);
return I40E_SUCCESS;
}
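A request reaches this function when its transaction field carries I40E_NVM_AQE (0xe, added to i40e_type.h below); a sketch of how an update tool might compose such a request (sizing is illustrative):
/* Hypothetical tool-side request selecting only the saved event desc: */
static void
example_build_aqe_request(struct i40e_nvm_access *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->config = I40E_NVM_AQE << I40E_NVM_TRANS_SHIFT;
	cmd->data_size = sizeof(struct i40e_aq_desc); /* descriptor only */
}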
/**
* i40e_nvmupd_nvm_read - Read NVM
* @hw: pointer to hardware structure
@ -1614,18 +1686,20 @@ static enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
enum i40e_status_code status = I40E_SUCCESS;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
u8 preservation_flags;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
module = i40e_nvmupd_get_module(cmd->config);
last = (transaction & I40E_NVM_LCB);
preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
status = i40e_aq_update_nvm(hw, module, cmd->offset,
(u16)cmd->data_size, bytes, last,
&cmd_details);
preservation_flags, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
"i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -59,6 +59,8 @@ i40e_status
i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
free(mem->va, M_DEVBUF);
mem->va = NULL;
return(0);
}
@ -207,47 +209,47 @@ const char *
ixl_vc_opcode_str(uint16_t op)
{
switch (op) {
case I40E_VIRTCHNL_OP_VERSION:
case VIRTCHNL_OP_VERSION:
return ("VERSION");
case I40E_VIRTCHNL_OP_RESET_VF:
case VIRTCHNL_OP_RESET_VF:
return ("RESET_VF");
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case VIRTCHNL_OP_GET_VF_RESOURCES:
return ("GET_VF_RESOURCES");
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
return ("CONFIG_TX_QUEUE");
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
return ("CONFIG_RX_QUEUE");
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
return ("CONFIG_VSI_QUEUES");
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
return ("CONFIG_IRQ_MAP");
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_ENABLE_QUEUES:
return ("ENABLE_QUEUES");
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
return ("DISABLE_QUEUES");
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
return ("ADD_ETHER_ADDRESS");
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
return ("DEL_ETHER_ADDRESS");
case I40E_VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_ADD_ETH_ADDR:
return ("ADD_ETH_ADDR");
case VIRTCHNL_OP_DEL_ETH_ADDR:
return ("DEL_ETH_ADDR");
case VIRTCHNL_OP_ADD_VLAN:
return ("ADD_VLAN");
case I40E_VIRTCHNL_OP_DEL_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
return ("DEL_VLAN");
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
return ("CONFIG_PROMISCUOUS_MODE");
case I40E_VIRTCHNL_OP_GET_STATS:
case VIRTCHNL_OP_GET_STATS:
return ("GET_STATS");
case I40E_VIRTCHNL_OP_FCOE:
return ("FCOE");
case I40E_VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_RSVD:
return ("RSVD");
case VIRTCHNL_OP_EVENT:
return ("EVENT");
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
case VIRTCHNL_OP_CONFIG_RSS_KEY:
return ("CONFIG_RSS_KEY");
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
return ("CONFIG_RSS_LUT");
case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
return ("GET_RSS_HENA_CAPS");
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
case VIRTCHNL_OP_SET_RSS_HENA:
return ("SET_RSS_HENA");
default:
return ("UNKNOWN");

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -239,4 +239,7 @@ ixl_flush_osdep(struct i40e_osdep *osdep)
#define ixl_flush(a) ixl_flush_osdep((a)->back)
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
u16 *data);
#endif /* _I40E_OSDEP_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -37,7 +37,7 @@
#include "i40e_type.h"
#include "i40e_alloc.h"
#include "i40e_virtchnl.h"
#include "virtchnl.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@ -140,8 +140,9 @@ enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
u64 *advt_reg,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code
i40e_aq_set_lb_modes(struct i40e_hw *hw, u8 lb_level, u8 lb_type, u8 speed,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@ -226,7 +227,7 @@ enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
u16 flags, u16 valid_flags,
u16 flags, u16 valid_flags, u8 mode,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
@ -261,7 +262,9 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
u32 offset, u16 length, void *data,
bool last_command,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
@ -288,6 +291,10 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
@ -459,7 +466,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@ -471,6 +480,37 @@ static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
return i40e_ptype_lookup[ptype];
}
/**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert
*
* Returns the link_speed in terms of the virtchnl interface, for use in
* converting link_speed as reported by the AdminQ into the format used for
* talking to virtchnl devices. If we can't represent the link speed properly,
* report LINK_SPEED_UNKNOWN.
**/
static INLINE enum virtchnl_link_speed
i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
{
switch (link_speed) {
case I40E_LINK_SPEED_100MB:
return VIRTCHNL_LINK_SPEED_100MB;
case I40E_LINK_SPEED_1GB:
return VIRTCHNL_LINK_SPEED_1GB;
case I40E_LINK_SPEED_10GB:
return VIRTCHNL_LINK_SPEED_10GB;
case I40E_LINK_SPEED_40GB:
return VIRTCHNL_LINK_SPEED_40GB;
case I40E_LINK_SPEED_20GB:
return VIRTCHNL_LINK_SPEED_20GB;
case I40E_LINK_SPEED_25GB:
return VIRTCHNL_LINK_SPEED_25GB;
case I40E_LINK_SPEED_UNKNOWN:
default:
return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
}
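Illustrative use when relaying link state to a VF; this assumes the replacement virtchnl.h keeps the pf_event layout of the removed i40e_virtchnl.h shown later in this diff:
/* Sketch: convert the AdminQ-reported speed before sending an event. */
static INLINE void
example_fill_link_event(struct i40e_hw *hw, struct virtchnl_pf_event *pfe)
{
	pfe->event = VIRTCHNL_EVENT_LINK_CHANGE; /* assumed virtchnl.h name */
	pfe->event_data.link_event.link_speed =
	    i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);
}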
/* prototype for functions used for SW spinlocks */
void i40e_init_spinlock(struct i40e_spinlock *sp);
void i40e_acquire_spinlock(struct i40e_spinlock *sp);
@ -479,10 +519,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp);
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct i40e_virtchnl_vf_resource *msg);
struct virtchnl_vf_resource *msg);
enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum i40e_virtchnl_ops v_opcode,
enum virtchnl_ops v_opcode,
enum i40e_status_code v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
@ -508,6 +548,15 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -2803,7 +2803,7 @@
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -103,6 +103,7 @@ enum i40e_status_code {
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
I40E_ERR_FIRMWARE_API_VERSION = -65,
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
};
#endif /* _I40E_STATUS_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -66,6 +66,9 @@
/* Max default timeout in ms */
#define I40E_MAX_NVM_TIMEOUT 18000
/* Max timeout in ms for the phy to respond */
#define I40E_MAX_PHY_TIMEOUT 500
/* Check whether address is multicast. */
#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
@ -81,7 +84,7 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_ETH_LENGTH_OF_ADDRESS 6
#define ETH_ALEN 6
/* Data type manipulation macros. */
#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
#define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
@ -185,9 +188,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
@ -257,6 +257,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
u8 req_fec_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
@ -340,6 +341,10 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@ -360,6 +365,15 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
/* Cloud filter modes:
* Mode1: Filter on L4 port only
* Mode2: Filter for non-tunneled traffic
* Mode3: Filter for tunnel traffic
*/
#define I40E_CLOUD_FILTER_MODE1 0x6
#define I40E_CLOUD_FILTER_MODE2 0x7
#define I40E_CLOUD_FILTER_MODE3 0x8
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
@ -427,10 +441,10 @@ struct i40e_hw_capabilities {
struct i40e_mac_info {
enum i40e_mac_type type;
u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
u8 san_addr[ETH_ALEN];
u8 port_addr[ETH_ALEN];
u16 max_fcoeq;
};
@ -472,6 +486,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_STATUS,
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
I40E_NVMUPD_GET_AQ_EVENT,
};
enum i40e_nvmupd_state {
@ -491,15 +506,21 @@ enum i40e_nvmupd_state {
#define I40E_NVM_MOD_PNT_MASK 0xFF
#define I40E_NVM_TRANS_SHIFT 8
#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
#define I40E_NVM_CON 0x0
#define I40E_NVM_SNT 0x1
#define I40E_NVM_LCB 0x2
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
#define I40E_NVM_EXEC 0xf
#define I40E_NVM_TRANS_SHIFT 8
#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT)
#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12
#define I40E_NVM_PRESERVATION_FLAGS_MASK \
(0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT)
#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01
#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02
#define I40E_NVM_CON 0x0
#define I40E_NVM_SNT 0x1
#define I40E_NVM_LCB 0x2
#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB)
#define I40E_NVM_ERA 0x4
#define I40E_NVM_CSUM 0x8
#define I40E_NVM_AQE 0xe
#define I40E_NVM_EXEC 0xf
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@ -515,6 +536,19 @@ struct i40e_nvm_access {
u8 data[1];
};
/* (Q)SFP module access definitions */
#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
#define I40E_MODULE_TYPE_ADDR 0x00
#define I40E_MODULE_REVISION_ADDR 0x01
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
#define I40E_MODULE_SFF_ADDR_MODE 0x04
#define I40E_MODULE_SFF_DIAG_CAPAB 0x40
#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
#define I40E_MODULE_TYPE_QSFP28 0x11
#define I40E_MODULE_QSFP_MAX_LEN 640
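These offsets pair with the new i40e_aq_get_phy_register() accessor declared earlier in this diff; a sketch of probing the module type over I2C (the phy_select value is a placeholder, since the selector constants are not part of this diff):
/* Illustrative only: check whether a QSFP28 module is plugged in. */
static bool
example_is_qsfp28(struct i40e_hw *hw)
{
	u8 phy_select = 1; /* placeholder: external-module selector */
	u32 module_type = 0;

	if (i40e_aq_get_phy_register(hw, phy_select,
	    I40E_I2C_EEPROM_DEV_ADDR, I40E_MODULE_TYPE_ADDR,
	    &module_type, NULL))
		return FALSE;
	return (module_type == I40E_MODULE_TYPE_QSFP28);
}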
/* PCI bus types */
enum i40e_bus_type {
i40e_bus_type_unknown = 0,
@ -669,6 +703,7 @@ struct i40e_hw {
/* state of nvm update process */
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_aq_desc nvm_aq_event_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
@ -689,8 +724,16 @@ struct i40e_hw {
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
u16 switch_tag;
u16 first_tag;
u16 second_tag;
/* debug mask */
u32 debug_mask;
char err_str[16];
@ -1433,7 +1476,8 @@ struct i40e_hw_port_stats {
#define I40E_SR_PE_IMAGE_PTR 0x0C
#define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D
#define I40E_SR_MNG_CONFIG_PTR 0x0E
#define I40E_SR_EMP_MODULE_PTR 0x0F
#define I40E_EMP_MODULE_PTR 0x0F
#define I40E_SR_EMP_MODULE_PTR 0x48
#define I40E_SR_PBA_FLAGS 0x15
#define I40E_SR_PBA_BLOCK_PTR 0x16
#define I40E_SR_BOOT_CONFIG_PTR 0x17
@ -1474,6 +1518,11 @@ struct i40e_hw_port_stats {
#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5)
#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12)
#define I40E_PTR_TYPE BIT(15)
#define I40E_SR_OCP_CFG_WORD0 0x2B
#define I40E_SR_OCP_ENABLED BIT(15)
/* Shadow RAM related */
#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
@ -1589,7 +1638,8 @@ enum i40e_reset_type {
};
/* IEEE 802.1AB LLDP Agent Variables from NVM */
#define I40E_NVM_LLDP_CFG_PTR 0xD
#define I40E_NVM_LLDP_CFG_PTR 0x06
#define I40E_SR_LLDP_CFG_PTR 0x31
struct i40e_lldp_variables {
u16 length;
u16 adminstatus;

View File

@ -1,424 +0,0 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _I40E_VIRTCHNL_H_
#define _I40E_VIRTCHNL_H_
#include "i40e_type.h"
/* Description:
* This header file describes the VF-PF communication protocol used
* by the various i40e drivers.
*
* Admin queue buffer usage:
* desc->opcode is always i40e_aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* Firmware copies the cookie fields when sending messages between the PF and
* VF, but uses all other fields internally. Due to this limitation, we
* must send all messages as "indirect", i.e. using an external buffer.
*
* All the vsi indexes are relative to the VF. Each VF can have maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value is of
* i40e_status_code type, defined in the i40e_type.h.
*
* In general, VF driver initialization should roughly follow the order of these
* opcodes. The VF driver must first validate the API version of the PF driver,
* then request a reset, then get resources, then configure queues and
* interrupts. After these operations are complete, the VF driver may start
* its queues, optionally add MAC and VLAN filters, and process traffic.
*/
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum i40e_virtchnl_ops {
/* The PF sends status change events to VFs using
* the I40E_VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
*/
I40E_VIRTCHNL_OP_UNKNOWN = 0,
I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
I40E_VIRTCHNL_OP_RESET_VF = 2,
I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
I40E_VIRTCHNL_OP_ADD_VLAN = 12,
I40E_VIRTCHNL_OP_DEL_VLAN = 13,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
};
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct i40e_virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum i40e_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
/* Message descriptions and data structures.*/
/* I40E_VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define I40E_VIRTCHNL_VERSION_MAJOR 1
#define I40E_VIRTCHNL_VERSION_MINOR 1
#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct i40e_virtchnl_version_info {
u32 major;
u32 minor;
};
/* I40E_VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* i40e_virtchnl_vf_resource and one or more
* i40e_virtchnl_vsi_resource structures.
*/
struct i40e_virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum i40e_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
};
/* VF offload flags */
#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_offload_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of i40e_virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct i40e_virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled;
u64 dma_ring_addr;
u64 dma_headwb_addr;
};
/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of i40e_virtchnl_rxq_info.
* PF configures requested queue and returns a status code.
*/
/* Rx queue config info */
struct i40e_virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled;
u32 databuffer_size;
u32 max_pkt_size;
u64 dma_ring_addr;
enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
};
/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
*/
struct i40e_virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct i40e_virtchnl_txq_info txq;
struct i40e_virtchnl_rxq_info rxq;
};
struct i40e_virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
struct i40e_virtchnl_queue_pair_info qpair[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status.
*/
struct i40e_virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
struct i40e_virtchnl_irq_map_info {
u16 num_vectors;
struct i40e_virtchnl_vector_map vecmap[1];
};
/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
* I40E_VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
*/
struct i40e_virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct i40e_virtchnl_ether_addr {
u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
};
struct i40e_virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct i40e_virtchnl_ether_addr list[1];
};
/* I40E_VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* I40E_VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct i40e_virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct i40e_virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
/* I40E_VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct i40e_eth_stats in an external buffer.
*/
/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
* I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the rss fields in
* the vf resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct i40e_virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
struct i40e_virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table*/
};
/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
* I40E_VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
* Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
*/
struct i40e_virtchnl_rss_hena {
u64 hena;
};
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum i40e_virtchnl_event_codes {
I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
I40E_VIRTCHNL_EVENT_LINK_CHANGE,
I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define I40E_PF_EVENT_SEVERITY_INFO 0
#define I40E_PF_EVENT_SEVERITY_ATTENTION 1
#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct i40e_virtchnl_pf_event {
enum i40e_virtchnl_event_codes event;
union {
struct {
enum i40e_aq_link_speed link_speed;
bool link_status;
} link_event;
} event_data;
int severity;
};
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked
* will result in 3.
*/
enum i40e_vfr_states {
I40E_VFR_INPROGRESS = 0,
I40E_VFR_COMPLETED,
I40E_VFR_VFACTIVE,
I40E_VFR_UNKNOWN,
};
#endif /* _I40E_VIRTCHNL_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,7 +47,13 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixl_driver_version[] = "1.7.12-k";
#define IXL_DRIVER_VERSION_MAJOR 1
#define IXL_DRIVER_VERSION_MINOR 9
#define IXL_DRIVER_VERSION_BUILD 9
char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."
__XSTRING(IXL_DRIVER_VERSION_MINOR) "."
__XSTRING(IXL_DRIVER_VERSION_BUILD) "-k";
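/* The three macros above stringify to "1.9.9-k". */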
/*********************************************************************
* PCI Device ID Table
@ -86,7 +92,7 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
*********************************************************************/
static char *ixl_strings[] = {
"Intel(R) Ethernet Connection XL710/X722 Driver"
"Intel(R) Ethernet Connection 700 Series PF Driver"
};
@ -99,7 +105,6 @@ static int ixl_detach(device_t);
static int ixl_shutdown(device_t);
static int ixl_save_pf_tunables(struct ixl_pf *);
static int ixl_attach_get_link_status(struct ixl_pf *);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@ -151,13 +156,18 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
"Enable MSI-X interrupts");
/*
** Number of descriptors per ring:
** - TX and RX are the same size
** Number of descriptors per ring
** - TX and RX sizes are independently configurable
*/
static int ixl_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixl_ring_size, 0, "Descriptor Ring Size");
static int ixl_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
&ixl_tx_ring_size, 0, "TX Descriptor Ring Size");
static int ixl_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
&ixl_rx_ring_size, 0, "RX Descriptor Ring Size");
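With the split tunables, the TX and RX ring sizes can be set independently from loader(8); the values below are illustrative and remain subject to the driver's ring-size limits:
# /boot/loader.conf (illustrative values)
hw.ixl.tx_ring_size="2048"
hw.ixl.rx_ring_size="4096"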
/*
** This can be set manually, if left as 0 the
@ -169,6 +179,10 @@ TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
&ixl_max_queues, 0, "Number of Queues");
/*
* Leave this on unless you need to send flow control
* frames (or other control frames) from software
*/
static int ixl_enable_tx_fc_filter = 1;
TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
&ixl_enable_tx_fc_filter);
@ -176,6 +190,17 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
&ixl_enable_tx_fc_filter, 0,
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
/*
* Different method for processing TX descriptor
* completion.
*/
static int ixl_enable_head_writeback = 1;
TUNABLE_INT("hw.ixl.enable_head_writeback",
&ixl_enable_head_writeback);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
&ixl_enable_head_writeback, 0,
"For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
static int ixl_core_debug_mask = 0;
TUNABLE_INT("hw.ixl.core_debug_mask",
&ixl_core_debug_mask);
@ -218,6 +243,17 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN,
&ixl_enable_iwarp, 0, "iWARP enabled");
#if __FreeBSD_version < 1100000
int ixl_limit_iwarp_msix = 1;
#else
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX;
#endif
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix);
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN,
&ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP");
#endif
#ifdef DEV_NETMAP
@ -275,30 +311,6 @@ ixl_probe(device_t dev)
return (ENXIO);
}
static int
ixl_attach_get_link_status(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
int error = 0;
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4)) {
i40e_msec_delay(75);
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
if (error) {
device_printf(dev, "link restart failed, aq_err=%d\n",
pf->hw.aq.asq_last_status);
return error;
}
}
/* Determine link state */
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
return (0);
}
/*
* Sanity check and save off tunable values.
*/
@ -315,20 +327,16 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
#ifdef DEV_NETMAP
if (ixl_enable_head_writeback == 0)
device_printf(dev, "Head writeback mode cannot be disabled "
"when netmap is enabled\n");
pf->vsi.enable_head_writeback = 1;
#else
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
#endif
if (ixl_ring_size < IXL_MIN_RING
|| ixl_ring_size > IXL_MAX_RING
|| ixl_ring_size % IXL_RING_INCREMENT != 0) {
device_printf(dev, "Invalid ring_size value of %d set!\n",
ixl_ring_size);
device_printf(dev, "ring_size must be between %d and %d, "
"inclusive, and must be a multiple of %d\n",
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
device_printf(dev, "Using default value of %d instead\n",
IXL_DEFAULT_RING);
pf->ringsz = IXL_DEFAULT_RING;
} else
pf->ringsz = ixl_ring_size;
ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size);
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
device_printf(dev, "Invalid tx_itr value of %d set!\n",
@ -389,6 +397,7 @@ ixl_attach(device_t dev)
*/
vsi = &pf->vsi;
vsi->dev = pf->dev;
vsi->back = pf;
/* Save tunable values */
error = ixl_save_pf_tunables(pf);
@ -427,12 +436,6 @@ ixl_attach(device_t dev)
goto err_out;
}
/*
* Allocate interrupts and figure out number of queues to use
* for PF interface
*/
pf->msix = ixl_init_msix(pf);
/* Set up the admin queue */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
@ -450,23 +453,24 @@ ixl_attach(device_t dev)
if (status == I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "The driver for the device stopped "
"because the NVM image is newer than expected.\n"
"You must install the most recent version of "
"because the NVM image is newer than expected.\n");
device_printf(dev, "You must install the most recent version of "
"the network driver.\n");
error = EIO;
goto err_out;
}
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
device_printf(dev, "The driver for the device detected "
"a newer version of the NVM image than expected.\n"
"Please install the most recent version of the network driver.\n");
else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
"a newer version of the NVM image than expected.\n");
device_printf(dev, "Please install the most recent version "
"of the network driver.\n");
} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
device_printf(dev, "The driver for the device detected "
"an older version of the NVM image than expected.\n"
"Please update the NVM image.\n");
"an older version of the NVM image than expected.\n");
device_printf(dev, "Please update the NVM image.\n");
}
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@ -478,6 +482,12 @@ ixl_attach(device_t dev)
goto err_get_cap;
}
/*
* Allocate interrupts and figure out number of queues to use
* for PF interface
*/
pf->msix = ixl_init_msix(pf);
/* Set up host memory cache */
status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
@ -513,8 +523,10 @@ ixl_attach(device_t dev)
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4))
(pf->hw.aq.fw_maj_ver < 4)) {
i40e_aq_stop_lldp(hw, TRUE, NULL);
pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
}
/* Get MAC addresses from hardware */
i40e_get_mac_addr(hw, hw->mac.addr);
@ -526,6 +538,14 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
/* Query device FW LLDP status */
ixl_get_fw_lldp_status(pf);
/* Tell FW to apply DCB config on link up */
if ((hw->mac.type != I40E_MAC_X722)
&& ((pf->hw.aq.api_maj_ver > 1)
|| (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
i40e_aq_set_dcb_parameters(hw, true, NULL);
/* Initialize mac filter list for VSI */
SLIST_INIT(&vsi->ftl);
@ -619,7 +639,7 @@ ixl_attach(device_t dev)
}
/* Set initial advertised speed sysctl value */
ixl_get_initial_advertised_speeds(pf);
ixl_set_initial_advertised_speeds(pf);
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
@ -639,7 +659,13 @@ ixl_attach(device_t dev)
#endif
#ifdef DEV_NETMAP
ixl_netmap_attach(vsi);
if (vsi->num_rx_desc == vsi->num_tx_desc) {
vsi->queues[0].num_desc = vsi->num_rx_desc;
ixl_netmap_attach(vsi);
} else
device_printf(dev,
"Netmap is not supported when RX and TX descriptor ring sizes differ\n");
#endif /* DEV_NETMAP */
#ifdef IXL_IW
@ -652,7 +678,8 @@ ixl_attach(device_t dev)
"interfacing to iwarp driver failed: %d\n",
error);
goto err_late;
}
} else
device_printf(dev, "iWARP ready\n");
} else
device_printf(dev,
"iwarp disabled on this device (no msix vectors)\n");
@ -718,6 +745,9 @@ ixl_detach(device_t dev)
}
#endif
/* Remove all previously allocated media types */
ifmedia_removeall(&vsi->media);
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_stop(pf);

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -38,7 +38,13 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.4.12-k";
#define IXLV_DRIVER_VERSION_MAJOR 1
#define IXLV_DRIVER_VERSION_MINOR 5
#define IXLV_DRIVER_VERSION_BUILD 4
char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
__XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
__XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k";
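/* __XSTRING() stringifies the macro values above, so this expands to
 * the version string "1.5.4-k". */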
/*********************************************************************
* PCI Device ID Table
@ -54,7 +60,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@ -64,7 +70,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
"Intel(R) Ethernet Connection XL710/X722 VF Driver"
"Intel(R) Ethernet Connection 700 Series VF Driver"
};
@ -86,6 +92,7 @@ static void ixlv_config_rss(struct ixlv_sc *);
static void ixlv_stop(struct ixlv_sc *);
static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
@ -129,6 +136,9 @@ static int ixlv_vf_config(struct ixlv_sc *);
static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed);
static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
static void ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
@ -167,12 +177,17 @@ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
/*
** Number of descriptors per ring:
** - TX and RX are the same size
** - TX and RX sizes are independently configurable
*/
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixlv_ringsz, 0, "Descriptor Ring Size");
static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
&ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");
static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
&ixlv_rx_ring_size, 0, "TX Descriptor Ring Size");
/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
@ -191,6 +206,17 @@ TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
&ixlv_txbrsz, 0, "TX Buf Ring Size");
/*
* Different method for processing TX descriptor
* completion.
*/
static int ixlv_enable_head_writeback = 0;
TUNABLE_INT("hw.ixlv.enable_head_writeback",
&ixlv_enable_head_writeback);
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN,
&ixlv_enable_head_writeback, 0,
"For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors");
/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
@ -300,6 +326,9 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
/* Save this tunable */
vsi->enable_head_writeback = ixlv_enable_head_writeback;
/* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@ -402,7 +431,9 @@ ixlv_attach(device_t dev)
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX;
ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size);
/* This allocates the memory and early settings */
if (ixlv_setup_queues(sc) != 0) {
@ -412,16 +443,6 @@ ixlv_attach(device_t dev)
goto out;
}
/* Setup the stack interface */
if (ixlv_setup_interface(dev, sc) != 0) {
device_printf(dev, "%s: setup interface failed!\n",
__func__);
error = EIO;
goto out;
}
INIT_DBG_DEV(dev, "Queue memory and interface setup");
/* Do queue interrupt setup */
if (ixlv_assign_msix(sc) != 0) {
device_printf(dev, "%s: allocating queue interrupts failed!\n",
@ -430,9 +451,24 @@ ixlv_attach(device_t dev)
goto out;
}
INIT_DBG_DEV(dev, "Queue memory and interrupts setup");
/* Setup the stack interface */
if (ixlv_setup_interface(dev, sc) != 0) {
device_printf(dev, "%s: setup interface failed!\n",
__func__);
error = EIO;
goto out;
}
INIT_DBG_DEV(dev, "Interface setup complete");
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
/* We expect a link state message, so schedule the AdminQ task now */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
/* Initialize stats */
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
ixlv_add_sysctls(sc);
@ -456,6 +492,7 @@ ixlv_attach(device_t dev)
out:
ixlv_free_queues(vsi);
ixlv_teardown_adminq_msix(sc);
err_res_buf:
free(sc->vf_res, M_DEVBUF);
err_aq:
@ -495,6 +532,9 @@ ixlv_detach(device_t dev)
return (EBUSY);
}
/* Remove all the media and link information */
ifmedia_removeall(&sc->media);
/* Stop driver */
ether_ifdetach(vsi->ifp);
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
@ -525,8 +565,8 @@ ixlv_detach(device_t dev)
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
ixlv_free_pci_resources(sc);
ixlv_free_filters(sc);
bus_generic_detach(dev);
@ -1138,8 +1178,8 @@ ixlv_vf_config(struct ixlv_sc *sc)
retried + 1);
if (!sc->vf_res) {
bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
bufsz = sizeof(struct virtchnl_vf_resource) +
(I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
if (!sc->vf_res) {
device_printf(dev,
@ -1318,29 +1358,10 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
sc->hw.back = &sc->osdep;
/*
** Explicitly set the guest PCI BUSMASTER capability
** and we must rewrite the ENABLE in the MSIX control
** register again at this point to cause the host to
** successfully initialize us.
**
** This must be set before accessing any registers.
*/
{
u16 pci_cmd_word;
int msix_ctrl;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
ixl_set_busmaster(dev);
ixl_set_msix_enable(dev);
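/*
 * Hedged sketch of the two helpers the removed block was factored into;
 * their definitions live in shared code outside this diff and mirror
 * the old inline logic:
 */
void
ixl_set_busmaster(device_t dev)
{
	u16 pci_cmd_word;

	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}

void
ixl_set_msix_enable(device_t dev)
{
	int msix_ctrl, rid;

	pci_find_cap(dev, PCIY_MSIX, &rid);
	rid += PCIR_MSIX_CTRL;
	msix_ctrl = pci_read_config(dev, rid, 2);
	msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(dev, rid, msix_ctrl, 2);
}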
/* Disable adminq interrupts (just in case) */
ixlv_disable_adminq_irq(&sc->hw);
@ -1348,33 +1369,37 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
return (0);
}
/*
* Free MSI-X related resources for a single queue
*/
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que)
{
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
/* We may get here before stations are setup */
if (que == NULL)
goto early;
/*
** Release all msix queue resources:
*/
for (int i = 0; i < vsi->num_queues; i++, que++) {
int rid = que->msix + 1;
if (que->tag != NULL) {
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
que->res = NULL;
}
if (que->tag != NULL) {
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
early:
if (que->res != NULL) {
int rid = que->msix + 1;
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
que->res = NULL;
}
if (que->tq != NULL) {
taskqueue_free(que->tq);
que->tq = NULL;
}
}
static void
ixlv_free_pci_resources(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
pci_release_msi(dev);
if (sc->msix_mem != NULL)
@ -1437,7 +1462,7 @@ ixlv_assign_msix(struct ixlv_sc *sc)
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixlv_msix_que, que, &que->tag);
if (error) {
que->res = NULL;
que->tag = NULL;
device_printf(dev, "Failed to register que handler");
return (error);
}
@ -1518,8 +1543,8 @@ ixlv_reset_complete(struct i40e_hw *hw)
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
if ((reg == VIRTCHNL_VFR_VFACTIVE) ||
(reg == VIRTCHNL_VFR_COMPLETED))
return (0);
i40e_msec_pause(100);
}
@ -1552,7 +1577,11 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
#if __FreeBSD_version >= 1100000
ifp->if_baudrate = IF_Gbps(40);
#else
if_initbaudrate(ifp, IF_Gbps(40));
#endif
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@ -1565,7 +1594,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_transmit = ixl_mq_start;
ifp->if_qflush = ixl_qflush;
ifp->if_snd.ifq_maxlen = que->num_desc - 2;
ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2;
ether_ifattach(ifp, sc->hw.mac.addr);
@ -1573,6 +1602,10 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ ETHER_VLAN_ENCAP_LEN;
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE;
/*
* Tell the upper layer(s) we support long frames.
*/
@ -1607,7 +1640,12 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
ixlv_media_status);
// JFV Add media types later?
/* Media types based on reported link speed over AdminQ */
ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
@ -1616,6 +1654,116 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
return (0);
}
/*
** Allocate and setup a single queue
*/
static int
ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que)
{
device_t dev = sc->dev;
struct tx_ring *txr;
struct rx_ring *rxr;
int rsize, tsize;
int error = I40E_SUCCESS;
txr = &que->txr;
txr->que = que;
txr->tail = I40E_QTX_TAIL1(que->me);
/* Initialize the TX lock */
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
/*
* Create the TX descriptor ring
*
* In Head Writeback mode, the descriptor ring is one bigger
* than the number of descriptors for space for the HW to
* write back index of last completed descriptor.
*/
if (sc->vsi.enable_head_writeback) {
tsize = roundup2((que->num_tx_desc *
sizeof(struct i40e_tx_desc)) +
sizeof(u32), DBA_ALIGN);
} else {
tsize = roundup2((que->num_tx_desc *
sizeof(struct i40e_tx_desc)), DBA_ALIGN);
}
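/*
 * e.g. with 1024 TX descriptors of 16 bytes each, head writeback mode
 * reserves roundup2(16384 + 4, DBA_ALIGN) bytes; assuming DBA_ALIGN is
 * 128, that comes to 16512 bytes.
 */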
if (i40e_allocate_dma_mem(&sc->hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
error = ENOMEM;
goto err_destroy_tx_mtx;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
/* Now allocate transmit soft structs for the ring */
if (ixl_allocate_tx_data(que)) {
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
goto err_free_tx_dma;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
M_WAITOK, &txr->mtx);
if (txr->br == NULL) {
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
goto err_free_tx_data;
}
/*
* Next the RX queues...
*/
rsize = roundup2(que->num_rx_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr = &que->rxr;
rxr->que = que;
rxr->tail = I40E_QRX_TAIL1(que->me);
/* Initialize the RX side lock */
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
if (i40e_allocate_dma_mem(&sc->hw,
&rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
goto err_destroy_rx_mtx;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
/* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
goto err_free_rx_dma;
}
return (0);
err_free_rx_dma:
i40e_free_dma_mem(&sc->hw, &rxr->dma);
err_destroy_rx_mtx:
mtx_destroy(&rxr->mtx);
/* err_free_tx_buf_ring */
buf_ring_free(txr->br, M_DEVBUF);
err_free_tx_data:
ixl_free_que_tx(que);
err_free_tx_dma:
i40e_free_dma_mem(&sc->hw, &txr->dma);
err_destroy_tx_mtx:
mtx_destroy(&txr->mtx);
return (error);
}
/*
** Allocate and setup the interface queues
*/
@ -1625,9 +1773,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
device_t dev = sc->dev;
struct ixl_vsi *vsi;
struct ixl_queue *que;
struct tx_ring *txr;
struct rx_ring *rxr;
int rsize, tsize;
int i;
int error = I40E_SUCCESS;
vsi = &sc->vsi;
@ -1640,104 +1786,30 @@ ixlv_setup_queues(struct ixlv_sc *sc)
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate queue memory\n");
error = ENOMEM;
goto early;
return ENOMEM;
}
for (int i = 0; i < vsi->num_queues; i++) {
for (i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
que->num_desc = ixlv_ringsz;
que->num_tx_desc = vsi->num_tx_desc;
que->num_rx_desc = vsi->num_rx_desc;
que->me = i;
que->vsi = vsi;
txr = &que->txr;
txr->que = que;
txr->tail = I40E_QTX_TAIL1(que->me);
/* Initialize the TX lock */
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
/*
** Create the TX descriptor ring, the extra int is
** added as the location for HEAD WB.
*/
tsize = roundup2((que->num_desc *
sizeof(struct i40e_tx_desc)) +
sizeof(u32), DBA_ALIGN);
if (i40e_allocate_dma_mem(&sc->hw,
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
device_printf(dev,
"Unable to allocate TX Descriptor memory\n");
if (ixlv_setup_queue(sc, que)) {
error = ENOMEM;
goto fail;
}
txr->base = (struct i40e_tx_desc *)txr->dma.va;
bzero((void *)txr->base, tsize);
/* Now allocate transmit soft structs for the ring */
if (ixl_allocate_tx_data(que)) {
device_printf(dev,
"Critical Failure setting up TX structures\n");
error = ENOMEM;
goto fail;
}
/* Allocate a buf ring */
txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
M_WAITOK, &txr->mtx);
if (txr->br == NULL) {
device_printf(dev,
"Critical Failure setting up TX buf ring\n");
error = ENOMEM;
goto fail;
}
/*
* Next the RX queues...
*/
rsize = roundup2(que->num_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
rxr = &que->rxr;
rxr->que = que;
rxr->tail = I40E_QRX_TAIL1(que->me);
/* Initialize the RX side lock */
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
device_get_nameunit(dev), que->me);
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
if (i40e_allocate_dma_mem(&sc->hw,
&rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
device_printf(dev,
"Unable to allocate RX Descriptor memory\n");
error = ENOMEM;
goto fail;
}
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
/* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
error = ENOMEM;
goto fail;
goto err_free_queues;
}
}
return (0);
fail:
for (int i = 0; i < vsi->num_queues; i++) {
que = &vsi->queues[i];
rxr = &que->rxr;
txr = &que->txr;
if (rxr->base)
i40e_free_dma_mem(&sc->hw, &rxr->dma);
if (txr->base)
i40e_free_dma_mem(&sc->hw, &txr->dma);
}
err_free_queues:
while (i--)
ixlv_free_queue(sc, &vsi->queues[i]);
free(vsi->queues, M_DEVBUF);
early:
return (error);
}
@ -2255,6 +2327,34 @@ ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
/* Hardware is always full-duplex */
ifmr->ifm_active |= IFM_FDX;
/* Based on the link speed reported by the PF over the AdminQ, choose a
* PHY type to report. This isn't 100% correct since we don't really
* know the underlying PHY type of the PF, but at least we can report
* a valid link speed...
*/
switch (sc->link_speed) {
case VIRTCHNL_LINK_SPEED_100MB:
ifmr->ifm_active |= IFM_100_TX;
break;
case VIRTCHNL_LINK_SPEED_1GB:
ifmr->ifm_active |= IFM_1000_T;
break;
case VIRTCHNL_LINK_SPEED_10GB:
ifmr->ifm_active |= IFM_10G_SR;
break;
case VIRTCHNL_LINK_SPEED_20GB:
case VIRTCHNL_LINK_SPEED_25GB:
ifmr->ifm_active |= IFM_25G_SR;
break;
case VIRTCHNL_LINK_SPEED_40GB:
ifmr->ifm_active |= IFM_40G_SR4;
break;
default:
ifmr->ifm_active |= IFM_UNKNOWN;
break;
}
mtx_unlock(&sc->mtx);
INIT_DBG_IF(ifp, "end");
return;
@ -2279,8 +2379,10 @@ ixlv_media_change(struct ifnet * ifp)
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
if_printf(ifp, "Changing speed is not supported\n");
INIT_DBG_IF(ifp, "end");
return (0);
return (ENODEV);
}
@ -2342,7 +2444,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC;
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
sc);
@ -2435,15 +2537,10 @@ ixlv_del_multi(struct ixl_vsi *vsi)
static void
ixlv_local_timer(void *arg)
{
struct ixlv_sc *sc = arg;
struct ixlv_sc *sc = arg;
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
struct tx_ring *txr;
int hung = 0;
u32 mask, val;
s32 timer, new_timer;
u32 val;
IXLV_CORE_LOCK_ASSERT(sc);
@ -2455,9 +2552,9 @@ ixlv_local_timer(void *arg)
val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (val != I40E_VFR_VFACTIVE
&& val != I40E_VFR_COMPLETED) {
DDPRINTF(dev, "reset in progress! (%d)", val);
if (val != VIRTCHNL_VFR_VFACTIVE
&& val != VIRTCHNL_VFR_COMPLETED) {
DDPRINTF(sc->dev, "reset in progress! (%d)", val);
return;
}
@ -2466,48 +2563,11 @@ ixlv_local_timer(void *arg)
/* clean and process any events */
taskqueue_enqueue(sc->tq, &sc->aq_irq);
/*
** Check status on the queues for a hang
*/
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++, que++) {
txr = &que->txr;
timer = atomic_load_acq_32(&txr->watchdog_timer);
if (timer > 0) {
new_timer = timer - hz;
if (new_timer <= 0) {
atomic_store_rel_32(&txr->watchdog_timer, -1);
device_printf(dev, "WARNING: queue %d "
"appears to be hung!\n", que->me);
++hung;
} else {
/*
* If this fails, that means something in the TX path has updated
* the watchdog, so it means the TX path is still working and
* the watchdog doesn't need to countdown.
*/
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
/* Any queues with outstanding work get a sw irq */
wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
}
}
}
/* Reset when a queue shows hung */
if (hung)
goto hung;
/* Increment stat when a queue shows hung */
if (ixl_queue_hang_check(vsi))
sc->watchdog_events++;
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
return;
hung:
device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
sc->watchdog_events++;
ixlv_stop(sc);
ixlv_init_locked(sc);
}
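/*
 * Hedged sketch of ixl_queue_hang_check(); the shared helper is only
 * called, not defined, in this diff, but the per-queue countdown it
 * replaces (removed above) suggests roughly this shape:
 */
int
ixl_queue_hang_check(struct ixl_vsi *vsi)
{
	struct ixl_queue *que = vsi->queues;
	int hung = 0;

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		s32 timer = atomic_load_acq_32(&que->txr.watchdog_timer);

		if (timer > 0 && timer - hz <= 0) {
			device_printf(vsi->dev, "WARNING: queue %d "
			    "appears to be hung!\n", que->me);
			atomic_store_rel_32(&que->txr.watchdog_timer, -1);
			++hung;
		} else if (timer > 0)
			atomic_cmpset_rel_32(&que->txr.watchdog_timer,
			    timer, timer - hz);
	}
	return (hung);
}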
/*
@ -2524,8 +2584,8 @@ ixlv_update_link_status(struct ixlv_sc *sc)
if (sc->link_up){
if (vsi->link_active == FALSE) {
if (bootverbose)
if_printf(ifp,"Link is Up, %d Gbps\n",
(sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
if_printf(ifp,"Link is Up, %s\n",
ixlv_vc_speed_to_string(sc->link_speed));
vsi->link_active = TRUE;
if_link_state_change(ifp, LINK_STATE_UP);
}
@ -2573,6 +2633,33 @@ ixlv_stop(struct ixlv_sc *sc)
INIT_DBG_IF(ifp, "end");
}
/* Free a single queue struct */
static void
ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
return;
IXL_TX_LOCK(txr);
if (txr->br)
buf_ring_free(txr->br, M_DEVBUF);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&sc->hw, &txr->dma);
IXL_TX_UNLOCK(txr);
IXL_TX_LOCK_DESTROY(txr);
if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
return;
IXL_RX_LOCK(rxr);
ixl_free_que_rx(que);
if (rxr->base)
i40e_free_dma_mem(&sc->hw, &rxr->dma);
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
/*********************************************************************
*
@ -2586,28 +2673,12 @@ ixlv_free_queues(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
struct rx_ring *rxr = &que->rxr;
if (!mtx_initialized(&txr->mtx)) /* uninitialized */
continue;
IXL_TX_LOCK(txr);
ixl_free_que_tx(que);
if (txr->base)
i40e_free_dma_mem(&sc->hw, &txr->dma);
IXL_TX_UNLOCK(txr);
IXL_TX_LOCK_DESTROY(txr);
if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
continue;
IXL_RX_LOCK(rxr);
ixl_free_que_rx(que);
if (rxr->base)
i40e_free_dma_mem(&sc->hw, &rxr->dma);
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
/* First, free the MSI-X resources */
ixlv_free_msix_resources(sc, que);
/* Then free other queue data */
ixlv_free_queue(sc, que);
}
free(vsi->queues, M_DEVBUF);
}
@ -2716,10 +2787,10 @@ ixlv_config_rss_pf(struct ixlv_sc *sc)
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) {
DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
ixlv_config_rss_reg(sc);
} else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
} else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
ixlv_config_rss_pf(sc);
} else
@ -2823,7 +2894,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
struct virtchnl_msg *v_msg;
device_t dev = sc->dev;
u16 result = 0;
u32 reg, oldreg;
@ -2834,7 +2905,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = sc->aq_buffer;
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
v_msg = (struct virtchnl_msg *)&event.desc;
do {
ret = i40e_clean_arq_element(hw, &event, &result);
@ -2917,13 +2988,25 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
struct rx_ring *rxr;
/* Driver statistics sysctls */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
"Watchdog timeouts");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
CTLFLAG_RD, &sc->admin_irq,
"Admin Queue IRQ Handled");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size",
CTLFLAG_RD, &vsi->num_tx_desc, 0,
"TX ring size");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size",
CTLFLAG_RD, &vsi->num_rx_desc, 0,
"RX ring size");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed",
CTLTYPE_STRING | CTLFLAG_RD,
sc, 0, ixlv_sysctl_current_speed,
"A", "Current Port Speed");
/* VSI statistics sysctls */
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
@ -3048,14 +3131,71 @@ ixlv_free_filters(struct ixlv_sc *sc)
SLIST_REMOVE_HEAD(sc->mac_filters, next);
free(f, M_DEVBUF);
}
free(sc->mac_filters, M_DEVBUF);
while (!SLIST_EMPTY(sc->vlan_filters)) {
v = SLIST_FIRST(sc->vlan_filters);
SLIST_REMOVE_HEAD(sc->vlan_filters, next);
free(v, M_DEVBUF);
}
free(sc->vlan_filters, M_DEVBUF);
return;
}
static char *
ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed)
{
int index;
char *speeds[] = {
"Unknown",
"100 Mbps",
"1 Gbps",
"10 Gbps",
"40 Gbps",
"20 Gbps",
"25 Gbps",
};
switch (link_speed) {
case VIRTCHNL_LINK_SPEED_100MB:
index = 1;
break;
case VIRTCHNL_LINK_SPEED_1GB:
index = 2;
break;
case VIRTCHNL_LINK_SPEED_10GB:
index = 3;
break;
case VIRTCHNL_LINK_SPEED_40GB:
index = 4;
break;
case VIRTCHNL_LINK_SPEED_20GB:
index = 5;
break;
case VIRTCHNL_LINK_SPEED_25GB:
index = 6;
break;
case VIRTCHNL_LINK_SPEED_UNKNOWN:
default:
index = 0;
break;
}
return speeds[index];
}
static int
ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
{
struct ixlv_sc *sc = (struct ixlv_sc *)arg1;
int error = 0;
error = sysctl_handle_string(oidp,
ixlv_vc_speed_to_string(sc->link_speed),
8, req);
return (error);
}
#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -188,8 +188,8 @@ enum ixl_dbg_mask {
* The driver currently always uses 32 byte Rx descriptors.
*/
#define IXL_DEFAULT_RING 1024
#define IXL_MAX_RING 8160
#define IXL_MIN_RING 32
#define IXL_MAX_RING 4096
#define IXL_MIN_RING 64
#define IXL_RING_INCREMENT 32
#define IXL_AQ_LEN 256
@ -207,6 +207,9 @@ enum ixl_dbg_mask {
* This is the max watchdog interval, i.e. the time that can
* pass between any two TX clean operations; those only happen
* when the TX hardware is functioning.
*
* XXX: Watchdog currently counts down in units of (hz)
* Set this to just (hz) if you want queues to hang under a little bit of stress
*/
#define IXL_WATCHDOG (10 * hz)
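/* The local timer fires once per second and subtracts hz from each
 * queue's watchdog per pass, so (10 * hz) amounts to a ten-second
 * budget. */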
@ -214,8 +217,8 @@ enum ixl_dbg_mask {
* This parameters control when the driver calls the routine to reclaim
* transmit descriptors.
*/
#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
#define IXL_TX_CLEANUP_THRESHOLD (que->num_tx_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_tx_desc / 32)
#define MAX_MULTICAST_ADDR 128
@ -232,9 +235,10 @@ enum ixl_dbg_mask {
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_SPARSE_CHAIN 7
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_MIN_TSO_MSS 64
#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
@ -287,6 +291,8 @@ enum ixl_dbg_mask {
/* Misc flags for ixl_vsi.flags */
#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)
#define IXL_VF_RESET_TIMEOUT 100
@ -505,7 +511,11 @@ struct ixl_queue {
u32 eims; /* This queue's EIMS bit */
struct resource *res;
void *tag;
int num_desc; /* both tx and rx */
int num_tx_desc; /* # of TX descriptors */
int num_rx_desc; /* # of RX descriptors */
#ifdef DEV_NETMAP
int num_desc; /* for compatibility with current netmap code in kernel */
#endif
struct tx_ring txr;
struct rx_ring rxr;
struct task task;
@ -536,9 +546,12 @@ struct ixl_vsi {
enum i40e_vsi_type type;
int id;
u16 num_queues;
int num_tx_desc;
int num_rx_desc;
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 max_frame_size;
bool enable_head_writeback;
struct ixl_queue *queues; /* head of queues */
@ -596,7 +609,7 @@ ixl_rx_unrefreshed(struct ixl_queue *que)
if (rxr->next_check > rxr->next_refresh)
return (rxr->next_check - rxr->next_refresh - 1);
else
return ((que->num_desc + rxr->next_check) -
return ((que->num_rx_desc + rxr->next_check) -
rxr->next_refresh - 1);
}
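/*
 * Worked example: with num_rx_desc = 1024, next_check = 8, and
 * next_refresh = 1000, the ring has wrapped, so (1024 + 8) - 1000 - 1
 * = 31 descriptors still await refresh.
 */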
@ -681,6 +694,9 @@ void ixl_free_que_rx(struct ixl_queue *);
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int);
int ixl_queue_hang_check(struct ixl_vsi *);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_qflush(struct ifnet *);
@ -689,4 +705,8 @@ void ixl_qflush(struct ifnet *);
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
void ixl_get_default_rss_key(u32 *);
const char * i40e_vc_stat_str(struct i40e_hw *hw,
enum virtchnl_status_code stat_err);
void ixl_set_busmaster(device_t);
void ixl_set_msix_enable(device_t);
#endif /* _IXL_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -374,9 +374,16 @@ ixl_iw_register(struct ixl_iw_ops *ops)
{
struct ixl_iw_pf_entry *pf_entry;
int err = 0;
int iwarp_cap_on_pfs = 0;
INIT_DEBUGOUT("begin");
LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
printf("%s: the device is not iwarp-capable, registering dropped\n",
__func__);
return (ENODEV);
}
if (ixl_enable_iwarp == 0) {
printf("%s: enable_iwarp is off, registering dropped\n",
__func__);
@ -389,19 +396,20 @@ ixl_iw_register(struct ixl_iw_ops *ops)
}
mtx_lock(&ixl_iw.mtx);
if (ixl_iw.registered) {
printf("%s: iwarp driver already registered\n", __func__);
err = EBUSY;
err = (EBUSY);
goto out;
}
ixl_iw.registered = true;
mtx_unlock(&ixl_iw.mtx);
ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
taskqueue_thread_enqueue, &ixl_iw.tq);
if (ixl_iw.tq == NULL) {
printf("%s: failed to create queue\n", __func__);
err = ENOMEM;
goto out;
ixl_iw.registered = false;
return (ENOMEM);
}
taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");
@ -410,20 +418,19 @@ ixl_iw_register(struct ixl_iw_ops *ops)
if (ixl_iw.ops == NULL) {
printf("%s: failed to allocate memory\n", __func__);
taskqueue_free(ixl_iw.tq);
err = ENOMEM;
goto out;
ixl_iw.registered = false;
return (ENOMEM);
}
ixl_iw.ops->init = ops->init;
ixl_iw.ops->stop = ops->stop;
ixl_iw.registered = true;
mtx_lock(&ixl_iw.mtx);
LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
}
out:
mtx_unlock(&ixl_iw.mtx);
@ -434,9 +441,23 @@ int
ixl_iw_unregister(void)
{
struct ixl_iw_pf_entry *pf_entry;
int iwarp_cap_on_pfs = 0;
INIT_DEBUGOUT("begin");
LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
iwarp_cap_on_pfs += pf_entry->pf->hw.func_caps.iwarp;
if (!iwarp_cap_on_pfs && ixl_enable_iwarp) {
printf("%s: attempt to unregister driver when no iwarp-capable device present\n",
__func__);
return (ENODEV);
}
if (ixl_enable_iwarp == 0) {
printf("%s: attempt to unregister driver when enable_iwarp is off\n",
__func__);
return (ENODEV);
}
mtx_lock(&ixl_iw.mtx);
if (!ixl_iw.registered) {

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -36,7 +36,7 @@
#define _IXL_IW_H_
#define IXL_IW_MAX_USER_PRIORITY 8
#define IXL_IW_MAX_MSIX 64
struct ixl_iw_msix_mapping {
u8 itr_indx;

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -46,6 +46,7 @@
#define VF_FLAG_MAC_ANTI_SPOOF 0x10
#define IXL_PF_STATE_EMPR_RESETTING (1 << 0)
#define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1)
struct ixl_vf {
struct ixl_vsi vsi;
@ -94,7 +95,6 @@ struct ixl_pf {
/* Tunable values */
bool enable_msix;
int max_queues;
int ringsz;
bool enable_tx_fc_filter;
int dynamic_rx_itr;
int dynamic_tx_itr;
@ -157,6 +157,17 @@ struct ixl_pf {
"Set to 0 to disable link.\n" \
"Use \"sysctl -x\" to view flags properly."
#define IXL_SYSCTL_HELP_SUPPORTED_SPEED \
"\nSupported link speeds.\n" \
"Flags:\n" \
"\t 0x1 - 100M\n" \
"\t 0x2 - 1G\n" \
"\t 0x4 - 10G\n" \
"\t 0x8 - 20G\n" \
"\t0x10 - 25G\n" \
"\t0x20 - 40G\n\n" \
"Use \"sysctl -x\" to view flags properly."
#define IXL_SYSCTL_HELP_FC \
"\nSet flow control mode using the values below.\n" \
"\t0 - off\n" \
@ -168,6 +179,11 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
#define IXL_SYSCTL_HELP_FW_LLDP \
"\nFW LLDP engine:\n" \
"\t0 - disable\n" \
"\t1 - enable\n"
extern const char * const ixl_fc_string[6];
MALLOC_DECLARE(M_IXL);
@ -204,8 +220,6 @@ void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
* PF-only function declarations
*/
void ixl_set_busmaster(device_t);
void ixl_set_msix_enable(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
@ -273,8 +287,8 @@ void ixl_configure_legacy(struct ixl_pf *);
void ixl_free_pci_resources(struct ixl_pf *);
void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
void ixl_config_rss(struct ixl_pf *);
int ixl_set_advertised_speeds(struct ixl_pf *, int);
void ixl_get_initial_advertised_speeds(struct ixl_pf *);
int ixl_set_advertised_speeds(struct ixl_pf *, int, bool);
void ixl_set_initial_advertised_speeds(struct ixl_pf *);
void ixl_print_nvm_version(struct ixl_pf *pf);
void ixl_add_device_sysctls(struct ixl_pf *);
void ixl_handle_mdd_event(struct ixl_pf *);
@ -287,7 +301,8 @@ int ixl_aq_get_link_status(struct ixl_pf *,
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
void ixl_set_queue_rx_itr(struct ixl_queue *);
void ixl_set_queue_tx_itr(struct ixl_queue *);
@ -330,6 +345,9 @@ void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
int ixl_vsi_setup_queues(struct ixl_vsi *vsi);
void ixl_vsi_free_queues(struct ixl_vsi *vsi);
/*
* I2C Function prototypes
*/
@ -339,4 +357,7 @@ s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
int ixl_get_fw_lldp_status(struct ixl_pf *pf);
int ixl_attach_get_link_status(struct ixl_pf *);
#endif /* _IXL_PF_H_ */

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

View File

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -60,12 +60,12 @@ static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t
static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
@ -403,7 +403,7 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
if (i == IXL_VF_RESET_TIMEOUT)
device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
@ -416,7 +416,7 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
ixl_vf_setup_vsi(pf, vf);
ixl_vf_map_queues(pf, vf);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
ixl_flush(hw);
}
@ -424,7 +424,7 @@ static int
ixl_vc_opcode_level(uint16_t opcode)
{
switch (opcode) {
case I40E_VIRTCHNL_OP_GET_STATS:
case VIRTCHNL_OP_GET_STATS:
return (10);
default:
return (5);
@ -471,19 +471,19 @@ static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_version_info reply;
struct virtchnl_version_info reply;
if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
if (msg_size != sizeof(struct virtchnl_version_info)) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
I40E_ERR_PARAM);
return;
}
vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
vf->version = ((struct virtchnl_version_info *)msg)->minor;
reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
reply.major = VIRTCHNL_VERSION_MAJOR;
reply.minor = VIRTCHNL_VERSION_MINOR;
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
sizeof(reply));
}
@ -493,7 +493,7 @@ ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
if (msg_size != 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
I40E_ERR_PARAM);
return;
}
@ -507,30 +507,30 @@ static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vf_resource reply;
struct virtchnl_vf_resource reply;
if ((vf->version == 0 && msg_size != 0) ||
(vf->version == 1 && msg_size != 4)) {
device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
" for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
" for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
vf->version);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_ERR_PARAM);
return;
}
bzero(&reply, sizeof(reply));
if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
else
/* Force VF RSS setup by PF in 1.1+ VFs */
reply.vf_offload_flags = *(u32 *)msg & (
I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
reply.vf_cap_flags = *(u32 *)msg & (
VIRTCHNL_VF_OFFLOAD_L2 |
VIRTCHNL_VF_OFFLOAD_RSS_PF |
VIRTCHNL_VF_OFFLOAD_VLAN);
reply.num_vsis = 1;
reply.num_queue_pairs = vf->vsi.num_queues;
@ -538,17 +538,17 @@ ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
reply.rss_key_size = 52;
reply.rss_lut_size = 64;
reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
I40E_SUCCESS, &reply, sizeof(reply));
}
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_virtchnl_txq_info *info)
struct virtchnl_txq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_txq txq;
@ -593,7 +593,7 @@ ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_virtchnl_rxq_info *info)
struct virtchnl_rxq_info *info)
{
struct i40e_hw *hw;
struct i40e_hmc_obj_rxq rxq;
@ -662,13 +662,13 @@ static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vsi_queue_config_info *info;
struct i40e_virtchnl_queue_pair_info *pair;
struct virtchnl_vsi_queue_config_info *info;
struct virtchnl_queue_pair_info *pair;
uint16_t expected_msg_size;
int i;
if (msg_size < sizeof(*info)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -677,7 +677,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -686,7 +686,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (msg_size != expected_msg_size) {
device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
vf->vf_num, msg_size, expected_msg_size);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -694,7 +694,7 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (info->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -708,29 +708,29 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
pair->txq.queue_id >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
return;
}
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
const struct i40e_virtchnl_vector_map *vector,
const struct virtchnl_vector_map *vector,
enum i40e_queue_type cur_type, uint16_t cur_queue,
enum i40e_queue_type *last_type, uint16_t *last_queue)
{
@ -759,7 +759,7 @@ ixl_vf_set_qctl(struct ixl_pf *pf,
static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
const struct i40e_virtchnl_vector_map *vector)
const struct virtchnl_vector_map *vector)
{
struct i40e_hw *hw;
u_int qindex;
@ -816,28 +816,28 @@ static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_irq_map_info *map;
struct i40e_virtchnl_vector_map *vector;
struct virtchnl_irq_map_info *map;
struct virtchnl_vector_map *vector;
struct i40e_hw *hw;
int i, largest_txq, largest_rxq;
hw = &pf->hw;
if (msg_size < sizeof(*map)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
map = msg;
if (map->num_vectors == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@ -848,7 +848,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
vector->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
return;
}
@ -856,7 +856,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
largest_rxq = fls(vector->rxq_map) - 1;
if (largest_rxq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@ -866,7 +866,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
largest_txq = fls(vector->txq_map) - 1;
if (largest_txq >= vf->vsi.num_queues) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@ -875,7 +875,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
vector->txitr_idx > IXL_MAX_ITR_IDX) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
VIRTCHNL_OP_CONFIG_IRQ_MAP,
I40E_ERR_PARAM);
return;
}
@ -883,18 +883,18 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_vf_config_vector(pf, vf, vector);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}
static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *select;
struct virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -902,7 +902,7 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -957,23 +957,23 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
if (error) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}
static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *select;
struct virtchnl_queue_select *select;
int error = 0;
if (msg_size != sizeof(*select)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -981,7 +981,7 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
select = msg;
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_PARAM);
return;
}
@ -1039,12 +1039,12 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
}
if (error) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
I40E_ERR_TIMEOUT);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}
static bool
@ -1085,8 +1085,8 @@ static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_ether_addr_list *addr_list;
struct i40e_virtchnl_ether_addr *addr;
struct virtchnl_ether_addr_list *addr_list;
struct virtchnl_ether_addr *addr;
struct ixl_vsi *vsi;
int i;
size_t expected_size;
@ -1094,7 +1094,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
vsi = &vf->vsi;
if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@ -1106,7 +1106,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vsi->vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@ -1114,7 +1114,7 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (i = 0; i < addr_list->num_elements; i++) {
if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@ -1124,20 +1124,20 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}
static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_ether_addr_list *addr_list;
struct i40e_virtchnl_ether_addr *addr;
struct virtchnl_ether_addr_list *addr_list;
struct virtchnl_ether_addr *addr;
size_t expected_size;
int i;
if (msg_size < sizeof(*addr_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@ -1149,7 +1149,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (addr_list->num_elements == 0 ||
addr_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
@ -1158,7 +1158,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
addr = &addr_list->list[i];
if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@ -1168,7 +1168,7 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}
static enum i40e_status_code
@ -1189,13 +1189,13 @@ static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vlan_filter_list *filter_list;
struct virtchnl_vlan_filter_list *filter_list;
enum i40e_status_code code;
size_t expected_size;
int i;
if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
@ -1206,20 +1206,20 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
@ -1227,26 +1227,26 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
code = ixl_vf_enable_vlan_strip(pf, vf);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
}
for (i = 0; i < filter_list->num_elements; i++)
ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}
static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_vlan_filter_list *filter_list;
struct virtchnl_vlan_filter_list *filter_list;
int i;
size_t expected_size;
if (msg_size < sizeof(*filter_list)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
@ -1257,21 +1257,21 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (filter_list->num_elements == 0 ||
filter_list->vsi_id != vf->vsi.vsi_num ||
msg_size != expected_size) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < filter_list->num_elements; i++) {
if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
}
if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
}
@@ -1279,76 +1279,76 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
for (i = 0; i < filter_list->num_elements; i++)
ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}
static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
void *msg, uint16_t msg_size)
{
struct i40e_virtchnl_promisc_info *info;
struct virtchnl_promisc_info *info;
enum i40e_status_code code;
if (msg_size != sizeof(*info)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
info = msg;
if (info->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
return;
}
code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
if (code != I40E_SUCCESS) {
i40e_send_vf_nack(pf, vf,
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
return;
}
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_virtchnl_queue_select *queue;
struct virtchnl_queue_select *queue;
if (msg_size != sizeof(*queue)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
queue = msg;
if (queue->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_ERR_PARAM);
return;
}
ixl_update_eth_stats(&vf->vsi);
ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}
@@ -1357,14 +1357,14 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_key *key;
struct virtchnl_rss_key *key;
struct i40e_aqc_get_set_rss_key_data key_data;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*key)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1374,7 +1374,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (key->key_len > 52) {
device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
vf->vf_num, key->key_len, 52);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1382,7 +1382,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (key->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_PARAM);
return;
}
@@ -1400,19 +1400,19 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (key->key_len / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
vf->vf_num, key->key[0]);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}
static void
@@ -1420,13 +1420,13 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_lut *lut;
struct virtchnl_rss_lut *lut;
enum i40e_status_code status;
hw = &pf->hw;
if (msg_size < sizeof(*lut)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1436,7 +1436,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (lut->lut_entries > 64) {
device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
vf->vf_num, lut->lut_entries, 64);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1444,7 +1444,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (lut->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_PARAM);
return;
}
@@ -1455,19 +1455,19 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
if (status) {
device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
I40E_ERR_ADMIN_QUEUE_ERROR);
return;
}
} else {
for (int i = 0; i < (lut->lut_entries / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
vf->vf_num, lut->lut[0], lut->lut_entries);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}
static void
@@ -1475,12 +1475,12 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
struct i40e_hw *hw;
struct i40e_virtchnl_rss_hena *hena;
struct virtchnl_rss_hena *hena;
hw = &pf->hw;
if (msg_size < sizeof(*hena)) {
i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
I40E_ERR_PARAM);
return;
}
@@ -1488,13 +1488,39 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hena = msg;
/* Set HENA */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
vf->vf_num, hena->hena);
ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}
static void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
struct virtchnl_pf_event event;
struct i40e_hw *hw;
hw = &pf->hw;
event.event = VIRTCHNL_EVENT_LINK_CHANGE;
event.severity = PF_EVENT_SEVERITY_INFO;
event.event_data.link_event.link_status = pf->vsi.link_active;
event.event_data.link_event.link_speed =
(enum virtchnl_link_speed)hw->phy.link_info.link_speed;
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
sizeof(event));
}
void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
int i;
for (i = 0; i < pf->num_vfs; i++)
ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}
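A sketch of a likely call site for the broadcast helper; the surrounding PF link-event handler is assumed for illustration and is not part of this change:

        /* Hypothetical: on a PF link transition, push the new state to
         * every active VF in one pass. */
        if (pf->num_vfs > 0)
                ixl_broadcast_link_state(pf);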
void
@@ -1528,58 +1554,66 @@ ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
return;
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION:
case VIRTCHNL_OP_VERSION:
ixl_vf_version_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case VIRTCHNL_OP_RESET_VF:
ixl_vf_reset_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case VIRTCHNL_OP_GET_VF_RESOURCES:
ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
/* Notify VF of link state after it obtains queues, as this is
* the last thing it will do as part of initialization
*/
ixl_notify_vf_link_state(pf, vf);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_ENABLE_QUEUES:
ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
/* Notify VF of link state after it obtains queues, as this is
* the last thing it will do as part of initialization
*/
ixl_notify_vf_link_state(pf, vf);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
case VIRTCHNL_OP_ADD_ETH_ADDR:
ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
case VIRTCHNL_OP_DEL_ETH_ADDR:
ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_ADD_VLAN:
ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
case VIRTCHNL_OP_GET_STATS:
ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
case VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
case VIRTCHNL_OP_SET_RSS_HENA:
ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
break;
/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
default:
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -1746,6 +1780,7 @@ ixl_iov_uninit(device_t dev)
if (pf->vfs[i].vsi.seid != 0)
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
ixl_free_mac_filters(&pf->vfs[i].vsi);
DDPRINTF(dev, "VF %d: %d released\n",
i, pf->vfs[i].qtag.num_allocated);
DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,9 +42,6 @@
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#define IXL_GLOBAL_VF_NUM(hw, vf) \
(vf->vf_num + hw->func_caps.vf_base_id)
/* Public functions */
/*
@@ -61,5 +58,6 @@ int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
void ixl_initialize_sriov(struct ixl_pf *pf);
void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
void ixl_handle_vflr(void *arg, int pending);
void ixl_broadcast_link_state(struct ixl_pf *pf);
#endif /* _IXL_PF_IOV_H_ */

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -57,6 +57,7 @@ static int ixl_xmit(struct ixl_queue *, struct mbuf **);
static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
static void ixl_queue_sw_irq(struct ixl_vsi *, int);
static inline void ixl_rx_discard(struct rx_ring *, int);
static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
@@ -67,7 +68,9 @@ static inline u32 ixl_get_tx_head(struct ixl_queue *que);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#if __FreeBSD_version >= 1200000
int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1;
#endif
#endif /* DEV_NETMAP */
/*
@@ -87,6 +90,62 @@ ixl_get_default_rss_key(u32 *key)
bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}
/**
* i40e_vc_stat_str - convert virtchnl status err code to a string
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
const char *
i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err)
{
switch (stat_err) {
case VIRTCHNL_STATUS_SUCCESS:
return "OK";
case VIRTCHNL_ERR_PARAM:
return "VIRTCHNL_ERR_PARAM";
case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
case VIRTCHNL_STATUS_NOT_SUPPORTED:
return "VIRTCHNL_STATUS_NOT_SUPPORTED";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
return hw->err_str;
}
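A sketch of a call site for the new helper, matching how the ixlv completion path uses it further down in this diff (dev, hw and v_retval are assumed to be in scope):

        /* Decode a virtchnl status code when logging a PF response */
        device_printf(dev, "PF replied with status %s\n",
            i40e_vc_stat_str(hw, v_retval));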
/*
* PCI BUSMASTER needs to be set for proper operation.
*/
void
ixl_set_busmaster(device_t dev)
{
u16 pci_cmd_word;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
}
/*
* Rewrite the ENABLE bit in the MSIX control register
*/
void
ixl_set_msix_enable(device_t dev)
{
int msix_ctrl, rid;
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
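Both helpers are plain PCI config-space read-modify-writes; a minimal sketch of attach-time usage (the ordering shown is an assumption, not something this diff mandates):

        /* Hypothetical attach path: make DMA and MSI-X usable */
        ixl_set_busmaster(dev);
        ixl_set_msix_enable(dev);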
/*
** Multiqueue Transmit driver
*/
@@ -102,13 +161,13 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
#endif
/*
** Which queue to use:
**
** When doing RSS, map it to the same outbound
** queue as the incoming flow would be mapped to.
** If everything is setup correctly, it should be
** the same bucket that the current CPU we're on is.
*/
* Which queue to use:
*
* When doing RSS, map it to the same outbound
* queue as the incoming flow would be mapped to.
* If everything is set up correctly, it should be
* the same bucket as the one the current CPU is in.
*/
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
#ifdef RSS
if (rss_hash2bucket(m->m_pkthdr.flowid,
@@ -207,11 +266,6 @@ ixl_qflush(struct ifnet *ifp)
if_qflush(ifp);
}
/*
** Find mbuf chains passed to the driver
** that are 'sparse', using more than 8
** mbufs to deliver an mss-size chunk of data
*/
static inline bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
@@ -228,9 +282,9 @@ ixl_tso_detect_sparse(struct mbuf *mp)
num++;
mss -= m->m_len % mp->m_pkthdr.tso_segsz;
if (num > IXL_SPARSE_CHAIN)
return (true);
if (mss < 1) {
if (num > IXL_SPARSE_CHAIN)
return (true);
num = (mss == 0) ? 0 : 1;
mss += mp->m_pkthdr.tso_segsz;
}
@@ -369,7 +423,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
last = i; /* descriptor that will get completion IRQ */
if (++i == que->num_desc)
if (++i == que->num_tx_desc)
i = 0;
buf->m_head = NULL;
@@ -382,7 +436,10 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
txr->next_avail = i;
buf->m_head = m_head;
/* Swap the dma map between the first and last descriptor */
/* Swap the dma map between the first and last descriptor.
* The descriptor that gets checked on completion will now
* have the real map from the first descriptor.
*/
txr->buffers[first].map = buf->map;
buf->map = map;
bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
@@ -424,7 +481,7 @@ ixl_allocate_tx_data(struct ixl_queue *que)
struct ixl_vsi *vsi = que->vsi;
device_t dev = vsi->dev;
struct ixl_tx_buf *buf;
int error = 0;
int i, error = 0;
/*
* Setup DMA descriptor areas.
@@ -436,13 +493,13 @@ ixl_allocate_tx_data(struct ixl_queue *que)
NULL, NULL, /* filter, filterarg */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TX_SEGS, /* nsegments */
PAGE_SIZE, /* maxsegsize */
IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txr->tx_tag))) {
device_printf(dev,"Unable to allocate TX DMA tag\n");
goto fail;
return (error);
}
/* Make a special tag for TSO */
@@ -453,34 +510,51 @@ ixl_allocate_tx_data(struct ixl_queue *que)
NULL, NULL, /* filter, filterarg */
IXL_TSO_SIZE, /* maxsize */
IXL_MAX_TSO_SEGS, /* nsegments */
PAGE_SIZE, /* maxsegsize */
IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&txr->tso_tag))) {
device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
goto fail;
goto free_tx_dma;
}
if (!(txr->buffers =
(struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate tx_buffer memory\n");
error = ENOMEM;
goto fail;
goto free_tx_tso_dma;
}
/* Create the descriptor buffer default dma maps */
buf = txr->buffers;
for (int i = 0; i < que->num_desc; i++, buf++) {
for (i = 0; i < que->num_tx_desc; i++, buf++) {
buf->tag = txr->tx_tag;
error = bus_dmamap_create(buf->tag, 0, &buf->map);
if (error != 0) {
device_printf(dev, "Unable to create TX DMA map\n");
goto fail;
goto free_buffers;
}
}
fail:
return 0;
free_buffers:
while (i--) {
buf--;
bus_dmamap_destroy(buf->tag, buf->map);
}
free(txr->buffers, M_DEVBUF);
txr->buffers = NULL;
free_tx_tso_dma:
bus_dma_tag_destroy(txr->tso_tag);
txr->tso_tag = NULL;
free_tx_dma:
bus_dma_tag_destroy(txr->tx_tag);
txr->tx_tag = NULL;
return (error);
}
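The reworked error path follows a staged unwind: each label releases only what was successfully acquired before the failing step, in reverse order. A generic, self-contained restatement of the pattern (acquire_first, acquire_second and release_first are invented names for illustration):

        int acquire_first(void);
        int acquire_second(void);
        void release_first(void);

        static int
        setup_two_resources(void)
        {
                int error;

                if ((error = acquire_first()) != 0)
                        return (error);         /* nothing to undo yet */
                if ((error = acquire_second()) != 0)
                        goto free_first;        /* undo only the first step */
                return (0);

        free_first:
                release_first();
                return (error);
        }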
@@ -514,7 +588,7 @@ ixl_init_tx_ring(struct ixl_queue *que)
#endif /* DEV_NETMAP */
bzero((void *)txr->base,
(sizeof(struct i40e_tx_desc)) * que->num_desc);
(sizeof(struct i40e_tx_desc)) * que->num_tx_desc);
/* Reset indices */
txr->next_avail = 0;
@@ -523,14 +597,9 @@ ixl_init_tx_ring(struct ixl_queue *que)
/* Reset watchdog status */
txr->watchdog_timer = 0;
#ifdef IXL_FDIR
/* Initialize flow director */
txr->atr_rate = ixl_atr_rate;
txr->atr_count = 0;
#endif
/* Free any existing tx mbufs. */
buf = txr->buffers;
for (int i = 0; i < que->num_desc; i++, buf++) {
for (int i = 0; i < que->num_tx_desc; i++, buf++) {
if (buf->m_head != NULL) {
bus_dmamap_sync(buf->tag, buf->map,
BUS_DMASYNC_POSTWRITE);
@@ -556,7 +625,7 @@ ixl_init_tx_ring(struct ixl_queue *que)
}
/* Set number of descriptors available */
txr->avail = que->num_desc;
txr->avail = que->num_tx_desc;
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -577,30 +646,17 @@ ixl_free_que_tx(struct ixl_queue *que)
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
for (int i = 0; i < que->num_desc; i++) {
for (int i = 0; i < que->num_tx_desc; i++) {
buf = &txr->buffers[i];
if (buf->m_head != NULL) {
bus_dmamap_sync(buf->tag, buf->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(buf->tag,
buf->map);
m_freem(buf->m_head);
buf->m_head = NULL;
if (buf->map != NULL) {
bus_dmamap_destroy(buf->tag,
buf->map);
buf->map = NULL;
}
} else if (buf->map != NULL) {
bus_dmamap_unload(buf->tag,
buf->map);
bus_dmamap_destroy(buf->tag,
buf->map);
buf->map = NULL;
}
bus_dmamap_unload(buf->tag, buf->map);
bus_dmamap_destroy(buf->tag, buf->map);
}
if (txr->br != NULL)
buf_ring_free(txr->br, M_DEVBUF);
if (txr->buffers != NULL) {
free(txr->buffers, M_DEVBUF);
txr->buffers = NULL;
@@ -702,9 +758,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
*off |= (tcp_hlen >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
#ifdef IXL_FDIR
ixl_atr(que, th, etype);
#endif
break;
case IPPROTO_UDP:
if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
@@ -713,7 +766,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
}
break;
case IPPROTO_SCTP:
if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
@@ -833,7 +885,7 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
buf->m_head = NULL;
buf->eop_index = -1;
if (++idx == que->num_desc)
if (++idx == que->num_tx_desc)
idx = 0;
txr->avail--;
@@ -842,34 +894,32 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
return TRUE;
}
/*
** ixl_get_tx_head - Retrieve the value from the
** location the HW records its HEAD index
*/
/*
* ixl_get_tx_head - Retrieve the value from the location
* where the HW records its HEAD index
*/
static inline u32
ixl_get_tx_head(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
void *head = &txr->base[que->num_desc];
void *head = &txr->base[que->num_tx_desc];
return LE32_TO_CPU(*(volatile __le32 *)head);
}
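The head write-back word lives in the 32-bit slot immediately past the last real descriptor, so a ring using this mode must reserve that extra space in its DMA area. A sketch of the implied sizing (the actual allocation happens elsewhere in the driver; the roundup2/DBA_ALIGN usage here mirrors the surrounding code):

        /* Sketch: TX ring DMA size, leaving room for the 32-bit head
         * write-back value stored just past the final descriptor. */
        size_t tsize = roundup2(que->num_tx_desc *
            sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);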
/**********************************************************************
*
* Examine each tx_buffer in the used queue. If the hardware is done
* processing the packet then free associated resources. The
* tx_buffer is put back on the free queue.
* Get index of last used descriptor/buffer from hardware, and clean
* the descriptors/buffers up to that index.
*
**********************************************************************/
bool
ixl_txeof(struct ixl_queue *que)
static bool
ixl_txeof_hwb(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
u32 first, last, head, done, processed;
u32 first, last, head, done;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *tx_desc, *eop_desc;
mtx_assert(&txr->mtx, MA_OWNED);
#ifdef DEV_NETMAP
@@ -879,12 +929,11 @@ ixl_txeof(struct ixl_queue *que)
#endif /* DEF_NETMAP */
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
processed = 0;
first = txr->next_to_clean;
buf = &txr->buffers[first];
tx_desc = (struct i40e_tx_desc *)&txr->base[first];
@@ -893,6 +942,10 @@ ixl_txeof(struct ixl_queue *que)
return FALSE;
eop_desc = (struct i40e_tx_desc *)&txr->base[last];
/* Sync DMA before reading head index from ring */
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_POSTREAD);
/* Get the Head WB value */
head = ixl_get_tx_head(que);
@@ -902,11 +955,9 @@ ixl_txeof(struct ixl_queue *que)
** I do this so the comparison in the
** inner while loop below can be simple
*/
if (++last == que->num_desc) last = 0;
if (++last == que->num_tx_desc) last = 0;
done = last;
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_POSTREAD);
/*
** The HEAD index of the ring is written in a
** defined location, this rather than a done bit
@@ -917,7 +968,6 @@ ixl_txeof(struct ixl_queue *que)
/* We clean the range of the packet */
while (first != done) {
++txr->avail;
++processed;
if (buf->m_head) {
txr->bytes += /* for ITR adjustment */
@@ -934,19 +984,21 @@ ixl_txeof(struct ixl_queue *que)
}
buf->eop_index = -1;
if (++first == que->num_desc)
if (++first == que->num_tx_desc)
first = 0;
buf = &txr->buffers[first];
tx_desc = &txr->base[first];
}
++txr->packets;
/* If a packet was successfully cleaned, reset the watchdog timer */
atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
/* See if there is more work now */
last = buf->eop_index;
if (last != -1) {
eop_desc = &txr->base[last];
/* Get next done point */
if (++last == que->num_desc) last = 0;
if (++last == que->num_tx_desc) last = 0;
done = last;
} else
break;
@@ -956,11 +1008,10 @@ ixl_txeof(struct ixl_queue *que)
txr->next_to_clean = first;
/*
* If there are no pending descriptors, clear the timeout.
*/
if (txr->avail == que->num_desc) {
if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@@ -968,6 +1019,142 @@ ixl_txeof(struct ixl_queue *que)
return TRUE;
}
/**********************************************************************
*
* Use index kept by driver and the flag on each descriptor to find used
* descriptor/buffers and clean them up for re-use.
*
* This method of reclaiming descriptors is currently incompatible with
* DEV_NETMAP.
*
* Returns TRUE if there are more descriptors to be cleaned after this
* function exits.
*
**********************************************************************/
static bool
ixl_txeof_dwb(struct ixl_queue *que)
{
struct tx_ring *txr = &que->txr;
u32 first, last, done;
u32 limit = 256;
struct ixl_tx_buf *buf;
struct i40e_tx_desc *tx_desc, *eop_desc;
mtx_assert(&txr->mtx, MA_OWNED);
/* There are no descriptors to clean */
if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
/* Set starting index/descriptor/buffer */
first = txr->next_to_clean;
buf = &txr->buffers[first];
tx_desc = &txr->base[first];
/*
* This function operates per-packet: it identifies the start of the
* packet and reads the index of the packet's last descriptor from
* eop_index.
*
* If the last descriptor is marked "done" by the hardware, then all
* of the descriptors for the packet are cleaned.
*/
last = buf->eop_index;
if (last == -1)
return FALSE;
eop_desc = &txr->base[last];
/* Sync DMA before reading from ring */
bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD);
/*
* Get the index of the first descriptor beyond the EOP and call that
* 'done'. Simplifies the comparison for the inner loop below.
*/
if (++last == que->num_tx_desc)
last = 0;
done = last;
/*
* We find the last completed descriptor by examining each
* descriptor's status bits to see if it's done.
*/
do {
/* Break if last descriptor in packet isn't marked done */
if ((eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK)
!= I40E_TX_DESC_DTYPE_DESC_DONE)
break;
/* Clean the descriptors that make up the processed packet */
while (first != done) {
/*
* If there was a buffer attached to this descriptor,
* prevent the adapter from accessing it, and add its
* length to the queue's TX stats.
*/
if (buf->m_head) {
txr->bytes += buf->m_head->m_pkthdr.len;
txr->tx_bytes += buf->m_head->m_pkthdr.len;
bus_dmamap_sync(buf->tag, buf->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(buf->tag, buf->map);
m_freem(buf->m_head);
buf->m_head = NULL;
}
buf->eop_index = -1;
++txr->avail;
if (++first == que->num_tx_desc)
first = 0;
buf = &txr->buffers[first];
tx_desc = &txr->base[first];
}
++txr->packets;
/* If a packet was successfully cleaned, reset the watchdog timer */
atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
/*
* Since buf is the first buffer after the one that was just
* cleaned, check if the packet it starts is done, too.
*/
last = buf->eop_index;
if (last != -1) {
eop_desc = &txr->base[last];
/* Get next done point */
if (++last == que->num_tx_desc) last = 0;
done = last;
} else
break;
} while (--limit);
bus_dmamap_sync(txr->dma.tag, txr->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
txr->next_to_clean = first;
/*
* If there are no pending descriptors, clear the watchdog timer.
*/
if (txr->avail == que->num_tx_desc) {
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
return TRUE;
}
bool
ixl_txeof(struct ixl_queue *que)
{
struct ixl_vsi *vsi = que->vsi;
return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que)
: ixl_txeof_dwb(que);
}
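The dispatcher above picks the reclaim strategy per VSI. For reference, the completion test that the descriptor write-back path applies to each EOP descriptor can be factored into a small predicate; this is a restatement of the check inside ixl_txeof_dwb() above, not a new driver API:

        static inline bool
        ixl_txd_is_done(struct i40e_tx_desc *txd)
        {
                /* DTYPE == DESC_DONE marks a descriptor the HW has
                 * finished with and written back */
                return ((txd->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) ==
                    I40E_TX_DESC_DTYPE_DESC_DONE);
        }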
/*********************************************************************
*
* Refresh mbuf buffers for RX descriptor rings
@@ -991,7 +1178,7 @@ ixl_refresh_mbufs(struct ixl_queue *que, int limit)
i = j = rxr->next_refresh;
/* Control the loop with one beyond */
if (++j == que->num_desc)
if (++j == que->num_rx_desc)
j = 0;
while (j != limit) {
@@ -1057,7 +1244,7 @@ ixl_refresh_mbufs(struct ixl_queue *que, int limit)
/* Next is precalculated */
i = j;
rxr->next_refresh = i;
if (++j == que->num_desc)
if (++j == que->num_rx_desc)
j = 0;
}
update:
@@ -1084,15 +1271,6 @@ ixl_allocate_rx_data(struct ixl_queue *que)
struct ixl_rx_buf *buf;
int i, bsize, error;
bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
if (!(rxr->buffers =
(struct ixl_rx_buf *) malloc(bsize,
M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
error = ENOMEM;
return (error);
}
if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR, /* lowaddr */
@@ -1122,25 +1300,50 @@ ixl_allocate_rx_data(struct ixl_queue *que)
NULL, /* lockfuncarg */
&rxr->ptag))) {
device_printf(dev, "Unable to create RX DMA ptag\n");
return (error);
goto free_rx_htag;
}
for (i = 0; i < que->num_desc; i++) {
bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc;
if (!(rxr->buffers =
(struct ixl_rx_buf *) malloc(bsize,
M_DEVBUF, M_NOWAIT | M_ZERO))) {
device_printf(dev, "Unable to allocate rx_buffer memory\n");
error = ENOMEM;
goto free_rx_ptag;
}
for (i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
error = bus_dmamap_create(rxr->htag,
BUS_DMA_NOWAIT, &buf->hmap);
if (error) {
device_printf(dev, "Unable to create RX head map\n");
break;
goto free_buffers;
}
error = bus_dmamap_create(rxr->ptag,
BUS_DMA_NOWAIT, &buf->pmap);
if (error) {
bus_dmamap_destroy(rxr->htag, buf->hmap);
device_printf(dev, "Unable to create RX pkt map\n");
break;
goto free_buffers;
}
}
return 0;
free_buffers:
while (i--) {
buf = &rxr->buffers[i];
bus_dmamap_destroy(rxr->ptag, buf->pmap);
bus_dmamap_destroy(rxr->htag, buf->hmap);
}
free(rxr->buffers, M_DEVBUF);
rxr->buffers = NULL;
free_rx_ptag:
bus_dma_tag_destroy(rxr->ptag);
rxr->ptag = NULL;
free_rx_htag:
bus_dma_tag_destroy(rxr->htag);
rxr->htag = NULL;
return (error);
}
@@ -1173,11 +1376,11 @@ ixl_init_rx_ring(struct ixl_queue *que)
slot = netmap_reset(na, NR_RX, que->me, 0);
#endif /* DEV_NETMAP */
/* Clear the ring contents */
rsize = roundup2(que->num_desc *
rsize = roundup2(que->num_rx_desc *
sizeof(union i40e_rx_desc), DBA_ALIGN);
bzero((void *)rxr->base, rsize);
/* Cleanup any existing buffers */
for (int i = 0; i < que->num_desc; i++) {
for (int i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
if (buf->m_head != NULL) {
bus_dmamap_sync(rxr->htag, buf->hmap,
@@ -1201,7 +1404,7 @@ ixl_init_rx_ring(struct ixl_queue *que)
rxr->hdr_split = FALSE;
/* Now replenish the mbufs */
for (int j = 0; j != que->num_desc; ++j) {
for (int j = 0; j != que->num_rx_desc; ++j) {
struct mbuf *mh, *mp;
buf = &rxr->buffers[j];
@@ -1286,7 +1489,7 @@ ixl_init_rx_ring(struct ixl_queue *que)
rxr->bytes = 0;
rxr->discard = FALSE;
wr32(vsi->hw, rxr->tail, que->num_desc - 1);
wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1);
ixl_flush(vsi->hw);
#if defined(INET6) || defined(INET)
@@ -1325,41 +1528,19 @@ ixl_free_que_rx(struct ixl_queue *que)
struct rx_ring *rxr = &que->rxr;
struct ixl_rx_buf *buf;
INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
/* Cleanup any existing buffers */
if (rxr->buffers != NULL) {
for (int i = 0; i < que->num_desc; i++) {
for (int i = 0; i < que->num_rx_desc; i++) {
buf = &rxr->buffers[i];
if (buf->m_head != NULL) {
bus_dmamap_sync(rxr->htag, buf->hmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->htag, buf->hmap);
buf->m_head->m_flags |= M_PKTHDR;
m_freem(buf->m_head);
}
if (buf->m_pack != NULL) {
bus_dmamap_sync(rxr->ptag, buf->pmap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->ptag, buf->pmap);
buf->m_pack->m_flags |= M_PKTHDR;
m_freem(buf->m_pack);
}
buf->m_head = NULL;
buf->m_pack = NULL;
if (buf->hmap != NULL) {
bus_dmamap_destroy(rxr->htag, buf->hmap);
buf->hmap = NULL;
}
if (buf->pmap != NULL) {
bus_dmamap_destroy(rxr->ptag, buf->pmap);
buf->pmap = NULL;
}
}
if (rxr->buffers != NULL) {
free(rxr->buffers, M_DEVBUF);
rxr->buffers = NULL;
/* Free buffers and unload dma maps */
ixl_rx_discard(rxr, i);
bus_dmamap_destroy(rxr->htag, buf->hmap);
bus_dmamap_destroy(rxr->ptag, buf->pmap);
}
free(rxr->buffers, M_DEVBUF);
rxr->buffers = NULL;
}
if (rxr->htag != NULL) {
@@ -1370,9 +1551,6 @@ ixl_free_que_rx(struct ixl_queue *que)
bus_dma_tag_destroy(rxr->ptag);
rxr->ptag = NULL;
}
INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
return;
}
static inline void
@@ -1409,32 +1587,35 @@ ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct ixl_rx_buf *rbuf;
KASSERT(rxr != NULL, ("Receive ring pointer cannot be null"));
KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_rx_desc"));
rbuf = &rxr->buffers[i];
if (rbuf->fmp != NULL) {/* Partial chain ? */
rbuf->fmp->m_flags |= M_PKTHDR;
/* Free the mbufs in the current chain for the packet */
if (rbuf->fmp != NULL) {
bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
m_freem(rbuf->fmp);
rbuf->fmp = NULL;
}
/*
** With advanced descriptors the writeback
** clobbers the buffer addrs, so its easier
** to just free the existing mbufs and take
** the normal refresh path to get new buffers
** and mapping.
*/
* Free the mbufs for the current descriptor and let ixl_refresh_mbufs()
* assign new ones.
*/
if (rbuf->m_head) {
bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->htag, rbuf->hmap);
m_free(rbuf->m_head);
rbuf->m_head = NULL;
}
if (rbuf->m_pack) {
bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(rxr->ptag, rbuf->pmap);
m_free(rbuf->m_pack);
rbuf->m_pack = NULL;
}
return;
}
#ifdef RSS
@@ -1505,7 +1686,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
union i40e_rx_desc *cur;
struct ixl_rx_buf *rbuf, *nbuf;
IXL_RX_LOCK(rxr);
#ifdef DEV_NETMAP
@@ -1586,7 +1766,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
/* Prefetch the next buffer */
if (!eop) {
nextp = i + 1;
if (nextp == que->num_desc)
if (nextp == que->num_rx_desc)
nextp = 0;
nbuf = &rxr->buffers[nextp];
prefetch(nbuf);
@@ -1708,7 +1888,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Advance our pointers to the next descriptor. */
if (++i == que->num_desc)
if (++i == que->num_rx_desc)
i = 0;
/* Now send to the stack or do LRO */
@@ -1717,10 +1897,14 @@ ixl_rxeof(struct ixl_queue *que, int count)
IXL_RX_UNLOCK(rxr);
ixl_rx_input(rxr, ifp, sendmp, ptype);
IXL_RX_LOCK(rxr);
/*
* Update index used in loop in case another
* ixl_rxeof() call executes when lock is released
*/
i = rxr->next_check;
}
/* Every 8 descriptors we go to refresh mbufs */
/* Every 8 descriptors we go to refresh mbufs */
if (processed == 8) {
ixl_refresh_mbufs(que, i);
processed = 0;
@@ -1837,3 +2021,119 @@ ixl_get_counter(if_t ifp, ift_counter cnt)
}
#endif
/*
* Validate the requested TX and RX ring sizes, falling back to the
* default for values outside the supported range
*/
void
ixl_vsi_setup_rings_size(struct ixl_vsi * vsi, int tx_ring_size, int rx_ring_size)
{
struct device * dev = vsi->dev;
if (tx_ring_size < IXL_MIN_RING
|| tx_ring_size > IXL_MAX_RING
|| tx_ring_size % IXL_RING_INCREMENT != 0) {
device_printf(dev, "Invalid tx_ring_size value of %d set!\n",
tx_ring_size);
device_printf(dev, "tx_ring_size must be between %d and %d, "
"inclusive, and must be a multiple of %d\n",
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
device_printf(dev, "Using default value of %d instead\n",
IXL_DEFAULT_RING);
vsi->num_tx_desc = IXL_DEFAULT_RING;
} else
vsi->num_tx_desc = tx_ring_size;
if (rx_ring_size < IXL_MIN_RING
|| rx_ring_size > IXL_MAX_RING
|| rx_ring_size % IXL_RING_INCREMENT != 0) {
device_printf(dev, "Invalid rx_ring_size value of %d set!\n",
rx_ring_size);
device_printf(dev, "rx_ring_size must be between %d and %d, "
"inclusive, and must be a multiple of %d\n",
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
device_printf(dev, "Using default value of %d instead\n",
IXL_DEFAULT_RING);
vsi->num_rx_desc = IXL_DEFAULT_RING;
} else
vsi->num_rx_desc = rx_ring_size;
device_printf(dev, "using %d tx descriptors and %d rx descriptors\n",
vsi->num_tx_desc, vsi->num_rx_desc);
}
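A usage sketch; the bounds come from IXL_MIN_RING, IXL_MAX_RING and IXL_RING_INCREMENT in the driver headers, and 1024 is just an example value:

        /* Hypothetical: apply tunable-provided ring sizes to a VSI */
        ixl_vsi_setup_rings_size(vsi, 1024, 1024);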
static void
ixl_queue_sw_irq(struct ixl_vsi *vsi, int qidx)
{
struct i40e_hw *hw = vsi->hw;
u32 reg, mask;
if ((vsi->flags & IXL_FLAGS_IS_VF) != 0) {
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
reg = I40E_VFINT_DYN_CTLN1(qidx);
} else {
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ?
I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0;
}
wr32(hw, reg, mask);
}
int
ixl_queue_hang_check(struct ixl_vsi *vsi)
{
struct ixl_queue *que = vsi->queues;
device_t dev = vsi->dev;
struct tx_ring *txr;
s32 timer, new_timer;
int hung = 0;
for (int i = 0; i < vsi->num_queues; i++, que++) {
txr = &que->txr;
/*
* If watchdog_timer equals the default value set by ixl_txeof,
* just subtract hz and move on - the queue is most probably
* running. Otherwise check the value.
*/
if (atomic_cmpset_rel_32(&txr->watchdog_timer,
IXL_WATCHDOG, (IXL_WATCHDOG) - hz) == 0) {
timer = atomic_load_acq_32(&txr->watchdog_timer);
/*
* Again - if the timer was reset to the default value
* then the queue is running. Otherwise check if the
* watchdog expired and act accordingly.
*/
if (timer > 0 && timer != IXL_WATCHDOG) {
new_timer = timer - hz;
if (new_timer <= 0) {
atomic_store_rel_32(&txr->watchdog_timer, -1);
device_printf(dev, "WARNING: queue %d "
"appears to be hung!\n", que->me);
++hung;
/* Try to unblock the queue with SW IRQ */
ixl_queue_sw_irq(vsi, i);
} else {
/*
* If this fails, it means something in the TX path has
* updated the watchdog, so the TX path is still working
* and the watchdog doesn't need to count down.
*/
atomic_cmpset_rel_32(&txr->watchdog_timer,
timer, new_timer);
}
}
}
}
return (hung);
}
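A self-contained userland model of the countdown, using invented values for hz and IXL_WATCHDOG (the real ones come from the kernel and the driver headers):

        #include <stdio.h>

        #define HZ_TICKS        1000    /* hypothetical hz */
        #define WDOG_RELOAD     5000    /* hypothetical IXL_WATCHDOG */

        int
        main(void)
        {
                int timer = WDOG_RELOAD; /* as set by a successful ixl_txeof() */
                int pass = 0;

                /* Each hang-check pass subtracts one second's worth of
                 * ticks; only a TX completion ever reloads the timer. */
                while (timer > 0) {
                        timer -= HZ_TICKS;
                        pass++;
                        printf("pass %d: timer=%d\n", pass, timer);
                }
                printf("watchdog expired: queue would be reported hung\n");
                return (0);
        }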


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -136,8 +136,8 @@ struct ixlv_sc {
int pf_version;
int if_flags;
bool link_up;
u32 link_speed;
bool link_up;
enum virtchnl_link_speed link_speed;
struct mtx mtx;
@@ -176,8 +176,8 @@ struct ixlv_sc {
struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
struct i40e_virtchnl_vf_resource *vf_res;
struct i40e_virtchnl_vsi_resource *vsi_res;
struct virtchnl_vf_resource *vf_res;
struct virtchnl_vsi_resource *vsi_res;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -222,7 +222,8 @@ void ixlv_del_ether_filters(struct ixlv_sc *);
void ixlv_request_stats(struct ixlv_sc *);
void ixlv_request_reset(struct ixlv_sc *);
void ixlv_vc_completion(struct ixlv_sc *,
enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
enum virtchnl_ops, enum virtchnl_status_code,
u8 *, u16);
void ixlv_add_ether_filter(struct ixlv_sc *);
void ixlv_add_vlans(struct ixlv_sc *);
void ixlv_del_vlans(struct ixlv_sc *);


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without


@@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
#define IXLV_BUSY_WAIT_COUNT 50
static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
enum i40e_status_code);
enum virtchnl_status_code);
static void ixl_vc_process_next(struct ixl_vc_mgr *mgr);
static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
static void ixl_vc_send_current(struct ixl_vc_mgr *mgr);
@@ -65,81 +65,81 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
/* Validate message length. */
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct i40e_virtchnl_version_info);
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case VIRTCHNL_OP_RESET_VF:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
case VIRTCHNL_OP_GET_VF_RESOURCES:
/* Valid length in api v1.0 is 0, v1.1 is 4 */
valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_rxq_info);
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_vsi_queue_config_info *vqc =
(struct i40e_virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
sizeof(struct
i40e_virtchnl_queue_pair_info));
virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct i40e_virtchnl_irq_map_info *vimi =
(struct i40e_virtchnl_irq_map_info *)msg;
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct i40e_virtchnl_vector_map));
sizeof(struct virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_ether_addr_list *veal =
(struct i40e_virtchnl_ether_addr_list *)msg;
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct i40e_virtchnl_ether_addr);
sizeof(struct virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
case I40E_VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct i40e_virtchnl_vlan_filter_list *vfl =
(struct i40e_virtchnl_vlan_filter_list *)msg;
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = true;
}
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct i40e_virtchnl_promisc_info);
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct i40e_virtchnl_queue_select);
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return EPERM;
break;
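The list-type cases above all apply the same rule: a message must be at least the fixed header, plus num_elements trailing entries. A standalone model of the check, using an invented message type (like the driver's validator, it counts all elements on top of the header, so the one element already inside sizeof() makes the check slightly conservative):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdint.h>

        /* Invented stand-in for a virtchnl list message: fixed header
         * followed by num_elements 16-bit entries. */
        struct demo_filter_list {
                uint16_t vsi_id;
                uint16_t num_elements;
                uint16_t entries[1];    /* first entry; more follow in-line */
        };

        static bool
        demo_list_len_ok(const struct demo_filter_list *msg, size_t msglen)
        {
                size_t valid_len = sizeof(*msg);

                if (msglen < valid_len || msg->num_elements == 0)
                        return (false);
                valid_len += msg->num_elements * sizeof(uint16_t);
                return (msglen >= valid_len);
        }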
@@ -159,7 +159,7 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
*/
static int
ixlv_send_pf_msg(struct ixlv_sc *sc,
enum i40e_virtchnl_ops op, u8 *msg, u16 len)
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct i40e_hw *hw = &sc->hw;
device_t dev = sc->dev;
@@ -197,12 +197,12 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
int
ixlv_send_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info vvi;
struct virtchnl_version_info vvi;
vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
vvi.major = VIRTCHNL_VERSION_MAJOR;
vvi.minor = VIRTCHNL_VERSION_MINOR;
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
(u8 *)&vvi, sizeof(vvi));
}
@@ -216,7 +216,7 @@ ixlv_send_api_ver(struct ixlv_sc *sc)
int
ixlv_verify_api_ver(struct ixlv_sc *sc)
{
struct i40e_virtchnl_version_info *pf_vvi;
struct virtchnl_version_info *pf_vvi;
struct i40e_hw *hw = &sc->hw;
struct i40e_arq_event_info event;
device_t dev = sc->dev;
@@ -244,8 +244,8 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out_alloc;
}
if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_VERSION) {
if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
VIRTCHNL_OP_VERSION) {
DDPRINTF(dev, "Received unexpected op response: %d\n",
le32toh(event.desc.cookie_high));
/* Don't stop looking for expected response */
@@ -260,10 +260,10 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
break;
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
(pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR))) {
pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
(pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
device_printf(dev, "Critical PF/VF API version mismatch!\n");
err = EIO;
} else
@@ -272,7 +272,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
/* Log PF/VF api versions */
device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
pf_vvi->major, pf_vvi->minor,
I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR);
VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
out_alloc:
free(event.msg_buf, M_DEVBUF);
@@ -292,15 +292,15 @@ ixlv_send_vf_config_msg(struct ixlv_sc *sc)
{
u32 caps;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
caps = VIRTCHNL_VF_OFFLOAD_L2 |
VIRTCHNL_VF_OFFLOAD_RSS_PF |
VIRTCHNL_VF_OFFLOAD_VLAN;
if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
NULL, 0);
else
return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
(u8 *)&caps, sizeof(caps));
}
@@ -323,8 +323,8 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
u32 retries = 0;
/* Note this assumes a single VSI */
len = sizeof(struct i40e_virtchnl_vf_resource) +
sizeof(struct i40e_virtchnl_vsi_resource);
len = sizeof(struct virtchnl_vf_resource) +
sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
@@ -337,8 +337,8 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
i40e_msec_pause(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
} else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
VIRTCHNL_OP_GET_VF_RESOURCES) {
DDPRINTF(dev, "Received a response from PF,"
" opcode %d, error %d",
le32toh(event.desc.cookie_high),
@@ -391,12 +391,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
struct rx_ring *rxr;
int len, pairs;
struct i40e_virtchnl_vsi_queue_config_info *vqci;
struct i40e_virtchnl_queue_pair_info *vqpi;
struct virtchnl_vsi_queue_config_info *vqci;
struct virtchnl_queue_pair_info *vqpi;
pairs = vsi->num_queues;
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
len = sizeof(struct virtchnl_vsi_queue_config_info) +
(sizeof(struct virtchnl_queue_pair_info) * pairs);
vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!vqci) {
device_printf(dev, "%s: unable to allocate memory\n", __func__);
@@ -414,23 +414,25 @@ ixlv_configure_queues(struct ixlv_sc *sc)
rxr = &que->rxr;
vqpi->txq.vsi_id = vqci->vsi_id;
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = que->num_desc;
vqpi->txq.ring_len = que->num_tx_desc;
vqpi->txq.dma_ring_addr = txr->dma.pa;
/* Enable Head writeback */
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = txr->dma.pa +
(que->num_desc * sizeof(struct i40e_tx_desc));
if (vsi->enable_head_writeback) {
vqpi->txq.headwb_enabled = 1;
vqpi->txq.dma_headwb_addr = txr->dma.pa +
(que->num_tx_desc * sizeof(struct i40e_tx_desc));
}
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
vqpi->rxq.ring_len = que->num_desc;
vqpi->rxq.ring_len = que->num_rx_desc;
vqpi->rxq.dma_ring_addr = rxr->dma.pa;
vqpi->rxq.max_pkt_size = vsi->max_frame_size;
vqpi->rxq.databuffer_size = rxr->mbuf_sz;
vqpi->rxq.splithdr_enabled = 0;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
(u8 *)vqci, len);
free(vqci, M_DEVBUF);
}
@@ -443,12 +445,12 @@ ixlv_configure_queues(struct ixlv_sc *sc)
void
ixlv_enable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
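The enable/disable requests select queues with a dense bitmask; with four queue pairs, (1 << 4) - 1 = 0xf selects queues 0 through 3. A tiny standalone check of the arithmetic:

        #include <stdio.h>

        int
        main(void)
        {
                int num_queue_pairs = 4;        /* hypothetical VF queue count */
                unsigned int mask = (1u << num_queue_pairs) - 1;

                printf("queue mask: 0x%x\n", mask);     /* prints 0xf */
                return (0);
        }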
@@ -460,12 +462,12 @@ ixlv_disable_queues(struct ixlv_sc *sc)
void
ixlv_disable_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
struct virtchnl_queue_select vqs;
vqs.vsi_id = sc->vsi_res->vsi_id;
vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
vqs.rx_queues = vqs.tx_queues;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
(u8 *)&vqs, sizeof(vqs));
}
@@ -478,7 +480,7 @@ ixlv_disable_queues(struct ixlv_sc *sc)
void
ixlv_map_queues(struct ixlv_sc *sc)
{
struct i40e_virtchnl_irq_map_info *vm;
struct virtchnl_irq_map_info *vm;
int i, q, len;
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
@@ -486,8 +488,8 @@ ixlv_map_queues(struct ixlv_sc *sc)
/* How many queue vectors, adminq uses one */
q = sc->msix - 1;
len = sizeof(struct i40e_virtchnl_irq_map_info) +
(sc->msix * sizeof(struct i40e_virtchnl_vector_map));
len = sizeof(struct virtchnl_irq_map_info) +
(sc->msix * sizeof(struct virtchnl_vector_map));
vm = malloc(len, M_DEVBUF, M_NOWAIT);
if (!vm) {
printf("%s: unable to allocate memory\n", __func__);
@@ -514,7 +516,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP,
(u8 *)vm, len);
free(vm, M_DEVBUF);
}
@@ -527,7 +529,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
void
ixlv_add_vlans(struct ixlv_sc *sc)
{
struct i40e_virtchnl_vlan_filter_list *v;
struct virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
device_t dev = sc->dev;
int len, i = 0, cnt = 0;
@@ -540,11 +542,11 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (!cnt) { /* no work... */
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
I40E_SUCCESS);
VIRTCHNL_STATUS_SUCCESS);
return;
}
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
len = sizeof(struct virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
@@ -576,7 +578,7 @@ ixlv_add_vlans(struct ixlv_sc *sc)
break;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
}
@@ -590,7 +592,7 @@ void
ixlv_del_vlans(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
struct i40e_virtchnl_vlan_filter_list *v;
struct virtchnl_vlan_filter_list *v;
struct ixlv_vlan_filter *f, *ftmp;
int len, i = 0, cnt = 0;
@@ -602,11 +604,11 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (!cnt) { /* no work... */
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
I40E_SUCCESS);
VIRTCHNL_STATUS_SUCCESS);
return;
}
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
len = sizeof(struct virtchnl_vlan_filter_list) +
(cnt * sizeof(u16));
if (len > IXL_AQ_BUF_SZ) {
@@ -639,7 +641,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
break;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
/* add stats? */
}
@@ -653,7 +655,7 @@ ixlv_del_vlans(struct ixlv_sc *sc)
void
ixlv_add_ether_filters(struct ixlv_sc *sc)
{
struct i40e_virtchnl_ether_addr_list *a;
struct virtchnl_ether_addr_list *a;
struct ixlv_mac_filter *f;
device_t dev = sc->dev;
int len, j = 0, cnt = 0;
@@ -666,12 +668,12 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
if (cnt == 0) { /* Should not happen... */
DDPRINTF(dev, "cnt == 0, exiting...");
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
I40E_SUCCESS);
VIRTCHNL_STATUS_SUCCESS);
return;
}
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(cnt * sizeof(struct i40e_virtchnl_ether_addr));
len = sizeof(struct virtchnl_ether_addr_list) +
(cnt * sizeof(struct virtchnl_ether_addr));
a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (a == NULL) {
@@ -699,7 +701,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
DDPRINTF(dev, "len %d, j %d, cnt %d",
len, j, cnt);
ixlv_send_pf_msg(sc,
I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
/* add stats? */
free(a, M_DEVBUF);
return;
@@ -713,7 +715,7 @@ ixlv_add_ether_filters(struct ixlv_sc *sc)
void
ixlv_del_ether_filters(struct ixlv_sc *sc)
{
struct i40e_virtchnl_ether_addr_list *d;
struct virtchnl_ether_addr_list *d;
device_t dev = sc->dev;
struct ixlv_mac_filter *f, *f_temp;
int len, j = 0, cnt = 0;
@@ -726,12 +728,12 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
if (cnt == 0) {
DDPRINTF(dev, "cnt == 0, exiting...");
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
I40E_SUCCESS);
VIRTCHNL_STATUS_SUCCESS);
return;
}
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(cnt * sizeof(struct i40e_virtchnl_ether_addr));
len = sizeof(struct virtchnl_ether_addr_list) +
(cnt * sizeof(struct virtchnl_ether_addr));
d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
@@ -757,7 +759,7 @@ ixlv_del_ether_filters(struct ixlv_sc *sc)
break;
}
ixlv_send_pf_msg(sc,
I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
/* add stats? */
free(d, M_DEVBUF);
return;
@@ -775,8 +777,8 @@ ixlv_request_reset(struct ixlv_sc *sc)
** the request, this avoids any possibility of
** a mistaken early detection of completion.
*/
wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
}
/*
@@ -786,12 +788,12 @@ ixlv_request_reset(struct ixlv_sc *sc)
void
ixlv_request_stats(struct ixlv_sc *sc)
{
struct i40e_virtchnl_queue_select vqs;
struct virtchnl_queue_select vqs;
int error = 0;
vqs.vsi_id = sc->vsi_res->vsi_id;
/* Low priority, we don't need to error check */
error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
(u8 *)&vqs, sizeof(vqs));
#ifdef IXL_DEBUG
if (error)
@ -836,7 +838,7 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
void
ixlv_config_rss_key(struct ixlv_sc *sc)
{
struct i40e_virtchnl_rss_key *rss_key_msg;
struct virtchnl_rss_key *rss_key_msg;
int msg_len, key_length;
u8 rss_seed[IXL_RSS_KEY_SIZE];
@ -849,7 +851,7 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
/* Send the fetched key */
key_length = IXL_RSS_KEY_SIZE;
msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rss_key_msg == NULL) {
device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
@ -863,7 +865,7 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
rss_key_msg->vsi_id, rss_key_msg->key_len);
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
(u8 *)rss_key_msg, msg_len);
free(rss_key_msg, M_DEVBUF);
@ -872,25 +874,25 @@ ixlv_config_rss_key(struct ixlv_sc *sc)
void
ixlv_set_rss_hena(struct ixlv_sc *sc)
{
struct i40e_virtchnl_rss_hena hena;
struct virtchnl_rss_hena hena;
hena.hena = IXL_DEFAULT_RSS_HENA_X722;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
}
void
ixlv_config_rss_lut(struct ixlv_sc *sc)
{
struct i40e_virtchnl_rss_lut *rss_lut_msg;
struct virtchnl_rss_lut *rss_lut_msg;
int msg_len;
u16 lut_length;
u32 lut;
int i, que_id;
lut_length = IXL_RSS_VSI_LUT_SIZE;
msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (rss_lut_msg == NULL) {
device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
@ -918,7 +920,7 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
rss_lut_msg->lut[i] = lut;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
(u8 *)rss_lut_msg, msg_len);
free(rss_lut_msg, M_DEVBUF);
@ -933,18 +935,18 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
*/
void
ixlv_vc_completion(struct ixlv_sc *sc,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen)
enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
struct i40e_virtchnl_pf_event *vpe =
(struct i40e_virtchnl_pf_event *)msg;
if (v_opcode == VIRTCHNL_OP_EVENT) {
struct virtchnl_pf_event *vpe =
(struct virtchnl_pf_event *)msg;
switch (vpe->event) {
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
case VIRTCHNL_EVENT_LINK_CHANGE:
#ifdef IXL_DEBUG
device_printf(dev, "Link change: status %d, speed %d\n",
vpe->event_data.link_event.link_status,
@ -956,7 +958,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vpe->event_data.link_event.link_speed;
ixlv_update_link_status(sc);
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
case VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
mtx_unlock(&sc->mtx);
@ -976,19 +978,19 @@ ixlv_vc_completion(struct ixlv_sc *sc,
if (v_retval) {
device_printf(dev,
"%s: AQ returned error %s to our request %s!\n",
__func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
__func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
}
#ifdef IXL_DEBUG
if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
if (v_opcode != VIRTCHNL_OP_GET_STATS)
DDPRINTF(dev, "opcode %d", v_opcode);
#endif
switch (v_opcode) {
case I40E_VIRTCHNL_OP_GET_STATS:
case VIRTCHNL_OP_GET_STATS:
ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
case VIRTCHNL_OP_ADD_ETH_ADDR:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
v_retval);
if (v_retval) {
@ -996,23 +998,23 @@ ixlv_vc_completion(struct ixlv_sc *sc,
device_printf(dev, "WARNING: Device may not receive traffic!\n");
}
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
case VIRTCHNL_OP_DEL_ETH_ADDR:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
v_retval);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_ADD_VLAN:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
v_retval);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_ENABLE_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
v_retval);
if (v_retval == 0) {
@ -1025,7 +1027,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* TODO: Clear a state flag, so we know we're ready to run init again */
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
v_retval);
if (v_retval == 0) {
@ -1035,23 +1037,23 @@ ixlv_vc_completion(struct ixlv_sc *sc,
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
case VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
v_retval);
break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
case VIRTCHNL_OP_SET_RSS_HENA:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
case VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
v_retval);
break;
@ -1141,7 +1143,7 @@ ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
static void
ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
enum i40e_status_code err)
enum virtchnl_status_code err)
{
struct ixl_vc_cmd *cmd;
@ -1150,7 +1152,8 @@ ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
return;
callout_stop(&mgr->callout);
ixl_vc_process_completion(mgr, err);
/* ATM, the virtchnl codes map to i40e ones directly */
ixl_vc_process_completion(mgr, (enum i40e_status_code)err);
}
static void

sys/dev/ixl/virtchnl.h (new file, 747 lines)

@ -0,0 +1,747 @@
/******************************************************************************
Copyright (c) 2013-2017, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
/* Description:
* This header file describes the VF-PF communication protocol used
* by the drivers for all devices starting from our 40G product line
*
* Admin queue buffer usage:
* desc->opcode is always aqc_opc_send_msg_to_pf
* flags, retval, datalen, and data addr are all used normally.
* The Firmware copies the cookie fields when sending messages between the
* PF and VF, but uses all other fields internally. Due to this limitation,
* we must send all messages as "indirect", i.e. using an external buffer.
*
* All the VSI indexes are relative to the VF. Each VF can have a maximum of
* three VSIs. All the queue indexes are relative to the VSI. Each VF can
* have a maximum of sixteen queues for all of its VSIs.
*
* The PF is required to return a status code in v_retval for all messages
* except RESET_VF, which does not require any response. The return value
* is of virtchnl_status_code type, defined below in this header.
*
* In general, VF driver initialization should roughly follow the order of
* these opcodes. The VF driver must first validate the API version of the
* PF driver, then request a reset, then get resources, then configure
* queues and interrupts. After these operations are complete, the VF
* driver may start its queues, optionally add MAC and VLAN filters, and
* process traffic.
*/
/* START GENERIC DEFINES
* Need to ensure the following enums and defines hold the same meaning and
* value in current and future projects
*/
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
VIRTCHNL_ERR_PARAM = -5,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
};
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
/* deprecated with AVF 1.0 */
enum virtchnl_rx_hsplit {
VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
};
#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
/* END GENERIC DEFINES */
/* Opcodes for VF-PF communication. These are placed in the v_opcode field
* of the virtchnl_msg structure.
*/
enum virtchnl_ops {
/* The PF sends status change events to VFs using
* the VIRTCHNL_OP_EVENT opcode.
* VFs send requests to the PF using the other ops.
* Use of "advanced opcode" features must be negotiated as part of capabilities
* exchange and is not considered part of the base mode feature set.
*/
VIRTCHNL_OP_UNKNOWN = 0,
VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
VIRTCHNL_OP_RESET_VF = 2,
VIRTCHNL_OP_GET_VF_RESOURCES = 3,
VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
VIRTCHNL_OP_ENABLE_QUEUES = 8,
VIRTCHNL_OP_DISABLE_QUEUES = 9,
VIRTCHNL_OP_ADD_ETH_ADDR = 10,
VIRTCHNL_OP_DEL_ETH_ADDR = 11,
VIRTCHNL_OP_ADD_VLAN = 12,
VIRTCHNL_OP_DEL_VLAN = 13,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
VIRTCHNL_OP_SET_RSS_HENA = 26,
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
};
/* This macro is used to generate a compilation error if a structure
* is not exactly the correct length. It gives a divide by zero error if the
* structure is not of the correct size, otherwise it creates an enum that is
* never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
{virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
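/* Illustrative use only (not part of this header): a wrong size fails
 * to compile because the enum initializer divides by zero.
 *
 *	struct demo { u32 a; u32 b; };
 *	VIRTCHNL_CHECK_STRUCT_LEN(8, demo);	// compiles
 *	VIRTCHNL_CHECK_STRUCT_LEN(4, demo);	// divide-by-zero error
 */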
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
*/
struct virtchnl_msg {
u8 pad[8]; /* AQ flags/opcode/len/retval fields */
enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
enum virtchnl_status_code v_retval; /* ditto for desc->retval */
u32 vfid; /* used by PF when sending to VF */
};
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
* in the same format, along with a return code.
* Reply from PF has its major/minor versions also in param0 and param1.
* If there is a major version mismatch, then the VF cannot operate.
* If there is a minor version mismatch, then the VF can operate but should
* add a warning to the system log.
*
* This enum element MUST always be specified as == 1, regardless of other
* changes in the API. The PF must always respond to this message without
* error regardless of version mismatch.
*/
#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1
#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
struct virtchnl_version_info {
u32 major;
u32 minor;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
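/* Illustrative only: a 1.0 VF sends VIRTCHNL_OP_GET_VF_RESOURCES with no
 * payload, while a 1.1 VF must attach its capability bitmap, e.g.:
 *
 *	if (VF_IS_V11(&ver)) {
 *		u32 caps = VF_BASE_MODE_OFFLOADS;
 *		ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
 *		    (u8 *)&caps, sizeof(caps));
 *	} else
 *		ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0);
 */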
/* VIRTCHNL_OP_RESET_VF
* VF sends this request to PF with no parameters
* PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
* until reset completion is indicated. The admin queue must be reinitialized
* after this operation.
*
* When reset is complete, PF must ensure that all queues in all VSIs associated
* with the VF are stopped, all queue configurations in the HMC are set to 0,
* and all MAC and VLAN filters (except the default MAC address) on all VSIs
* are cleared.
*/
/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
* vsi_type should always be 6 for backward compatibility. Add other fields
* as needed.
*/
enum virtchnl_vsi_type {
VIRTCHNL_VSI_TYPE_INVALID = 0,
VIRTCHNL_VSI_SRIOV = 6,
};
/* VIRTCHNL_OP_GET_VF_RESOURCES
* Version 1.0 VF sends this request to PF with no parameters
* Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
* PF responds with an indirect message containing
* virtchnl_vf_resource and one or more
* virtchnl_vsi_resource structures.
*/
struct virtchnl_vsi_resource {
u16 vsi_id;
u16 num_queue_pairs;
enum virtchnl_vsi_type vsi_type;
u16 qset_handle;
u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
/* VF capability flags
* VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
* TX/RX Checksum offloading and TSO for non-tunnelled packets.
*/
#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0x00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0x00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0x00400000
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct virtchnl_vf_resource {
u16 num_vsis;
u16 num_queue_pairs;
u16 max_vectors;
u16 max_mtu;
u32 vf_cap_flags;
u32 rss_key_size;
u32 rss_lut_size;
struct virtchnl_vsi_resource vsi_res[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
/* VIRTCHNL_OP_CONFIG_TX_QUEUE
* VF sends this message to set up parameters for one TX queue.
* External data buffer contains one instance of virtchnl_txq_info.
* PF configures requested queue and returns a status code.
*/
/* Tx queue config info */
struct virtchnl_txq_info {
u16 vsi_id;
u16 queue_id;
u16 ring_len; /* number of descriptors, multiple of 8 */
u16 headwb_enabled; /* deprecated with AVF 1.0 */
u64 dma_ring_addr;
u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
};
VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
* PF configures requested queue and returns a status code.
*/
/* Rx queue config info */
struct virtchnl_rxq_info {
u16 vsi_id;
u16 queue_id;
u32 ring_len; /* number of descriptors, multiple of 32 */
u16 hdr_size;
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
u32 pad1;
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
};
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
* VF sends this message to set parameters for all active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
struct virtchnl_txq_info txq;
struct virtchnl_rxq_info rxq;
};
VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
struct virtchnl_vsi_queue_config_info {
u16 vsi_id;
u16 num_queue_pairs;
u32 pad;
struct virtchnl_queue_pair_info qpair[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
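/* Illustrative sizing only: the validator below expects num_queue_pairs
 * full elements on top of sizeof(), so a message carrying n pairs is
 *
 *	len = sizeof(struct virtchnl_vsi_queue_config_info) +
 *	    n * sizeof(struct virtchnl_queue_pair_info);
 */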
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
* additional queues must be negotiated. This is a best effort request as it
* is possible the PF does not have enough queues left to support the request.
* If the PF cannot support the number requested it will respond with the
* maximum number it is able to support; otherwise it will respond with the
* number requested.
*/
/* VF resource request */
struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
* The "other" causes are always mapped to vector 0.
* PF configures interrupt mapping and returns status.
*/
struct virtchnl_vector_map {
u16 vsi_id;
u16 vector_id;
u16 rxq_map;
u16 txq_map;
u16 rxitr_idx;
u16 txitr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
struct virtchnl_irq_map_info {
u16 num_vectors;
struct virtchnl_vector_map vecmap[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
/* VIRTCHNL_OP_ENABLE_QUEUES
* VIRTCHNL_OP_DISABLE_QUEUES
* VF sends these messages to enable or disable TX/RX queue pairs.
* The queues fields are bitmaps indicating which queues to act upon.
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
*/
struct virtchnl_queue_select {
u16 vsi_id;
u16 pad;
u32 rx_queues;
u32 tx_queues;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
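/* Illustrative only: enabling the first n queue pairs of a VSI.
 *
 *	struct virtchnl_queue_select vqs = { 0 };
 *
 *	vqs.vsi_id = vsi_id;
 *	vqs.rx_queues = vqs.tx_queues = (1u << n) - 1;	// queues 0..n-1
 */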
/* VIRTCHNL_OP_ADD_ETH_ADDR
* VF sends this message in order to add one or more unicast or multicast
* address filters for the specified VSI.
* PF adds the filters and returns status.
*/
/* VIRTCHNL_OP_DEL_ETH_ADDR
* VF sends this message in order to remove one or more unicast or multicast
* filters for the specified VSI.
* PF removes the filters and returns status.
*/
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
u8 pad[2];
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
struct virtchnl_ether_addr_list {
u16 vsi_id;
u16 num_elements;
struct virtchnl_ether_addr list[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
/* VIRTCHNL_OP_ADD_VLAN
* VF sends this message to add one or more VLAN tag filters for receives.
* PF adds the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
/* VIRTCHNL_OP_DEL_VLAN
* VF sends this message to remove one or more VLAN tag filters for receives.
* PF removes the filters and returns status.
* If a port VLAN is configured by the PF, this operation will return an
* error to the VF.
*/
struct virtchnl_vlan_filter_list {
u16 vsi_id;
u16 num_elements;
u16 vlan_id[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
* VF sends VSI id and flags.
* PF returns status code in retval.
* Note: we assume that broadcast accept mode is always enabled.
*/
struct virtchnl_promisc_info {
u16 vsi_id;
u16 flags;
};
VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002
/* VIRTCHNL_OP_GET_STATS
* VF sends this message to request stats for the selected VSI. VF uses
* the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
* PF replies with struct eth_stats in an external buffer.
*/
/* VIRTCHNL_OP_CONFIG_RSS_KEY
* VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the RSS fields in
* the VF resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
struct virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table */
};
VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
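/* Illustrative sizing only: unlike the address and VLAN lists above,
 * the key[1]/lut[1] byte is already counted in sizeof(), so callers
 * subtract one, e.g.:
 *
 *	msg_len = sizeof(struct virtchnl_rss_key) + key_len - 1;
 *
 * which is exactly what the validator below expects.
 */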
/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
* VIRTCHNL_OP_SET_RSS_HENA
* VF sends these messages to get and set the hash filter enable bits for RSS.
* By default, the PF sets these to all possible traffic types that the
* hardware supports. The VF can query this value if it wants to change the
* traffic types that are hashed by the hardware.
*/
struct virtchnl_rss_hena {
u64 hena;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
* messages in response to this one.
*/
enum virtchnl_event_codes {
VIRTCHNL_EVENT_UNKNOWN = 0,
VIRTCHNL_EVENT_LINK_CHANGE,
VIRTCHNL_EVENT_RESET_IMPENDING,
VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};
#define PF_EVENT_SEVERITY_INFO 0
#define PF_EVENT_SEVERITY_ATTENTION 1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
struct {
enum virtchnl_link_speed link_speed;
bool link_status;
} link_event;
} event_data;
int severity;
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
* VF uses this message to request PF to map IWARP vectors to IWARP queues.
* The request for this originates from the VF IWARP driver through
* a client interface between VF LAN and VF IWARP driver.
* A vector could have an AEQ and CEQ attached to it although
* there is a single AEQ per VF IWARP instance in which case
* most vectors will have an INVALID_IDX for aeq and valid idx for ceq.
* There will never be a case where there will be multiple CEQs attached
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
* In order for us to keep the interface simple, SW will define a
* unique type value for AEQ.
*/
#define QUEUE_TYPE_PE_AEQ 0x80
#define QUEUE_INVALID_IDX 0xFFFF
struct virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
u16 aeq_idx;
u8 itr_idx;
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info);
struct virtchnl_iwarp_qvlist_info {
u32 num_vectors;
struct virtchnl_iwarp_qv_info qv_info[1];
};
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
* When the reset is complete, it writes 1
* When the PF detects that the VF has recovered, it writes 2
* VF checks this register periodically to determine if a reset has occurred,
* then polls it to know when the reset is complete.
* If either the PF or VF reads the register while the hardware
* is in a reset state, it will return DEADBEEF, which, when masked,
* will result in 3.
*/
enum virtchnl_vfr_states {
VIRTCHNL_VFR_INPROGRESS = 0,
VIRTCHNL_VFR_COMPLETED,
VIRTCHNL_VFR_VFACTIVE,
};
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
* @v_opcode: Opcode for the message
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* validate msg format against struct for each opcode
*/
static inline int
virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
u8 *msg, u16 msglen)
{
bool err_msg_format = FALSE;
int valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
valid_len = sizeof(struct virtchnl_version_info);
break;
case VIRTCHNL_OP_RESET_VF:
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(ver))
valid_len = sizeof(u32);
break;
case VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct virtchnl_txq_info);
break;
case VIRTCHNL_OP_CONFIG_RX_QUEUE:
valid_len = sizeof(struct virtchnl_rxq_info);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
valid_len += (vqc->num_queue_pairs *
    sizeof(struct virtchnl_queue_pair_info));
if (vqc->num_queue_pairs == 0)
err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
valid_len = sizeof(struct virtchnl_irq_map_info);
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
if (vimi->num_vectors == 0)
err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
case VIRTCHNL_OP_DISABLE_QUEUES:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
case VIRTCHNL_OP_DEL_ETH_ADDR:
valid_len = sizeof(struct virtchnl_ether_addr_list);
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
if (veal->num_elements == 0)
err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
case VIRTCHNL_OP_DEL_VLAN:
valid_len = sizeof(struct virtchnl_vlan_filter_list);
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
valid_len += vfl->num_elements * sizeof(u16);
if (vfl->num_elements == 0)
err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
valid_len = sizeof(struct virtchnl_promisc_info);
break;
case VIRTCHNL_OP_GET_STATS:
valid_len = sizeof(struct virtchnl_queue_select);
break;
case VIRTCHNL_OP_IWARP:
/* These messages are opaque to us and will be validated in
* the RDMA client code. We just need to check for nonzero
* length. The firmware will enforce max length restrictions.
*/
if (msglen)
valid_len = msglen;
else
err_msg_format = TRUE;
break;
case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
break;
case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
valid_len = sizeof(struct virtchnl_iwarp_qvlist_info);
if (msglen >= valid_len) {
struct virtchnl_iwarp_qvlist_info *qv =
(struct virtchnl_iwarp_qvlist_info *)msg;
if (qv->num_vectors == 0) {
err_msg_format = TRUE;
break;
}
valid_len += ((qv->num_vectors - 1) *
sizeof(struct virtchnl_iwarp_qv_info));
}
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
valid_len = sizeof(struct virtchnl_rss_key);
if (msglen >= valid_len) {
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
valid_len += vrk->key_len - 1;
}
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
valid_len = sizeof(struct virtchnl_rss_lut);
if (msglen >= valid_len) {
struct virtchnl_rss_lut *vrl =
(struct virtchnl_rss_lut *)msg;
valid_len += vrl->lut_entries - 1;
}
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
break;
case VIRTCHNL_OP_SET_RSS_HENA:
valid_len = sizeof(struct virtchnl_rss_hena);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
return VIRTCHNL_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)
return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
return 0;
}
#endif /* _VIRTCHNL_H_ */
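
To show how a PF might consume the validator above, here is a hedged sketch of a receive-path check; the function name pf_check_vf_msg is hypothetical and not part of this commit.

static enum virtchnl_status_code
pf_check_vf_msg(struct virtchnl_version_info *vf_ver, u32 v_opcode,
    u8 *msg, u16 msglen)
{
	int err;

	/* Reject malformed messages before dispatching on the opcode. */
	err = virtchnl_vc_validate_vf_msg(vf_ver, v_opcode, msg, msglen);
	if (err != 0)
		return ((enum virtchnl_status_code)err);
	return (VIRTCHNL_STATUS_SUCCESS);
}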


@ -10,7 +10,7 @@ SRCS += ixl_iw.c
SRCS.PCI_IOV= pci_iov_if.h ixl_pf_iov.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e_dcb.c
# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG