ixl(4): Update to ixl-1.6.6-k.

Submitted by:	erj
Reviewed by:	jeffrey.e.pieper@intel.com
MFC after:	3 days
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D7391
This commit is contained in:
Sean Bruno 2016-08-07 18:12:36 +00:00
parent b57417e56f
commit 4294f337b0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=303816
27 changed files with 11706 additions and 7470 deletions

View File

@ -216,6 +216,12 @@ dev/ipmi/ipmi_pci.c optional ipmi pci
dev/ipmi/ipmi_linux.c optional ipmi compat_linux32
dev/ixl/if_ixl.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_main.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_ixlv.c optional ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixlvc.c optional ixlv pci \

View File

@ -38,16 +38,6 @@
#include "i40e_adminq.h"
#include "i40e_prototype.h"
/**
* i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
* @desc: API request descriptor
**/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	__le16 op = desc->opcode;

	/* NVM erase and NVM update are the only NVM-modifying opcodes */
	return (op == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		op == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}
/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@ -661,13 +651,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
hw->aq.nvm_release_on_done = FALSE;
hw->nvm_release_on_done = FALSE;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
0,
NULL);
ret_code = I40E_SUCCESS;
/* success! */
@ -1081,26 +1067,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
if (i40e_is_nvm_update_op(&e->desc)) {
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
hw->aq.nvm_release_on_done = FALSE;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
break;
case I40E_NVMUPD_STATE_WRITE_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
break;
default:
break;
}
}
i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)

View File

@ -105,7 +105,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */

View File

@ -140,6 +140,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@ -147,6 +151,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@ -185,6 +193,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@ -212,7 +221,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
@ -271,6 +279,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@ -433,6 +445,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@ -457,13 +470,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
#define I40E_AQ_ARP_INIT_IPV4 0x0008
#define I40E_AQ_ARP_UNSUP_CTL 0x0010
#define I40E_AQ_ARP_ENA 0x0020
#define I40E_AQ_ARP_ADD_IPV4 0x0040
#define I40E_AQ_ARP_DEL_IPV4 0x0080
#define I40E_AQ_ARP_INIT_IPV4 0x0800
#define I40E_AQ_ARP_UNSUP_CTL 0x1000
#define I40E_AQ_ARP_ENA 0x2000
#define I40E_AQ_ARP_ADD_IPV4 0x4000
#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
__le32 pfpm_proxyfc;
__le32 enabled_offloads;
#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@ -478,17 +493,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
#define I40E_AQ_NS_PROXY_ADD_0 0x0100
#define I40E_AQ_NS_PROXY_DEL_0 0x0200
#define I40E_AQ_NS_PROXY_ADD_1 0x0400
#define I40E_AQ_NS_PROXY_DEL_1 0x0800
#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
#define I40E_AQ_NS_PROXY_ADD_0 0x0001
#define I40E_AQ_NS_PROXY_DEL_0 0x0002
#define I40E_AQ_NS_PROXY_ADD_1 0x0004
#define I40E_AQ_NS_PROXY_DEL_1 0x0008
#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@ -538,6 +555,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@ -561,6 +579,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
/* Set WoL Filter (0x0120) */
/* Admin queue descriptor payload for Set WoL Filter (direct 0x0120).
 * The filter pattern itself travels in a separate host buffer
 * (struct i40e_aqc_set_wol_filter_data) pointed to by address_high/low.
 */
struct i40e_aqc_set_wol_filter {
/* bits 0-2: filter slot; bit 15: magic-packet filter type */
__le16 filter_index;
#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
/* set vs. clear, and whether TCO pass-through traffic may wake */
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
/* marks which of the cmd_flags actions firmware should honor */
__le16 valid_flags;
#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
u8 reserved[2];
/* host address of the filter data buffer (split into two LE32 halves) */
__le32 address_high;
__le32 address_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
/* Host buffer referenced by the command above: a 128-byte match pattern
 * plus a 16-byte (128-bit) byte-enable mask — 0x90 bytes total.
 */
struct i40e_aqc_set_wol_filter_data {
u8 filter[128];
u8 mask[16];
};
I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
/* Get Wake Reason (0x0121) */
struct i40e_aqc_get_wake_reason_completion {
u8 reserved_1[2];
/* low byte: index of the WoL filter that matched; high byte reserved */
__le16 wake_reason;
#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
u8 reserved_2[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@ -643,6 +711,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@ -694,6 +764,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@ -862,8 +933,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@ -1597,15 +1672,12 @@ struct i40e_aq_get_set_hmc_resource_profile {
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
/* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
I40E_HMC_PROFILE_DEFAULT = 1,
I40E_HMC_PROFILE_FAVOR_VF = 2,
I40E_HMC_PROFILE_EQUAL = 3,
};
#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@ -1641,6 +1713,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_25GBASE_KR = 0x1F,
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@ -1649,6 +1725,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@ -1656,7 +1733,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@ -1689,7 +1767,13 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 reserved[3];
u8 phy_type_ext;
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 mod_type_ext;
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@ -1711,7 +1795,12 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
u8 reserved[3];
u8 phy_type_ext;
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@ -1791,16 +1880,24 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
/* 25G Error Codes */
#define I40E_AQ_25G_NO_ERR 0X00
#define I40E_AQ_25G_NOT_PRESENT 0X01
#define I40E_AQ_25G_NVM_CRC_ERR 0X02
#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03
#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04
#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 external_power_ability;
u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
@ -1857,7 +1954,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
/* Disable link manageability on all ports needs both bits 4 and 5 */
#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@ -2296,6 +2396,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
/* Get/Set RSS key descriptor (indirect 0x0B02 set / 0x0B04 get).
 * vsi_id packs a 10-bit VSI index plus a "VSI valid" flag in bit 15.
 */
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
__le16 vsi_id;
u8 reserved[6];
/* host address of the key buffer (struct i40e_aqc_get_set_rss_key_data) */
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
/* Key buffer: 0x28 (40) bytes of standard RSS key plus 0xc (12) bytes of
 * extended hash key — 0x34 bytes total, matching the length check below.
 */
struct i40e_aqc_get_set_rss_key_data {
u8 standard_rss_key[0x28];
u8 extended_hash_key[0xc];
};
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
/* Get/Set RSS lookup table descriptor (indirect 0x0B03 set / 0x0B05 get).
 * flags bit 0 selects whether the LUT is per-VSI or the PF-wide table.
 */
struct i40e_aqc_get_set_rss_lut {
#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
__le16 flags;
u8 reserved[4];
/* host address of the LUT buffer */
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {

View File

@ -64,8 +64,24 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_X722_A0:
case I40E_DEV_ID_KX_X722:
case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
case I40E_DEV_ID_X722_VF_HV:
case I40E_DEV_ID_X722_A0_VF:
hw->mac.type = I40E_MAC_X722_VF;
break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@ -341,14 +357,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
/* the most we could have left is 16 bytes, pad with zeros */
if (i < len) {
char d_buf[16];
int j;
int j, i_sav;
i_sav = i;
memset(d_buf, 0, sizeof(d_buf));
for (j = 0; i < len; j++, i++)
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
i, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@ -400,6 +417,164 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set TRUE, for VSI table set FALSE
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
* @set: set TRUE to set the table, FALSE to get the table
*
* Internal function to get or set RSS look up table
**/
static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
						     u16 vsi_id, bool pf_lut,
						     u8 *lut, u16 lut_size,
						     bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
	u16 table_type;

	/* Same descriptor serves both directions; only the opcode differs */
	i40e_fill_default_direct_cmd_desc(&desc, set ?
					  i40e_aqc_opc_set_rss_lut :
					  i40e_aqc_opc_get_rss_lut);

	/* Indirect command: the LUT itself travels in a host buffer */
	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	cmd_resp->vsi_id =
		CPU_TO_LE16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	/* LUT can belong to a single VSI or to the whole PF */
	table_type = pf_lut ? I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF :
			      I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI;
	cmd_resp->flags |= CPU_TO_LE16((u16)
				((table_type <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	return i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
}
/**
* i40e_aq_get_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set TRUE, for VSI table set FALSE
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
*
* get the RSS lookup table, PF or VSI type
**/
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
					  bool pf_lut, u8 *lut, u16 lut_size)
{
	/* Thin wrapper: FALSE selects the "get" opcode in the helper */
	return (i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
					FALSE));
}
/**
* i40e_aq_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set TRUE, for VSI table set FALSE
* @lut: pointer to the lut buffer provided by the caller
* @lut_size: size of the lut buffer
*
* set the RSS lookup table, PF or VSI type
**/
enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
					  bool pf_lut, u8 *lut, u16 lut_size)
{
	/* Thin wrapper: TRUE selects the "set" opcode in the helper */
	return (i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
					TRUE));
}
/**
* i40e_aq_get_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
* @set: set TRUE to set the key, FALSE to get the key
*
* get the RSS key per VSI
**/
static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				      u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key,
				      bool set)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	/* Same descriptor serves both directions; only the opcode differs */
	i40e_fill_default_direct_cmd_desc(&desc, set ?
					  i40e_aqc_opc_set_rss_key :
					  i40e_aqc_opc_get_rss_key);

	/* Indirect command: key material lives in a host buffer */
	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));

	cmd_resp->vsi_id =
		CPU_TO_LE16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	return i40e_asq_send_command(hw, &desc, key, key_size, NULL);
}
/**
* i40e_aq_get_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
**/
enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
				      u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key)
{
	/* Thin wrapper: FALSE selects the "get" opcode in the helper */
	return (i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE));
}
/**
* i40e_aq_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
* set the RSS key per VSI
**/
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
				      u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key)
{
	/* Thin wrapper: TRUE selects the "set" opcode in the helper */
	return (i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE));
}
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@ -563,7 +738,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@ -813,6 +988,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@ -832,6 +1008,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
status = i40e_init_nvm(hw);
return status;
}
@ -1104,8 +1283,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
/* It can take upto 15 secs for GRST steady state */
grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@ -1452,8 +1630,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
if (report_init)
if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
}
return status;
}
@ -1996,16 +2176,46 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_clear_default_vsi
* @hw: pointer to the hw struct
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
				u16 seid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd;

	i40e_fill_default_direct_cmd_desc(&desc,
				i40e_aqc_opc_set_vsi_promiscuous_modes);

	/* Validate only the "default VSI" bit while leaving the promiscuous
	 * flags zeroed, which clears the default-VSI designation.
	 */
	cmd = (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	cmd->promiscuous_flags = CPU_TO_LE16(0);
	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
	cmd->seid = CPU_TO_LE16(seid);

	return (i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details));
}
/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
* @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@ -2018,8 +2228,9 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1))
if (rx_only_promisc &&
(((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
(hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@ -2192,7 +2403,7 @@ enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
cmd->seid = CPU_TO_LE16(seid);
@ -2826,10 +3037,7 @@ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
if (!rule_id)
return I40E_ERR_PARAM;
} else {
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@ -3025,67 +3233,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_get_hmc_resource_profile
* @hw: pointer to the hw struct
* @profile: type of profile the HMC is to be set as
* @pe_vf_enabled_count: the number of PE enabled VFs the system has
* @cmd_details: pointer to command details structure or NULL
*
* query the HMC profile of the device.
**/
enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
				enum i40e_aq_hmc_profile *profile,
				u8 *pe_vf_enabled_count,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_get_set_hmc_resource_profile *resp =
		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc,
				i40e_aqc_opc_query_hmc_resource_profile);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	/* Only report values back when firmware answered; on failure the
	 * response descriptor contents are undefined and must not be
	 * propagated into the caller's output parameters.
	 */
	if (status == I40E_SUCCESS) {
		*profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
			   I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
		*pe_vf_enabled_count = resp->pe_vf_enabled &
				I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
	}

	return status;
}
/**
* i40e_aq_set_hmc_resource_profile
* @hw: pointer to the hw struct
* @profile: type of profile the HMC is to be set as
* @pe_vf_enabled_count: the number of PE enabled VFs the system has
* @cmd_details: pointer to command details structure or NULL
*
* set the HMC profile of the device.
**/
enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
				enum i40e_aq_hmc_profile profile,
				u8 pe_vf_enabled_count,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_get_set_hmc_resource_profile *cmd;

	i40e_fill_default_direct_cmd_desc(&desc,
				i40e_aqc_opc_set_hmc_resource_profile);

	/* Direct command: profile selection rides in the descriptor itself */
	cmd = (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
	cmd->pm_profile = (u8)profile;
	cmd->pe_vf_enabled = pe_vf_enabled_count;

	return (i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details));
}
/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
@ -3603,6 +3750,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
"HW Capability: wr_csr_prot = 0x%llX\n\n",
(p->wr_csr_prot & 0xffff));
break;
case I40E_AQ_CAP_ID_NVM_MGMT:
if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
p->sec_rev_disabled = TRUE;
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
p->update_disabled = TRUE;
break;
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
hw->wol_proxy_vsi_seid = (u16)logical_id;
p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
break;
default:
break;
}
@ -5210,6 +5377,35 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
DEBUGOUT("Ethtype Filter Add failed: Error pruning Tx flow control frames\n");
}
/**
* i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
* @filters: list of cloud filters
* @filter_count: length of list
*
* There's an issue in the device where the Geneve VNI layout needs
* to be shifted 1 byte over from the VxLAN VNI
**/
static void i40e_fix_up_geneve_vni(
	struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
	u8 filter_count)
{
	struct i40e_aqc_add_remove_cloud_filters_element_data *f;
	struct i40e_aqc_add_remove_cloud_filters_element_data *end;

	for (f = filters, end = filters + filter_count; f < end; f++) {
		u16 tnl_type;

		tnl_type = (LE16_TO_CPU(f->flags) &
			    I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
			   I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;

		/* Geneve entries need the VNI moved up one byte relative to
		 * the VxLAN layout before the filter reaches the device.
		 */
		if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE)
			f->tenant_id =
				CPU_TO_LE32(LE32_TO_CPU(f->tenant_id) << 8);
	}
}
/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
@ -5230,8 +5426,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
u16 buff_len;
enum i40e_status_code status;
u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@ -5242,6 +5438,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
i40e_fix_up_geneve_vni(filters, filter_count);
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@ -5279,6 +5477,8 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
i40e_fix_up_geneve_vni(filters, filter_count);
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@ -6263,3 +6463,158 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
/**
* i40e_aq_set_arp_proxy_config
* @hw: pointer to the HW structure
* @proxy_config - pointer to proxy config command table struct
* @cmd_details: pointer to command details
*
* Set ARP offload parameters from pre-populated
* i40e_aqc_arp_proxy_data struct
**/
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
if (!proxy_config)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
cmd_details);
return status;
}
/**
* i40e_aq_opc_set_ns_proxy_table_entry
* @hw: pointer to the HW structure
* @ns_proxy_table_entry: pointer to NS table entry command struct
* @cmd_details: pointer to command details
*
* Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
* from pre-populated i40e_aqc_ns_proxy_data struct
**/
enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
if (!ns_proxy_table_entry)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
cmd_details);
return status;
}
/**
 * i40e_aq_set_clear_wol_filter
 * @hw: pointer to the hw struct
 * @filter_index: index of filter to modify (0-7)
 * @filter: buffer containing filter to be set; may be NULL when clearing
 * @set_filter: TRUE to set filter, FALSE to clear filter
 * @no_wol_tco: if TRUE, pass through packets cannot cause wake-up
 *		if FALSE, pass through packets may cause wake-up
 * @filter_valid: TRUE if filter action is valid
 * @no_wol_tco_valid: TRUE if no WoL in TCO traffic action valid
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set or clear WoL filter for port attached to the PF.
 *
 * Builds an i40e_aqc_set_wol_filter descriptor: the cmd_flags word
 * selects the requested actions and the valid_flags word tells
 * firmware which of those actions to honor.  When setting a filter,
 * the filter data buffer is attached as an indirect buffer; when
 * clearing, no buffer is sent (buff_len stays 0).
 **/
enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
				u8 filter_index,
				struct i40e_aqc_set_wol_filter_data *filter,
				bool set_filter, bool no_wol_tco,
				bool filter_valid, bool no_wol_tco_valid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_wol_filter *cmd =
		(struct i40e_aqc_set_wol_filter *)&desc.params.raw;
	enum i40e_status_code status;
	u16 cmd_flags = 0;
	u16 valid_flags = 0;
	u16 buff_len = 0;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);

	/* Hardware supports only I40E_AQC_MAX_NUM_WOL_FILTERS slots */
	if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
		return I40E_ERR_PARAM;
	cmd->filter_index = CPU_TO_LE16(filter_index);

	if (set_filter) {
		/* Setting requires filter data to send to firmware */
		if (!filter)
			return I40E_ERR_PARAM;
		cmd_flags |= I40E_AQC_SET_WOL_FILTER;
		buff_len = sizeof(*filter);
	}
	if (no_wol_tco)
		cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);

	/* valid_flags gates which cmd_flags actions firmware applies */
	if (filter_valid)
		valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
	if (no_wol_tco_valid)
		valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
	cmd->valid_flags = CPU_TO_LE16(valid_flags);

	/* Buffer address is set even when clearing (filter may be NULL,
	 * in which case buff_len is 0 and firmware ignores the address)
	 */
	cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
	cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));

	status = i40e_asq_send_command(hw, &desc, filter,
				       buff_len, cmd_details);

	return status;
}
/**
 * i40e_aq_get_wake_event_reason
 * @hw: pointer to the hw struct
 * @wake_reason: return value, index of matching filter
 * @cmd_details: pointer to command details structure or NULL
 *
 * Query firmware for the reason of a Wake Up event.  @wake_reason is
 * written only when the admin queue command completes successfully.
 **/
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
				u16 *wake_reason,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_wake_reason_completion *reason_cmpl =
		(struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
	enum i40e_status_code status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	/* The completion overlays the descriptor; read it back on success */
	if (status == I40E_SUCCESS)
		*wake_reason = LE16_TO_CPU(reason_cmpl->wake_reason);

	return status;
}

View File

@ -50,8 +50,20 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_X722_A0 0x374C
#define I40E_DEV_ID_X722_A0_VF 0x374D
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \

View File

@ -220,7 +220,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
return ret_code;
}
@ -238,7 +246,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
return ret_code;
}
@ -330,7 +341,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
return ret_code;
}
@ -350,7 +364,16 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
data);
i40e_release_nvm(hw);
}
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
return ret_code;
}
@ -834,10 +857,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
hw->aq.nvm_release_on_done,
hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@ -851,7 +874,18 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
if (!cmd->data_size) {
*perrno = -EFAULT;
return I40E_ERR_BUF_TOO_SHORT;
}
bytes[0] = hw->nvmupd_state;
if (cmd->data_size >= 4) {
bytes[1] = 0;
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
return I40E_SUCCESS;
}
@ -870,6 +904,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
/* if we need to stop waiting for an event, clear
* the wait info and return before doing anything else
*/
if (cmd->offset == 0xffff) {
i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
return I40E_SUCCESS;
}
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@ -942,7 +984,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = TRUE;
hw->nvm_release_on_done = TRUE;
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@ -958,7 +1001,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = TRUE;
hw->nvm_release_on_done = TRUE;
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@ -971,10 +1015,12 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status)
if (status) {
i40e_release_nvm(hw);
else
} else {
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
}
break;
@ -992,7 +1038,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
hw->aq.nvm_release_on_done = TRUE;
hw->nvm_release_on_done = TRUE;
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@ -1087,8 +1134,10 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (!status)
if (!status) {
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
case I40E_NVMUPD_WRITE_LCB:
@ -1100,7 +1149,8 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
hw->aq.nvm_release_on_done = TRUE;
hw->nvm_release_on_done = TRUE;
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@ -1115,6 +1165,7 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@ -1129,7 +1180,8 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
hw->aq.nvm_release_on_done = TRUE;
hw->nvm_release_on_done = TRUE;
hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@ -1178,6 +1230,38 @@ static enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
return status;
}
/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 *
 * If @opcode matches the AQ completion the NVM update state machine is
 * waiting on, release the NVM resource (when armed), clear the wait
 * marker, and advance the state machine out of its *_WAIT state.
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
	/* Ignore completions we are not waiting for */
	if (opcode != hw->nvm_wait_opcode)
		return;

	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
	if (hw->nvm_release_on_done) {
		i40e_release_nvm(hw);
		hw->nvm_release_on_done = FALSE;
	}
	hw->nvm_wait_opcode = 0;

	/* Leave the wait state; any other state is left untouched */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_INIT_WAIT)
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
	else if (hw->nvmupd_state == I40E_NVMUPD_STATE_WRITE_WAIT)
		hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
}
/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
@ -1340,6 +1424,12 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
/* should we wait for a followup event? */
if (cmd->offset) {
hw->nvm_wait_opcode = cmd->offset;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
return status;
}

View File

@ -32,7 +32,7 @@
******************************************************************************/
/*$FreeBSD$*/
#include <machine/stdarg.h>
#include <sys/limits.h>
#include "ixl.h"
@ -137,7 +137,7 @@ void
i40e_init_spinlock(struct i40e_spinlock *lock)
{
mtx_init(&lock->mutex, "mutex",
MTX_NETWORK_LOCK, MTX_DEF | MTX_DUPOK);
"ixl spinlock", MTX_DEF | MTX_DUPOK);
}
void
@ -159,11 +159,34 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
mtx_destroy(&lock->mutex);
}
/*
 * Sleep-like pause for roughly `msecs` milliseconds that yields the CPU
 * via kern_yield() instead of busy-waiting.  Falls back to a busy-wait
 * DELAY() when the scheduler is unavailable (early boot `cold`, or
 * SCHEDULER_STOPPED(), e.g. panic/kdb context) since yielding would
 * never return there.
 */
void
i40e_msec_pause(int msecs)
{
	/* Convert milliseconds to scheduler ticks (hz ticks per second) */
	int ticks_to_pause = (msecs * hz) / 1000;
	int start_ticks = ticks;

	if (cold || SCHEDULER_STOPPED()) {
		i40e_msec_delay(msecs);
		return;
	}

	while (1) {
		kern_yield(PRI_USER);
		/* Elapsed ticks since we started; `ticks` is a wrapping
		 * signed counter, so this can go negative across a wrap.
		 */
		int yielded_ticks = ticks - start_ticks;
		if (yielded_ticks > ticks_to_pause)
			break;
		/* Wrap case: yielded_ticks went negative because `ticks`
		 * overflowed INT_MAX; yielded_ticks + INT_MAX + 1 recovers
		 * the true elapsed count for comparison.
		 */
		else if (yielded_ticks < 0
			 && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
			break;
		}
	}
}
/*
* Helper function for debug statement printing
*/
void
i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
va_list args;

View File

@ -55,7 +55,7 @@
#include <dev/pci/pcireg.h>
#define i40e_usec_delay(x) DELAY(x)
#define i40e_msec_delay(x) DELAY(1000*(x))
#define i40e_msec_delay(x) DELAY(1000 * (x))
#define DBG 0
#define MSGOUT(S, A, B) printf(S "\n", A, B)
@ -75,12 +75,13 @@
#define DEBUGOUT7(S,A,B,C,D,E,F,G)
#endif
#define UNREFERENCED_XPARAMETER
/* Remove unused shared code macros */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
#define STATIC static
#define INLINE inline
@ -110,9 +111,6 @@
#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
#define BIT(a) (1UL << (a))
#define BIT_ULL(a) (1ULL << (a))
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
@ -180,10 +178,13 @@ void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
** i40e_debug - OS dependent version of shared code debug printing
*/
enum i40e_debug_mask;
#define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__)
extern void i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask,
#define i40e_debug(h, m, s, ...) i40e_debug_shared(h, m, s, ##__VA_ARGS__)
extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask,
char *fmt_str, ...);
/* Non-busy-wait that uses kern_yield() */
void i40e_msec_pause(int);
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.

View File

@ -77,6 +77,17 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
@ -107,6 +118,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@ -149,7 +162,8 @@ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@ -329,10 +343,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
u8 tcmap, bool request, u8 *tcmap_ret,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile *profile,
u8 *pe_vf_enabled_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
@ -343,10 +353,6 @@ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
enum i40e_aq_hmc_profile profile,
u8 pe_vf_enabled_count,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@ -386,7 +392,6 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@ -448,6 +453,7 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@ -496,9 +502,26 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
u8 filter_index,
struct i40e_aqc_set_wol_filter_data *filter,
bool set_filter, bool no_wol_tco,
bool filter_valid, bool no_wol_tco_valid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -43,7 +43,6 @@
#include "i40e_lan_hmc.h"
#include "i40e_devids.h"
#define UNREFERENCED_XPARAMETER
#define BIT(a) (1UL << (a))
#define BIT_ULL(a) (1ULL << (a))
@ -147,8 +146,10 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
#define I40E_MDIO_STCODE 0
#define I40E_MDIO_OPCODE_ADDRESS 0
#define I40E_MDIO_STCODE I40E_MASK(0, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
@ -177,6 +178,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@ -193,6 +195,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@ -275,50 +279,61 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
enum i40e_aq_capabilities_phy_type {
I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
};
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
u32 phy_types;
u64 phy_types;
};
#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
#define I40E_WOL_SUPPORT_MASK 1
#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
#define I40E_PROXY_SUPPORT_MASK (1 << 2)
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@ -348,6 +363,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
bool sec_rev_disabled;
bool update_disabled;
#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@ -377,6 +397,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
bool apm_wol_support;
enum i40e_acpi_programming_method acpi_prog_method;
bool proxy_support;
};
struct i40e_mac_info {
@ -622,6 +645,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
bool nvm_release_on_done;
u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@ -634,6 +659,10 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* WoL and proxy support */
u16 num_wol_proxy_filters;
u16 wol_proxy_vsi_seid;
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
@ -644,7 +673,8 @@ struct i40e_hw {
static INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
return hw->mac.type == I40E_MAC_VF;
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@ -748,7 +778,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@ -756,7 +786,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@ -1134,6 +1164,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@ -1170,15 +1202,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
/* Note: Values 0-30 are reserved for future use */
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
/* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
/* Note: Values 37-40 are reserved for future use */
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@ -1234,6 +1275,10 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@ -1544,4 +1589,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
#define I40E_L3_DST_SHIFT 35
#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
#define I40E_L3_V6_DST_SHIFT 35
#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
#define I40E_L4_SRC_SHIFT 34
#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
#define I40E_L4_DST_SHIFT 33
#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
#define I40E_VERIFY_TAG_SHIFT 31
#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
#define I40E_FLEX_50_SHIFT 13
#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
#define I40E_FLEX_51_SHIFT 12
#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
#define I40E_FLEX_52_SHIFT 11
#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
#define I40E_FLEX_53_SHIFT 10
#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
#define I40E_FLEX_54_SHIFT 9
#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
#define I40E_FLEX_55_SHIFT 8
#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
#define I40E_FLEX_56_SHIFT 7
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */

View File

@ -88,7 +88,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
I40E_VIRTCHNL_OP_EVENT = 17,
I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
};
/* Virtual channel message descriptor. This overlays the admin queue
@ -162,6 +167,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@ -170,8 +176,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
u32 max_fcoe_contexts;
u32 max_fcoe_filters;
u32 rss_key_size;
u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@ -330,6 +336,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
* I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
* and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
* configuration negotiation. If this is the case, then the rss fields in
* the vf resource struct are valid.
* Both the key and LUT are initialized to 0 by the PF, meaning that
* RSS is effectively disabled until set up by the VF.
*/
struct i40e_virtchnl_rss_key {
u16 vsi_id;
u16 key_len;
u8 key[1]; /* RSS hash key, packed bytes */
};
struct i40e_virtchnl_rss_lut {
u16 vsi_id;
u16 lut_entries;
u8 lut[1]; /* RSS lookup table*/
};
/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * I40E_VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
 */
struct i40e_virtchnl_rss_hena {
	u64 hena;	/* bitmask of enabled i40e_filter_pctype traffic types */
};
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other

File diff suppressed because it is too large Load Diff

View File

@ -32,23 +32,13 @@
******************************************************************************/
/*$FreeBSD$*/
#ifndef IXL_STANDALONE_BUILD
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#endif
#include "ixl.h"
#include "ixlv.h"
#ifdef RSS
#include <net/rss_config.h>
#endif
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.2.11-k";
char ixlv_driver_version[] = "1.4.6-k";
/*********************************************************************
* PCI Device ID Table
@ -64,6 +54,9 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@ -73,7 +66,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
"Intel(R) Ethernet Connection XL710 VF Driver"
"Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
@ -119,6 +112,7 @@ static void ixlv_set_queue_rx_itr(struct ixl_queue *);
static void ixlv_set_queue_tx_itr(struct ixl_queue *);
static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
enum i40e_status_code);
static void ixlv_configure_itr(struct ixlv_sc *);
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
@ -137,8 +131,10 @@ static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
static void ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif
/*********************************************************************
* FreeBSD Device Interface Entry Points
@ -306,7 +302,7 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
/* Core Lock Init*/
/* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@ -365,14 +361,16 @@ ixlv_attach(device_t dev)
goto err_aq;
}
INIT_DBG_DEV(dev, "VF config from PF:");
INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
sc->vf_res->num_vsis,
sc->vf_res->num_queue_pairs,
sc->vf_res->max_vectors,
sc->vf_res->max_mtu);
INIT_DBG_DEV(dev, "Offload flags: %#010x",
sc->vf_res->vf_offload_flags);
sc->vf_res->rss_key_size,
sc->vf_res->rss_lut_size);
#ifdef IXL_DEBUG
device_printf(dev, "Offload flags: 0x%b\n",
sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
#endif
/* got VF config message back from PF, now we can parse it */
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
@ -396,6 +394,14 @@ ixlv_attach(device_t dev)
bcopy(addr, hw->mac.addr, sizeof(addr));
}
/* Now that the number of queues for this VF is known, set up interrupts */
sc->msix = ixlv_init_msix(sc);
/* We fail without MSIX support */
if (sc->msix == 0) {
error = ENXIO;
goto err_res_buf;
}
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
@ -419,7 +425,12 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "Queue memory and interface setup");
/* Do queue interrupt setup */
ixlv_assign_msix(sc);
if (ixlv_assign_msix(sc) != 0) {
device_printf(dev, "%s: allocating queue interrupts failed!\n",
__func__);
error = ENXIO;
goto out;
}
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
@ -829,8 +840,8 @@ ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
*/
if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
if_printf(sc->vsi.ifp,
"Error %d waiting for PF to complete operation %d\n",
code, cmd->request);
"Error %s waiting for PF to complete operation %d\n",
i40e_stat_str(&sc->hw, code), cmd->request);
}
}
@ -901,6 +912,9 @@ ixlv_init_locked(struct ixlv_sc *sc)
ixl_init_rx_ring(que);
}
/* Set initial ITR values */
ixlv_configure_itr(sc);
/* Configure queues */
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
@ -936,18 +950,31 @@ ixlv_init(void *arg)
struct ixlv_sc *sc = vsi->back;
int retries = 0;
/* Prevent init from running again while waiting for AQ calls
* made in init_locked() to complete. */
mtx_lock(&sc->mtx);
if (sc->init_in_progress) {
mtx_unlock(&sc->mtx);
return;
} else
sc->init_in_progress = true;
ixlv_init_locked(sc);
mtx_unlock(&sc->mtx);
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < IXLV_AQ_MAX_ERR) {
i40e_msec_delay(25);
i40e_msec_pause(25);
}
if (retries >= IXLV_AQ_MAX_ERR)
if (retries >= IXLV_AQ_MAX_ERR) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
}
mtx_lock(&sc->mtx);
sc->init_in_progress = false;
mtx_unlock(&sc->mtx);
}
/*
@ -990,8 +1017,8 @@ ixlv_setup_vc(struct ixlv_sc *sc)
/* Need to set these AQ paramters before initializing AQ */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
/* Initialize admin queue */
@ -1021,13 +1048,13 @@ ixlv_setup_vc(struct ixlv_sc *sc)
while (!i40e_asq_done(hw)) {
if (++asq_retries > IXLV_AQ_MAX_ERR) {
i40e_shutdown_adminq(hw);
DDPRINTF(dev, "Admin Queue timeout "
"(waiting for send_api_ver), %d more retries...",
device_printf(dev, "Admin Queue timeout "
"(waiting for send_api_ver), %d more tries...\n",
IXLV_AQ_MAX_ERR - (i + 1));
ret_error = 3;
break;
}
i40e_msec_delay(10);
i40e_msec_pause(10);
}
if (asq_retries > IXLV_AQ_MAX_ERR)
continue;
@ -1055,7 +1082,7 @@ ixlv_setup_vc(struct ixlv_sc *sc)
if (error) {
device_printf(dev,
"%s: Unable to verify API version,"
" error %d\n", __func__, error);
" error %s\n", __func__, i40e_stat_str(hw, error));
ret_error = 5;
}
break;
@ -1096,7 +1123,7 @@ ixlv_vf_config(struct ixlv_sc *sc)
ret_error = 3;
goto fail;
}
i40e_msec_delay(10);
i40e_msec_pause(10);
}
INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
@ -1149,6 +1176,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int rid, want, vectors, queues, available;
int auto_max_queues;
rid = PCIR_BAR(IXL_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
@ -1156,7 +1184,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
if (!sc->msix_mem) {
/* May not be enabled */
device_printf(sc->dev,
"Unable to map MSIX table \n");
"Unable to map MSIX table\n");
goto fail;
}
@ -1168,20 +1196,30 @@ ixlv_init_msix(struct ixlv_sc *sc)
goto fail;
}
/* Figure out a reasonable auto config value */
queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
/* Clamp queues to number of CPUs and # of MSI-X vectors available */
auto_max_queues = min(mp_ncpus, available - 1);
/* Clamp queues to # assigned to VF by PF */
auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
/* Override with hardcoded value if sane */
if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
/* Override with tunable value if tunable is less than autoconfig count */
if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
queues = ixlv_max_queues;
/* Use autoconfig amount if that's lower */
else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
device_printf(dev, "ixlv_max_queues (%d) is too large, using "
"autoconfig amount (%d)...\n",
ixlv_max_queues, auto_max_queues);
queues = auto_max_queues;
}
/* Limit maximum auto-configured queues to 8 if no user value is set */
else
queues = min(auto_max_queues, 8);
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
/* Enforce the VF max value */
if (queues > IXLV_MAX_QUEUES)
queues = IXLV_MAX_QUEUES;
/*
** Want one vector (RX/TX pair) per queue
@ -1225,25 +1263,6 @@ ixlv_init_msix(struct ixlv_sc *sc)
sc->vsi.num_queues = queues;
}
/*
** Explicitly set the guest PCI BUSMASTER capability
** and we must rewrite the ENABLE in the MSIX control
** register again at this point to cause the host to
** successfully initialize us.
*/
{
u16 pci_cmd_word;
int msix_ctrl;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
/* Next we need to setup the vector for the Admin Queue */
rid = 1; // zero vector + 1
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
@ -1280,7 +1299,7 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
&rid, RF_ACTIVE);
if (!(sc->pci_mem)) {
device_printf(dev,"Unable to allocate bus resource: memory\n");
device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@ -1294,18 +1313,29 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->hw.back = &sc->osdep;
/* Disable adminq interrupts */
ixlv_disable_adminq_irq(&sc->hw);
/*
** Now setup MSI/X, it will return
** us the number of supported vectors
** Explicitly set the guest PCI BUSMASTER capability
** and we must rewrite the ENABLE in the MSIX control
** register again at this point to cause the host to
** successfully initialize us.
**
** This must be set before accessing any registers.
*/
sc->msix = ixlv_init_msix(sc);
{
u16 pci_cmd_word;
int msix_ctrl;
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
pci_find_cap(dev, PCIY_MSIX, &rid);
rid += PCIR_MSIX_CTRL;
msix_ctrl = pci_read_config(dev, rid, 2);
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(dev, rid, msix_ctrl, 2);
}
/* We fail without MSIX support */
if (sc->msix == 0)
return (ENXIO);
/* Disable adminq interrupts (just in case) */
ixlv_disable_adminq_irq(&sc->hw);
return (0);
}
@ -1330,8 +1360,10 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
if (que->res != NULL)
if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
que->res = NULL;
}
}
early:
@ -1340,8 +1372,10 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
bus_teardown_intr(dev, sc->res, sc->tag);
sc->tag = NULL;
}
if (sc->res != NULL)
if (sc->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
sc->res = NULL;
}
pci_release_msi(dev);
@ -1352,8 +1386,6 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), sc->pci_mem);
return;
}
/*
@ -1418,7 +1450,6 @@ ixlv_assign_msix(struct ixlv_sc *sc)
#endif
bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
vsi->que_mask |= (u64)(1 << que->msix);
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
@ -1454,7 +1485,7 @@ ixlv_reset(struct ixlv_sc *sc)
if (sc->init_state != IXLV_RESET_PENDING)
ixlv_request_reset(sc);
i40e_msec_delay(100);
i40e_msec_pause(100);
error = ixlv_reset_complete(hw);
if (error) {
device_printf(dev, "%s: VF reset failed\n",
@ -1484,6 +1515,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
{
u32 reg;
/* Wait up to ~10 seconds */
for (int i = 0; i < 100; i++) {
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
@ -1491,7 +1523,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
return (0);
i40e_msec_delay(100);
i40e_msec_pause(100);
}
return (EBUSY);
@ -1522,7 +1554,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
ifp->if_baudrate = 4000000000; // ??
ifp->if_baudrate = IF_Gbps(40);
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@ -1686,7 +1718,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
/* Allocate receive soft structs for the ring*/
/* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
@ -1896,18 +1928,48 @@ ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
wr32(hw, I40E_VFINT_DYN_CTLN1(id),
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
/*
 * Program the initial per-queue interrupt throttling (ITR) registers
 * from the driver tunables, and record the values in the software
 * ring state so later dynamic adjustment starts from a known point.
 */
static void
ixlv_configure_itr(struct ixlv_sc *sc)
{
	struct i40e_hw *hw = &sc->hw;
	struct ixl_vsi *vsi = &sc->vsi;

	/* Latch the tunable values into the VSI. */
	vsi->rx_itr_setting = ixlv_rx_itr;
	vsi->tx_itr_setting = ixlv_tx_itr;

	for (int q = 0; q < vsi->num_queues; q++) {
		struct ixl_queue *que = &vsi->queues[q];
		struct rx_ring *rxr = &que->rxr;
		struct tx_ring *txr = &que->txr;

		/* Rx side: hardware register first, then software shadow. */
		wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, q),
		    vsi->rx_itr_setting);
		rxr->itr = vsi->rx_itr_setting;
		rxr->latency = IXL_AVE_LATENCY;

		/* Tx side mirrors the Rx programming. */
		wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, q),
		    vsi->tx_itr_setting);
		txr->itr = vsi->tx_itr_setting;
		txr->latency = IXL_AVE_LATENCY;
	}
}
/*
** Provide a update to the queue RX
@ -2251,7 +2313,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
}
if_maddr_runlock(ifp);
// TODO: Remove -- cannot set promiscuous mode in a VF
/* TODO: Remove -- cannot set promiscuous mode in a VF */
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
@ -2381,7 +2443,8 @@ ixlv_local_timer(void *arg)
** Check status on the queues for a hang
*/
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
@ -2522,28 +2585,17 @@ ixlv_free_queues(struct ixl_vsi *vsi)
free(vsi->queues, M_DEVBUF);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
ixlv_config_rss_reg(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
u32 lut = 0;
u64 set_hena = 0, hena;
int i, j, que_id;
u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
u32 rss_hash_config;
u32 rss_seed[IXL_KEYSZ];
#else
u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
0x183cfd8c, 0xce880440, 0x580cbc3c,
0x35897377, 0x328b25e1, 0x4fa98922,
0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif
/* Don't set up RSS if using a single queue */
@ -2557,9 +2609,12 @@ ixlv_config_rss(struct ixlv_sc *sc)
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
#else
ixl_get_default_rss_key(rss_seed);
#endif
/* Fill out hash function seed */
for (i = 0; i <= IXL_KEYSZ; i++)
for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
@ -2580,18 +2635,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
set_hena =
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
set_hena = IXL_DEFAULT_RSS_HENA;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@ -2599,9 +2643,8 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
// TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
/* Populate the LUT with max no. of queues in round robin fashion */
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
if (j == vsi->num_queues)
j = 0;
#ifdef RSS
@ -2616,16 +2659,46 @@ ixlv_config_rss(struct ixlv_sc *sc)
que_id = j;
#endif
/* lut = 4-byte sliding window of 4 lut entries */
lut = (lut << 8) | (que_id & 0xF);
lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
/* On i = 3, we have 4 entries in lut; write to the register */
if ((i & 3) == 3) {
wr32(hw, I40E_VFQF_HLUT(i), lut);
wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
}
}
ixl_flush(hw);
}
static void
ixlv_config_rss_pf(struct ixlv_sc *sc)
{
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
}
/*
** ixlv_config_rss - setup RSS
**
** RSS keys and table are cleared on VF reset.
*/
static void
ixlv_config_rss(struct ixlv_sc *sc)
{
if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
ixlv_config_rss_reg(sc);
} else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
ixlv_config_rss_pf(sc);
} else
device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
}
/*
** This routine refreshes vlan filters, called by init
@ -2868,8 +2941,8 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
CTLFLAG_RD, &(queues[q].tx_dma_setup),
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
@ -2886,7 +2959,14 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
CTLFLAG_RD, &(rxr->itr), 0,
"Queue Rx ITR Interval");
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
CTLFLAG_RD, &(txr->itr), 0,
"Queue Tx ITR Interval");
#ifdef IXL_DEBUG
/* Examine queue state */
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
@ -2898,6 +2978,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
#endif
}
}
@ -2932,6 +3013,7 @@ ixlv_free_filters(struct ixlv_sc *sc)
return;
}
#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler
* Retrieves I40E_QTX_TAIL1 value from hardware
@ -2975,4 +3057,5 @@ ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
return error;
return (0);
}
#endif

View File

@ -36,7 +36,6 @@
#ifndef _IXL_H_
#define _IXL_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf_ring.h>
@ -90,29 +89,26 @@
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#ifdef PCI_IOV
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#ifdef RSS
#include <net/rss_config.h>
#endif
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "i40e_type.h"
#include "i40e_prototype.h"
#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
#endif /* IXL_DEBUG || IXL_DEBUG_SYSCTL */
#ifdef IXL_DEBUG
/* Enable debug sysctls */
#ifndef IXL_DEBUG_SYSCTL
#define IXL_DEBUG_SYSCTL 1
#endif
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
@ -157,6 +153,26 @@
#define HW_DEBUGOUT(...)
#endif /* IXL_DEBUG */
enum ixl_dbg_mask {
IXL_DBG_INFO = 0x00000001,
IXL_DBG_EN_DIS = 0x00000002,
IXL_DBG_AQ = 0x00000004,
IXL_DBG_NVMUPD = 0x00000008,
IXL_DBG_IOCTL_KNOWN = 0x00000010,
IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
IXL_DBG_IOCTL_ALL = 0x00000030,
I40E_DEBUG_RSS = 0x00000100,
IXL_DBG_IOV = 0x00001000,
IXL_DBG_IOV_VC = 0x00002000,
IXL_DBG_SWITCH_INFO = 0x00010000,
IXL_DBG_ALL = 0xFFFFFFFF
};
/* Tunables */
/*
@ -167,27 +183,28 @@
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
#define DEFAULT_RING 1024
#define PERFORM_RING 2048
#define MAX_RING 4096
#define MIN_RING 32
#define DEFAULT_RING 1024
#define IXL_MAX_RING 8160
#define IXL_MIN_RING 32
#define IXL_RING_INCREMENT 32
#define IXL_AQ_LEN 256
#define IXL_AQ_LEN_MAX 1024
/*
** Default number of entries in Tx queue buf_ring.
*/
#define SMALL_TXBRSZ 4096
/* This may require mbuf cluster tuning */
#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
#define DEFAULT_TXBRSZ 4096
/* Alignment for rings */
#define DBA_ALIGN 128
#define DBA_ALIGN 128
/*
* This is the max watchdog interval, ie. the time that can
* pass between any two TX clean operations, such only happening
* when the TX hardware is functioning.
*/
#define IXL_WATCHDOG (10 * hz)
#define IXL_WATCHDOG (10 * hz)
/*
* This parameters control when the driver calls the routine to reclaim
@ -196,11 +213,6 @@
#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
/* Flow control constants */
#define IXL_FC_PAUSE 0xFFFF
#define IXL_FC_HI 0x20000
#define IXL_FC_LO 0x10000
#define MAX_MULTICAST_ADDR 128
#define IXL_BAR 3
@ -208,10 +220,6 @@
#define IXL_TSO_SIZE 65535
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
/* Controls the length of the Admin Queue */
#define IXL_AQ_LEN 256
#define IXL_AQ_LEN_MAX 1024
#define IXL_AQ_BUFSZ 4096
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
#define IXL_TX_ITR 1
@ -219,25 +227,29 @@
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 66
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_KEYSZ 10
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
#define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */
#define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F
#define IXL_RSS_VF_LUT_ENTRY_MASK 0xF
#define IXL_VF_MAX_BUFFER 0x3F80
#define IXL_VF_MAX_HDR_BUFFER 0x840
#define IXL_VF_MAX_FRAME 0x3FFF
/* ERJ: hardware can support ~1.5k filters between all functions */
#define IXL_MAX_FILTERS 256
#define IXL_MAX_TX_BUSY 10
/* ERJ: hardware can support ~2k (SW5+) filters between all functions */
#define IXL_MAX_FILTERS 256
#define IXL_MAX_TX_BUSY 10
#define IXL_NVM_VERSION_LO_SHIFT 0
#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
#define IXL_NVM_VERSION_HI_SHIFT 12
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
/*
* Interrupt Moderation parameters
*/
@ -298,6 +310,19 @@
#define IXL_END_OF_INTR_LNKLST 0x7FF
/*
 * Default RSS hash-enable (HENA) bitmask, used when the kernel RSS
 * option is not compiled in: hash on all TCP/UDP/SCTP/other/fragment
 * flows for both IPv4 and IPv6, plus L2 payload.
 */
#define IXL_DEFAULT_RSS_HENA (\
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@ -337,11 +362,6 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
/* Pre-10.2 media type compatibility */
#if __FreeBSD_version < 1002000
#define IFM_OTHER IFM_UNKNOWN
#endif
/*
*****************************************************************************
* vendor_info_array
@ -480,15 +500,12 @@ struct ixl_queue {
u64 mbuf_defrag_failed;
u64 mbuf_hdr_failed;
u64 mbuf_pkt_failed;
u64 tx_map_avail;
u64 tx_dma_setup;
u64 tx_dmamap_failed;
u64 dropped_pkts;
};
/*
** Virtual Station interface:
** there would be one of these per traffic class/type
** for now just one, and its embedded in the pf
** Virtual Station Interface
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
@ -498,27 +515,25 @@ struct ixl_vsi {
struct i40e_hw *hw;
struct ifmedia media;
enum i40e_vsi_type type;
u64 que_mask;
int id;
u16 vsi_num;
u16 msix_base; /* station base MSIX vector */
u16 first_queue;
u16 num_queues;
u32 rx_itr_setting;
u32 tx_itr_setting;
u16 max_frame_size;
struct ixl_queue *queues; /* head of queues */
u16 vsi_num;
bool link_active;
u16 seid;
u16 uplink_seid;
u16 downlink_seid;
u16 max_frame_size;
u16 rss_table_size;
u16 rss_size;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
eventhandler_tag vlan_attach;
@ -565,7 +580,7 @@ ixl_rx_unrefreshed(struct ixl_queue *que)
else
return ((que->num_desc + rxr->next_check) -
rxr->next_refresh - 1);
}
}
/*
** Find the next available unused filter
@ -600,6 +615,28 @@ cmp_etheraddr(const u8 *ea1, const u8 *ea2)
return (cmp);
}
/*
 * Round an unsigned 32-bit value up to the next power of two.
 * A value that is already a power of two is returned unchanged,
 * and an input of 0 yields 1 (the smallest power of two > 0).
 *
 * Bit-smearing technique is public domain, from Bit Twiddling Hacks.
 */
static inline u32
next_power_of_two(u32 n)
{
	u32 v = n - 1;

	/* Smear the highest set bit into every lower bit position. */
	for (u32 shift = 1; shift < 32; shift <<= 1)
		v |= v >> shift;
	v++;

	/* v wrapped to 0 only when the result is out of range; map to 1. */
	v += (v == 0);

	return (v);
}
/*
* Info for stats sysctls
*/
@ -609,7 +646,8 @@ struct ixl_sysctl_info {
char *description;
};
extern int ixl_atr_rate;
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*********************************************************************
* TXRX Function prototypes
@ -620,18 +658,18 @@ void ixl_init_tx_ring(struct ixl_queue *);
int ixl_init_rx_ring(struct ixl_queue *);
bool ixl_rxeof(struct ixl_queue *, int);
bool ixl_txeof(struct ixl_queue *);
void ixl_free_que_tx(struct ixl_queue *);
void ixl_free_que_rx(struct ixl_queue *);
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
void ixl_qflush(struct ifnet *);
void ixl_free_vsi(struct ixl_vsi *);
void ixl_free_que_tx(struct ixl_queue *);
void ixl_free_que_rx(struct ixl_queue *);
#ifdef IXL_FDIR
void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
#endif
void ixl_qflush(struct ifnet *);
/* Common function prototypes between PF/VF driver */
#if __FreeBSD_version >= 1100000
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
void ixl_get_default_rss_key(u32 *);
#endif /* _IXL_H_ */

View File

@ -36,6 +36,9 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
#include "ixl.h"
#include "ixl_pf_qmgr.h"
#define VF_FLAG_ENABLED 0x01
#define VF_FLAG_SET_MAC_CAP 0x02
#define VF_FLAG_VLAN_CAP 0x04
@ -52,6 +55,7 @@ struct ixl_vf {
uint16_t vf_num;
uint32_t version;
struct ixl_pf_qtag qtag;
struct sysctl_ctx_list ctx;
};
@ -60,6 +64,7 @@ struct ixl_pf {
struct i40e_hw hw;
struct i40e_osdep osdep;
struct device *dev;
struct ixl_vsi vsi;
struct resource *pci_mem;
struct resource *msix_mem;
@ -77,6 +82,19 @@ struct ixl_pf {
int if_flags;
int state;
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;
/* Tunable values */
bool enable_msix;
int max_queues;
int ringsz;
bool enable_tx_fc_filter;
int dynamic_rx_itr;
int dynamic_tx_itr;
int tx_itr;
int rx_itr;
struct mtx pf_mtx;
u32 qbase;
@ -87,17 +105,8 @@ struct ixl_pf {
bool link_up;
u32 link_speed;
int advertised_speed;
int fc; /* local flow ctrl setting */
/*
** Network interfaces
** These are the traffic class holders, and
** will have a stack interface and queues
** associated with them.
** NOTE: The PF has only a single interface,
** so it is embedded in the PF struct.
*/
struct ixl_vsi vsi;
int fc; /* link flow ctrl setting */
enum ixl_dbg_mask dbg_mask;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@ -108,6 +117,7 @@ struct ixl_pf {
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
/* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
@ -149,42 +159,18 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
static char *ixl_fc_string[6] = {
"None",
"Rx",
"Tx",
"Full",
"Priority",
"Default"
};
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*** Functions / Macros ***/
/*
** Put the NVM, EEtrackID, and OEM version information into a string
*/
static void
ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
{
u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
sbuf_printf(buf,
"nvm %x.%02x etid %08x oem %d.%d.%d",
(hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
IXL_NVM_VERSION_HI_SHIFT,
(hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
IXL_NVM_VERSION_LO_SHIFT,
hw->nvm.eetrack,
oem_ver, oem_build, oem_patch);
}
static void
ixl_print_nvm_version(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
struct sbuf *sbuf;
sbuf = sbuf_new_auto();
ixl_nvm_version_str(hw, sbuf);
sbuf_finish(sbuf);
device_printf(dev, "%s\n", sbuf_data(sbuf));
sbuf_delete(sbuf);
}
#define I40E_VC_DEBUG(pf, level, ...) \
do { \
if ((pf)->vc_debug_lvl >= (level)) \
@ -201,4 +187,136 @@ ixl_print_nvm_version(struct ixl_pf *pf)
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
/*
* PF-only function declarations
*/
void ixl_set_busmaster(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
void ixl_handle_que(void *context, int pending);
void ixl_init(void *);
void ixl_local_timer(void *);
void ixl_register_vlan(void *, struct ifnet *, u16);
void ixl_unregister_vlan(void *, struct ifnet *, u16);
void ixl_intr(void *);
void ixl_msix_que(void *);
void ixl_msix_adminq(void *);
void ixl_do_adminq(void *, int);
int ixl_res_alloc_cmp(const void *, const void *);
char * ixl_switch_res_type_string(u8);
char * ixl_switch_element_string(struct sbuf *,
struct i40e_aqc_switch_config_element_resp *);
void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct i40e_hw_port_stats *);
void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *,
struct i40e_eth_stats *);
void ixl_media_status(struct ifnet *, struct ifmediareq *);
int ixl_media_change(struct ifnet *);
int ixl_ioctl(struct ifnet *, u_long, caddr_t);
void ixl_enable_adminq(struct i40e_hw *);
void ixl_get_bus_info(struct i40e_hw *, device_t);
void ixl_disable_adminq(struct i40e_hw *);
void ixl_enable_queue(struct i40e_hw *, int);
void ixl_disable_queue(struct i40e_hw *, int);
void ixl_enable_legacy(struct i40e_hw *);
void ixl_disable_legacy(struct i40e_hw *);
void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
u64 *, u64 *);
void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
void ixl_stop(struct ixl_pf *);
void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
int ixl_allocate_pci_resources(struct ixl_pf *);
int ixl_setup_stations(struct ixl_pf *);
int ixl_switch_config(struct ixl_pf *);
void ixl_stop_locked(struct ixl_pf *);
int ixl_teardown_hw_structs(struct ixl_pf *);
int ixl_reset(struct ixl_pf *);
void ixl_init_locked(struct ixl_pf *);
void ixl_set_rss_key(struct ixl_pf *);
void ixl_set_rss_pctypes(struct ixl_pf *);
void ixl_set_rss_hlut(struct ixl_pf *);
int ixl_setup_adminq_msix(struct ixl_pf *);
int ixl_setup_adminq_tq(struct ixl_pf *);
int ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
int ixl_assign_vsi_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
void ixl_free_pci_resources(struct ixl_pf *);
void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
void ixl_config_rss(struct ixl_pf *);
int ixl_set_advertised_speeds(struct ixl_pf *, int);
void ixl_get_initial_advertised_speeds(struct ixl_pf *);
void ixl_print_nvm_version(struct ixl_pf *pf);
void ixl_add_device_sysctls(struct ixl_pf *);
void ixl_handle_mdd_event(struct ixl_pf *);
void ixl_add_hw_stats(struct ixl_pf *);
void ixl_update_stats_counters(struct ixl_pf *);
void ixl_pf_reset_stats(struct ixl_pf *);
void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
void ixl_set_queue_rx_itr(struct ixl_queue *);
void ixl_set_queue_tx_itr(struct ixl_queue *);
void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
void ixl_reconfigure_filters(struct ixl_vsi *vsi);
int ixl_disable_rings(struct ixl_vsi *);
int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
int ixl_enable_rings(struct ixl_vsi *);
int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
void ixl_disable_intr(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
void ixl_add_ifmedia(struct ixl_vsi *, u32);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
void ixl_free_queue_tqs(struct ixl_vsi *);
void ixl_enable_intr(struct ixl_vsi *);
void ixl_disable_rings_intr(struct ixl_vsi *);
void ixl_set_promisc(struct ixl_vsi *);
void ixl_add_multi(struct ixl_vsi *);
void ixl_del_multi(struct ixl_vsi *);
void ixl_setup_vlan_filters(struct ixl_vsi *);
void ixl_init_filters(struct ixl_vsi *);
void ixl_add_hw_filters(struct ixl_vsi *, int, int);
void ixl_del_hw_filters(struct ixl_vsi *, int);
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *, u8 *, s16);
void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
#endif /* _IXL_PF_H_ */

1925
sys/dev/ixl/ixl_pf_iov.c Normal file

File diff suppressed because it is too large Load Diff

62
sys/dev/ixl/ixl_pf_iov.h Normal file
View File

@ -0,0 +1,62 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXL_PF_IOV_H_
#define _IXL_PF_IOV_H_
#include "ixl_pf.h"
#include <sys/nv.h>
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
/* Public functions */
/*
* These three are DEVMETHODs required for SR-IOV PF support.
*/
int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
void ixl_iov_uninit(device_t dev);
int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
/*
* The standard PF driver needs to call these during normal execution when
* SR-IOV mode is active.
*/
void ixl_initialize_sriov(struct ixl_pf *pf);
void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
void ixl_handle_vflr(void *arg, int pending);
#endif /* _IXL_PF_IOV_H_ */

5556
sys/dev/ixl/ixl_pf_main.c Normal file

File diff suppressed because it is too large Load Diff

308
sys/dev/ixl/ixl_pf_qmgr.c Normal file
View File

@ -0,0 +1,308 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl_pf_qmgr.h"
static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num);
/*
 * Initialize the queue manager to track num_queues queues.
 *
 * Returns EINVAL if num_queues is zero, 0 on success.  The qinfo array
 * is allocated with M_WAITOK, which cannot return NULL in the FreeBSD
 * kernel, so the ENOMEM path below is purely defensive and should never
 * be taken.  (Fixed: the bare "return ENOMEM" now uses the parenthesized
 * return style used everywhere else in this file, per style(9).)
 */
int
ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues)
{
	if (num_queues < 1)
		return (EINVAL);

	qmgr->num_queues = num_queues;
	qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo),
	    M_IXL, M_ZERO | M_WAITOK);
	if (qmgr->qinfo == NULL)
		return (ENOMEM);

	return (0);
}
/*
 * Allocate one contiguous block of queues for a VSI.
 *
 * The request is rounded up to the next power of two (hardware requires
 * power-of-two allocations).  On success the qtag records the base queue
 * index in qidx[0], the number of queues actually reserved in
 * num_allocated, and the number the caller asked for in num_active.
 *
 * Returns EINVAL on bad arguments, ENOSPC if no large-enough free block
 * exists.
 */
int
ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
{
	int i;
	int avail;
	int block_start;
	u16 alloc_size;

	if (qtag == NULL || num < 1)
		return (EINVAL);

	/* We have to allocate in power-of-two chunks, so get next power of two */
	alloc_size = (u16)next_power_of_two(num);

	/* Don't try if there aren't enough queues */
	avail = ixl_pf_qmgr_get_num_free(qmgr);
	if (avail < alloc_size)
		return (ENOSPC);

	block_start = ixl_pf_qmgr_find_free_contiguous_block(qmgr, alloc_size);
	if (block_start < 0)
		return (ENOSPC);

	/* Mark queues as allocated */
	for (i = block_start; i < block_start + alloc_size; i++)
		qmgr->qinfo[i].allocated = true;

	bzero(qtag, sizeof(*qtag));
	qtag->qmgr = qmgr;
	qtag->type = IXL_PF_QALLOC_CONTIGUOUS;
	qtag->qidx[0] = block_start;
	/*
	 * BUG FIX: these two assignments were swapped.  num_allocated must
	 * be the rounded-up block size: ixl_pf_qmgr_release() clears
	 * num_allocated entries starting at qidx[0], so recording only the
	 * requested count would leak the round-up remainder queues.  This
	 * also matches the convention in ixl_pf_qmgr_alloc_scattered().
	 */
	qtag->num_allocated = alloc_size;
	qtag->num_active = num;

	return (0);
}
/*
 * NB: indices is u16 because this is the queue index width used in the Add VSI AQ command
 */
/*
 * Allocate queues for a VSI from wherever free slots happen to be.
 * The request is rounded up to the next power of two; the chosen queue
 * indices are recorded in qtag->qidx[].
 */
int
ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
{
	u16 alloc_size;
	int nfound = 0;

	if (qtag == NULL || num < 1 || num > 16)
		return (EINVAL);

	/* Allocations must happen in power-of-two chunks; round up. */
	alloc_size = (u16)next_power_of_two(num);

	/* Bail early if the manager can't satisfy the request at all. */
	if (ixl_pf_qmgr_get_num_free(qmgr) < alloc_size)
		return (ENOSPC);

	bzero(qtag, sizeof(*qtag));
	qtag->qmgr = qmgr;
	qtag->type = IXL_PF_QALLOC_SCATTERED;
	qtag->num_active = num;
	qtag->num_allocated = alloc_size;

	/* Claim the first alloc_size free queues, wherever they live. */
	for (int i = 0; i < qmgr->num_queues; i++) {
		if (qmgr->qinfo[i].allocated)
			continue;
		qmgr->qinfo[i].allocated = true;
		qtag->qidx[nfound++] = i;
		if (nfound == alloc_size)
			return (0);
	}

	/* get_num_free() said there was room, so we can't get here. */
	return (EDOOFUS);
}
/*
 * Return all queues recorded in qtag to the manager.
 *
 * Each released queue's qinfo entry is cleared entirely (allocated,
 * enabled, and configured flags).  Returns EINVAL if qtag is NULL.
 */
int
ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag)
{
	u16 i, qidx;

	if (qtag == NULL)
		return (EINVAL);

	if (qtag->type == IXL_PF_QALLOC_SCATTERED) {
		for (i = 0; i < qtag->num_allocated; i++) {
			qidx = qtag->qidx[i];
			bzero(&qmgr->qinfo[qidx], sizeof(qmgr->qinfo[qidx]));
		}
	} else {
		u16 first_index = qtag->qidx[0];
		for (i = first_index; i < first_index + qtag->num_allocated; i++)
			/*
			 * Was sizeof(qmgr->qinfo[qidx]): qidx is never
			 * assigned in this branch.  Harmless because
			 * sizeof doesn't evaluate its operand, but it's
			 * misleading and trips static analyzers, so
			 * index with i instead.
			 */
			bzero(&qmgr->qinfo[i], sizeof(qmgr->qinfo[i]));
	}

	qtag->qmgr = NULL;
	return (0);
}
/* Return the total number of queues this manager controls. */
int
ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr)
{
	return (qmgr->num_queues);
}
/*
 * ERJ: This assumes the info array isn't longer than INT_MAX.
 * This assumption might cause a y3k bug or something, I'm sure.
 */
/* Count the queues not currently claimed by any allocation. */
int
ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr)
{
	int nfree = 0;
	int i;

	for (i = 0; i < qmgr->num_queues; i++) {
		if (!qmgr->qinfo[i].allocated)
			nfree++;
	}

	return (nfree);
}
/*
 * Return the index of the first unallocated queue at or after start,
 * -EINVAL if start is out of range, or -ENOSPC if every queue is taken.
 */
int
ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start)
{
	if (start > qmgr->num_queues - 1)
		return (-EINVAL);

	for (int i = start; i < qmgr->num_queues; i++) {
		if (!qmgr->qinfo[i].allocated)
			return (i);
	}

	/* No free queues */
	return (-ENOSPC);
}
/*
 * Tear down the queue manager: free the qinfo array allocated by
 * ixl_pf_qmgr_init() and clear the pointer defensively.
 */
void
ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr)
{
	free(qmgr->qinfo, M_IXL);
	qmgr->qinfo = NULL;
}
/* Record that the TX (or RX) side of a VSI-relative queue is enabled. */
void
ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
{
	struct ixl_pf_qmgr_qinfo *qinfo;

	MPASS(qtag != NULL);

	qinfo = &qtag->qmgr->qinfo[ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx)];
	if (tx)
		qinfo->tx_enabled = true;
	else
		qinfo->rx_enabled = true;
}
/* Record that the TX (or RX) side of a VSI-relative queue is disabled. */
void
ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
{
	struct ixl_pf_qmgr_qinfo *qinfo;

	MPASS(qtag != NULL);

	qinfo = &qtag->qmgr->qinfo[ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx)];
	if (tx)
		qinfo->tx_enabled = false;
	else
		qinfo->rx_enabled = false;
}
/* Record that the TX (or RX) side of a VSI-relative queue is configured. */
void
ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
{
	struct ixl_pf_qmgr_qinfo *qinfo;

	MPASS(qtag != NULL);

	qinfo = &qtag->qmgr->qinfo[ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx)];
	if (tx)
		qinfo->tx_configured = true;
	else
		qinfo->rx_configured = true;
}
/* Report whether the TX (or RX) side of a VSI-relative queue is enabled. */
bool
ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
{
	struct ixl_pf_qmgr_qinfo *qinfo;

	MPASS(qtag != NULL);

	qinfo = &qtag->qmgr->qinfo[ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx)];
	return (tx ? qinfo->tx_enabled : qinfo->rx_enabled);
}
/* Report whether the TX (or RX) side of a VSI-relative queue is configured. */
bool
ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
{
	struct ixl_pf_qmgr_qinfo *qinfo;

	MPASS(qtag != NULL);

	qinfo = &qtag->qmgr->qinfo[ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx)];
	return (tx ? qinfo->tx_configured : qinfo->rx_configured);
}
/*
 * Translate a VSI-relative queue index into a PF-global queue index:
 * an offset from the base for contiguous allocations, a table lookup
 * for scattered ones.
 */
u16
ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index)
{
	MPASS(index < qtag->num_allocated);

	return ((qtag->type == IXL_PF_QALLOC_CONTIGUOUS) ?
	    qtag->qidx[0] + index : qtag->qidx[index]);
}
/* Static Functions */

/*
 * Scan for the first run of num consecutive unallocated queues and
 * return its starting index, or -1 if no such run exists.
 */
static int
ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num)
{
	int run_start = -1;
	int run_len = 0;

	for (int i = 0; i < qmgr->num_queues; i++) {
		if (qmgr->qinfo[i].allocated) {
			/* Run broken by an allocated queue; start over. */
			run_start = -1;
			run_len = 0;
			continue;
		}
		if (run_start < 0)
			run_start = i;
		if (++run_len == num)
			return (run_start);
	}

	/* Can't find a contiguous block of the requested size */
	return (-1);
}

109
sys/dev/ixl/ixl_pf_qmgr.h Normal file
View File

@ -0,0 +1,109 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl_pf.h"
#ifndef _IXL_PF_QMGR_H_
#define _IXL_PF_QMGR_H_
/*
* Primarily manages the queues that need to be allocated to VSIs.
*
* Cardinality: There should only be one of these in a PF.
* Lifetime: Created and initialized in attach(); destroyed in detach().
*/
#define IXL_MAX_SCATTERED_QUEUES 16
#define IXL_MAX_CONTIGUOUS_QUEUES_XL710 64
#define IXL_MAX_CONTIGUOUS_QUEUES_X722 128
/* Structures */

/* Manager */

/* Per-queue bookkeeping kept by the queue manager. */
struct ixl_pf_qmgr_qinfo {
	bool allocated;		/* queue is owned by some qtag */
	bool tx_enabled;	/* set/cleared via mark_queue_{enabled,disabled}(tx) */
	bool rx_enabled;	/* set/cleared via mark_queue_{enabled,disabled}(rx) */
	bool tx_configured;	/* set via mark_queue_configured(tx) */
	bool rx_configured;	/* set via mark_queue_configured(rx) */
};

struct ixl_pf_qmgr {
	u16 num_queues;		/* total queues under management */
	struct ixl_pf_qmgr_qinfo *qinfo; /* array of num_queues entries */
};

/* Tag */

/* How the queues recorded in a tag were handed out. */
enum ixl_pf_qmgr_qalloc_type {
	IXL_PF_QALLOC_CONTIGUOUS,	/* one block; qidx[0] is the base index */
	IXL_PF_QALLOC_SCATTERED		/* qidx[] lists each queue individually */
};

/* Handle describing one allocation made from the manager. */
struct ixl_pf_qtag {
	struct ixl_pf_qmgr *qmgr;	/* owning manager; NULLed on release */
	enum ixl_pf_qmgr_qalloc_type type;
	u16 qidx[IXL_MAX_SCATTERED_QUEUES];
	u16 num_allocated;	/* entry count released by ixl_pf_qmgr_release() */
	/*
	 * NOTE(review): alloc_scattered() stores the requested count in
	 * num_active and the power-of-two round-up in num_allocated, but
	 * alloc_contiguous() stores them the other way around -- verify
	 * which convention is intended.
	 */
	u16 num_active;
};
/* Public manager functions */
int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues);
void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr);
int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr);
int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start);
int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr);
/* Allocate queues for a VF VSI */
int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
/* Allocate queues for the LAN VSIs, or X722 VF VSIs */
int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
/* Release a queue allocation */
int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag);
/* Help manage queues used in VFs */
/* Typically hardware refers to RX as 0 and TX as 1, so continue that convention here */
void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
/* Public tag functions */
u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index);
#endif /* _IXL_PF_QMGR_H_ */

View File

@ -35,7 +35,7 @@
/*
** IXL driver TX/RX Routines:
** This was separated to allow usage by
** both the BASE and the VF drivers.
** both the PF and VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
@ -58,14 +58,36 @@ static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
static __inline void ixl_rx_discard(struct rx_ring *, int);
static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
static inline void ixl_rx_discard(struct rx_ring *, int);
static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
static inline bool ixl_tso_detect_sparse(struct mbuf *mp);
static int ixl_tx_setup_offload(struct ixl_queue *que,
struct mbuf *mp, u32 *cmd, u32 *off);
static inline u32 ixl_get_tx_head(struct ixl_queue *que);
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
 * Copy the driver's built-in default RSS hash key into @key.
 *
 * @key: destination buffer; must hold at least IXL_RSS_KEY_SIZE bytes
 *       (caller-supplied, must not be NULL).
 */
void
ixl_get_default_rss_key(u32 *key)
{
	MPASS(key != NULL);

	/*
	 * Fixed seed words; the trailing entries are zero.  The array is
	 * sized IXL_RSS_KEY_SIZE_REG words while the copy below moves
	 * IXL_RSS_KEY_SIZE bytes -- presumably SIZE == SIZE_REG * 4;
	 * verify against the ixl headers.
	 */
	u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
	    0x183cfd8c, 0xce880440, 0x580cbc3c,
	    0x35897377, 0x328b25e1, 0x4fa98922,
	    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
	    0x0, 0x0, 0x0};

	bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
}
/*
** Multiqueue Transmit driver
*/
@ -98,13 +120,6 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
i = m->m_pkthdr.flowid % vsi->num_queues;
} else
i = curcpu % vsi->num_queues;
/*
** This may not be perfect, but until something
** better comes along it will keep from scheduling
** on stalled queues.
*/
if (((1 << i) & vsi->active_queues) == 0)
i = ffsl(vsi->active_queues);
que = &vsi->queues[i];
txr = &que->txr;
@ -239,7 +254,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
struct ixl_tx_buf *buf;
struct i40e_tx_desc *txd = NULL;
struct mbuf *m_head, *m;
int i, j, error, nsegs, maxsegs;
int i, j, error, nsegs;
int first, last = 0;
u16 vtag = 0;
u32 cmd, off;
@ -259,12 +274,10 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
buf = &txr->buffers[first];
map = buf->map;
tag = txr->tx_tag;
maxsegs = IXL_MAX_TX_SEGS;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
/* Use larger mapping for TSO */
tag = txr->tso_tag;
maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
@ -299,19 +312,19 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
que->tx_dma_setup++;
que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
que->tx_dma_setup++;
que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
}
} else if (error == ENOMEM) {
que->tx_dma_setup++;
que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
que->tx_dma_setup++;
que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
@ -804,6 +817,7 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
/* ERJ: this must not be less than 64 */
mss = mp->m_pkthdr.tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
@ -1374,7 +1388,7 @@ ixl_free_que_rx(struct ixl_queue *que)
return;
}
static __inline void
static inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
@ -1405,7 +1419,7 @@ ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
}
static __inline void
static inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct ixl_rx_buf *rbuf;
@ -1532,7 +1546,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
u32 rsc, status, error;
u32 status, error;
u16 hlen, plen, vtag;
u64 qword;
u8 ptype;
@ -1565,7 +1579,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
count--;
sendmp = NULL;
nbuf = NULL;
rsc = 0;
cur->wb.qword1.status_error_len = 0;
rbuf = &rxr->buffers[i];
mh = rbuf->m_head;
@ -1673,10 +1686,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
sendmp = mp;
sendmp->m_flags |= M_PKTHDR;
sendmp->m_pkthdr.len = mp->m_len;
if (vtag) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
}
}
/* Pass the head pointer on */
if (eop == 0) {
@ -1695,6 +1704,11 @@ ixl_rxeof(struct ixl_queue *que, int count)
/* capture data for dynamic ITR adjustment */
rxr->packets++;
rxr->bytes += sendmp->m_pkthdr.len;
/* Set VLAN tag (field only valid in eop desc) */
if (vtag) {
sendmp->m_pkthdr.ether_vtag = vtag;
sendmp->m_flags |= M_VLANTAG;
}
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS

View File

@ -44,7 +44,7 @@
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
@ -55,6 +55,10 @@
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
/* printf %b arg */
#define IXLV_FLAGS \
@ -62,9 +66,17 @@
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
/* Hack for compatibility with 1.0.x linux pf driver */
#define I40E_VIRTCHNL_OP_EVENT 17
#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \
"\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \
"\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \
"\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \
"\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \
"\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \
"\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \
"\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \
"\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \
"\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
"\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
/* Driver state */
enum ixlv_state_t {
@ -80,9 +92,11 @@ enum ixlv_state_t {
IXLV_INIT_MAPPING,
IXLV_INIT_ENABLE,
IXLV_INIT_COMPLETE,
IXLV_RUNNING,
IXLV_RUNNING,
};
/* Structs */
struct ixlv_mac_filter {
SLIST_ENTRY(ixlv_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
@ -107,6 +121,7 @@ struct ixlv_sc {
struct resource *msix_mem;
enum ixlv_state_t init_state;
int init_in_progress;
/*
* Interrupt resources
@ -154,6 +169,10 @@ struct ixlv_sc {
struct ixl_vc_cmd del_vlan_cmd;
struct ixl_vc_cmd add_multi_cmd;
struct ixl_vc_cmd del_multi_cmd;
struct ixl_vc_cmd config_rss_key_cmd;
struct ixl_vc_cmd get_rss_hena_caps_cmd;
struct ixl_vc_cmd set_rss_hena_cmd;
struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
struct i40e_virtchnl_vf_resource *vf_res;
@ -209,5 +228,9 @@ void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
void ixlv_update_link_status(struct ixlv_sc *);
void ixlv_get_default_rss_key(u32 *, bool);
void ixlv_config_rss_key(struct ixlv_sc *);
void ixlv_set_rss_hena(struct ixlv_sc *);
void ixlv_config_rss_lut(struct ixlv_sc *);
#endif /* _IXLV_H_ */

View File

@ -69,8 +69,10 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
// TODO: valid length in api v1.0 is 0, v1.1 is 4
/* Valid length in api v1.0 is 0, v1.1 is 4 */
valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
@ -218,7 +220,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
i40e_status err;
int retries = 0;
event.buf_len = IXL_AQ_BUFSZ;
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
err = ENOMEM;
@ -230,7 +232,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out_alloc;
/* Initial delay here is necessary */
i40e_msec_delay(100);
i40e_msec_pause(100);
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
continue;
@ -288,7 +290,7 @@ ixlv_send_vf_config_msg(struct ixlv_sc *sc)
u32 caps;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
@ -331,7 +333,7 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
i40e_msec_delay(10);
i40e_msec_pause(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
DDPRINTF(dev, "Received a response from PF,"
@ -498,7 +500,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
vm->vecmap[i].rxitr_idx = 0;
vm->vecmap[i].txitr_idx = 0;
vm->vecmap[i].txitr_idx = 1;
}
/* Misc vector last - this is only for AdminQ messages */
@ -570,13 +572,6 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
// ERJ: Should this be taken out?
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
I40E_SUCCESS);
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@ -640,13 +635,6 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
// ERJ: Take this out?
if (i == 0) { /* Should not happen... */
device_printf(dev, "%s: i == 0?\n", __func__);
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
I40E_SUCCESS);
return;
}
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@ -842,6 +830,100 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
vsi->eth_stats = *es;
}
/*
 * Send the VF's RSS hash key to the PF over the virtual channel.
 *
 * With the RSS kernel option the stack-configured key is fetched via
 * rss_getkey(); otherwise the driver default key is used.  The
 * variable-length i40e_virtchnl_rss_key message is allocated with
 * M_NOWAIT; on allocation failure the message is simply not sent.
 */
void
ixlv_config_rss_key(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_key *rss_key_msg;
	int msg_len, key_length;
	u8 rss_seed[IXL_RSS_KEY_SIZE];

#ifdef RSS
	/* NOTE(review): rss_hash_config appears unused in this function. */
	u32 rss_hash_config;
#endif

#ifdef RSS
	/* Fetch the configured RSS key */
	rss_getkey(&rss_seed);
#else
	ixl_get_default_rss_key((u32 *)rss_seed);
#endif
	/* Send the fetched key */
	key_length = IXL_RSS_KEY_SIZE;
	/* -1: the struct presumably already declares key[1] -- verify. */
	msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
	rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rss_key_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
		return;
	}

	rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
	rss_key_msg->key_len = key_length;
	bcopy(rss_seed, &rss_key_msg->key[0], key_length);

	DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
	    rss_key_msg->vsi_id, rss_key_msg->key_len);

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
	    (u8 *)rss_key_msg, msg_len);

	free(rss_key_msg, M_DEVBUF);
}
/*
 * Tell the PF which RSS hash-enable (HENA) flow types this VF wants,
 * using the driver default set (IXL_DEFAULT_RSS_HENA).
 */
void
ixlv_set_rss_hena(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_hena hena;

	hena.hena = IXL_DEFAULT_RSS_HENA;

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
	    (u8 *)&hena, sizeof(hena));
}
/*
 * Send the VF's RSS lookup table (LUT) to the PF over the virtual channel.
 *
 * Each LUT entry is filled with a queue index in round-robin order (or
 * from the stack's indirection table when the RSS option is compiled in),
 * capped at the number of configured queues.  The variable-length message
 * is allocated with M_NOWAIT; on failure it is simply not sent.
 */
void
ixlv_config_rss_lut(struct ixlv_sc *sc)
{
	struct i40e_virtchnl_rss_lut *rss_lut_msg;
	int msg_len;
	u16 lut_length;
	u32 lut;
	int i, que_id;

	lut_length = IXL_RSS_VSI_LUT_SIZE;
	/* -1: the struct presumably already declares lut[1] -- verify. */
	msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
	rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rss_lut_msg == NULL) {
		device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
		return;
	}

	rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
	/* Each LUT entry is a max of 1 byte, so this is easy */
	rss_lut_msg->lut_entries = lut_length;

	/* Populate the LUT with max no. of queues in round robin fashion */
	for (i = 0; i < lut_length; i++) {
#ifdef RSS
		/*
		 * Fetch the RSS bucket id for the given indirection entry.
		 * Cap it at the number of configured buckets (which is
		 * num_queues.)
		 */
		que_id = rss_get_indirection_to_bucket(i);
		que_id = que_id % sc->vsi.num_queues;
#else
		que_id = i % sc->vsi.num_queues;
#endif
		lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
		rss_lut_msg->lut[i] = lut;
	}

	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
	    (u8 *)rss_lut_msg, msg_len);

	free(rss_lut_msg, M_DEVBUF);
}
/*
** ixlv_vc_completion
**
@ -940,7 +1022,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/* TODO: Clear a state flag, so we know we're ready to run init again */
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
@ -950,7 +1032,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
/* Tell the stack that the interface is no longer active */
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
@ -961,6 +1043,18 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
v_retval);
break;
case I40E_VIRTCHNL_OP_SET_RSS_HENA:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
v_retval);
break;
case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
v_retval);
break;
default:
#ifdef IXL_DEBUG
device_printf(dev,
@ -1008,6 +1102,18 @@ ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
case IXLV_FLAG_AQ_ENABLE_QUEUES:
ixlv_enable_queues(sc);
break;
case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
ixlv_config_rss_key(sc);
break;
case IXLV_FLAG_AQ_SET_RSS_HENA:
ixlv_set_rss_hena(sc);
break;
case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
ixlv_config_rss_lut(sc);
break;
}
}

View File

@ -5,16 +5,13 @@
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ixl.c ixl_txrx.c i40e_osdep.c
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c i40e_osdep.c
SRCS += ixl_pf_iov.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c
CFLAGS += -DSMP -DIXL_DEBUG_SYSCTL
# Add Flow Director support
# CFLAGS += -DIXL_FDIR
# Debug messages / sysctls
# CFLAGS += -DIXLE_DEBUG
# CFLAGS += -DIXL_DEBUG
.include <bsd.kmod.mk>

6
sys/modules/ixlv/Makefile Executable file → Normal file
View File

@ -10,11 +10,7 @@ SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c
CFLAGS += -DSMP
# Add Flow Director support
# CFLAGS += -DIXL_FDIR
# Debug messages / sysctls
# CFLAGS += -DIXLE_DEBUG
# CFLAGS += -DIXL_DEBUG
.include <bsd.kmod.mk>