ixl(4): Update to 1.7.12-k

Refresh upstream driver before impending conversion to iflib.

Major new features:

- Support for Fortville-based 25G adapters
- Support for I2C reads/writes

(To prevent getting or sending corrupt data, you should set
dev.ixl.0.debug.disable_fw_link_management=1 when using I2C
[this will disable link!], then set it to 0 when done. The driver implements
the SIOCGI2C ioctl, so ifconfig -v works for reading I2C data,
but there are read_i2c and write_i2c sysctls under the .debug sysctl tree
[the latter being useful for upper page support in QSFP+]).

- Addition of an iWARP client interface (so the future iWARP driver for
  X722 devices can communicate with the base driver).
  - This option is compiled in by default, via "options IXL_IW" in
    GENERIC.

Differential Revision:	https://reviews.freebsd.org/D9227
Reviewed by:	sbruno
MFC after:	2 weeks
Sponsored by:	Intel Corporation
This commit is contained in:
Eric Joyner 2017-02-10 01:04:11 +00:00
parent e628e1b919
commit cb6b8299fd
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=313497
31 changed files with 2990 additions and 683 deletions

View File

@ -233,6 +233,7 @@ device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel PRO/10GbE PCIE PF Ethernet
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel XL710 40Gbe PCIE Ethernet
options IXL_IW # Enable iWARP Client Interface in ixl(4)
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device ti # Alteon Networks Tigon I/II gigabit Ethernet

View File

@ -335,6 +335,7 @@ device ipw # Intel 2100 wireless NICs.
device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs.
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel XL710 40Gbe PCIE Ethernet
options IXL_IW # Enable iWARP Client Interface in ixl(4)
device ixlv # Intel XL710 40Gbe VF PCIE Ethernet
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand

View File

@ -256,6 +256,10 @@ dev/ixl/ixl_pf_qmgr.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_iw.c optional ixl pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_ixlv.c optional ixlv pci \
compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixlvc.c optional ixlv pci \

View File

@ -48,6 +48,9 @@ AGP_DEBUG opt_agp.h
ATKBD_DFLT_KEYMAP opt_atkbd.h
# iWARP client interface support in ixl
IXL_IW opt_ixl.h
# -------------------------------
# EOF
# -------------------------------

View File

@ -1020,11 +1020,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",

View File

@ -154,6 +154,7 @@ enum i40e_admin_queue_opc {
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
i40e_aqc_opc_clear_all_wol_filters = 0x025E,
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
@ -535,7 +536,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
#define I40E_AQC_ADDR_VALID_MASK 0x1F0
#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@ -556,6 +558,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@ -594,6 +597,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
@ -1757,6 +1761,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@ -1768,11 +1774,20 @@ struct i40e_aq_get_phy_abilities_resp {
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 mod_type_ext;
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
#define I40E_AQ_REQUEST_FEC_KR 0x04
#define I40E_AQ_REQUEST_FEC_RS 0x08
#define I40E_AQ_ENABLE_FEC_AUTO 0x10
#define I40E_AQ_FEC
#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@ -1796,11 +1811,15 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 eeer;
u8 low_power_ctrl;
u8 phy_type_ext;
#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 reserved[2];
u8 fec_config;
#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
#define I40E_AQ_SET_FEC_AUTO BIT(4)
#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@ -1890,6 +1909,8 @@ struct i40e_aqc_get_link_status {
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
u8 power_desc;

View File

@ -78,7 +78,6 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
case I40E_DEV_ID_X722_VF_HV:
case I40E_DEV_ID_X722_A0_VF:
hw->mac.type = I40E_MAC_X722_VF;
break;
@ -1088,7 +1087,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
I40E_NONDMA_TO_NONDMA);
return status;
}
@ -1111,7 +1111,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@ -1224,6 +1225,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
case I40E_PHY_TYPE_25GBASE_LR:
case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@ -1238,6 +1241,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@ -1245,6 +1249,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@ -1725,10 +1730,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@ -1888,6 +1896,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@ -1910,12 +1920,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = FALSE;
if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = TRUE;
else
hw_link_info->lse_enable = FALSE;
if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
if ((hw->mac.type == I40E_MAC_XL710) &&
(hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@ -2279,6 +2290,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_set_vsi_full_promiscuous
* @hw: pointer to the hw struct
* @seid: VSI number
* @set: set promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
enum i40e_status_code status;
u16 flags = 0;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (set)
flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
I40E_AQC_SET_VSI_PROMISC_MULTICAST |
I40E_AQC_SET_VSI_PROMISC_BROADCAST;
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
I40E_AQC_SET_VSI_PROMISC_MULTICAST |
I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = CPU_TO_LE16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
@ -2347,6 +2395,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_set_vsi_bc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
* @enable: set broadcast promiscuous enable/disable for a given VLAN
* @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
enum i40e_status_code status;
u16 flags = 0;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
cmd->seid = CPU_TO_LE16(seid);
cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
@ -2680,14 +2762,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
/* extra checking needed to ensure link info to user is timely */
if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
!(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, FALSE, false,
&abilities, NULL);
if (status)
return status;
memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type));
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@ -3537,6 +3622,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
if (major_rev > 1) {
p->mng_protocols_over_mctp = logical_id;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Protocols over MCTP = %d\n",
p->mng_protocols_over_mctp);
} else {
p->mng_protocols_over_mctp = 0;
}
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@ -3765,7 +3858,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
@ -3806,8 +3898,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
hw->num_partitions = num_functions / hw->num_ports;
if (hw->num_ports != 0) {
hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
hw->num_partitions = num_functions / hw->num_ports;
}
/* additional HW specific goodies that might
* someday be HW version specific
@ -4292,11 +4386,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
* @udp_port: the UDP port to add
* @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
*
* Note: Firmware expects the udp_port value to be in Little Endian format,
* and this function will call CPU_TO_LE16 to convert from Host byte order to
* Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@ -5905,9 +6003,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
if (bwd_size > I40E_AQ_LARGE_BUF)
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@ -5916,7 +6011,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
* i40e_read_phy_register
* i40e_read_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
**/
enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
u32 command = 0;
u16 retry = 1000;
command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
(I40E_MDIO_CLAUSE22_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = I40E_SUCCESS;
break;
}
i40e_usec_delay(10);
retry--;
} while (retry);
if (status) {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't write command to external PHY.\n");
} else {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
}
return status;
}
/**
* i40e_write_phy_register_clause22
* @hw: pointer to the HW structure
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes specified PHY register value
**/
enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
u32 command = 0;
u16 retry = 1000;
command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
(I40E_MDIO_CLAUSE22_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = I40E_SUCCESS;
break;
}
i40e_usec_delay(10);
retry--;
} while (retry);
return status;
}
/**
* i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@ -5925,9 +6105,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr,
u16 *value)
enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@ -5937,8 +6116,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_ADDRESS) |
(I40E_MDIO_STCODE) |
(I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@ -5960,8 +6139,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_READ) |
(I40E_MDIO_STCODE) |
(I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@ -5991,7 +6170,7 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
}
/**
* i40e_write_phy_register
* i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@ -6000,9 +6179,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
*
* Writes value to specified PHY register
**/
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr,
u16 value)
enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@ -6012,8 +6190,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_ADDRESS) |
(I40E_MDIO_STCODE) |
(I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@ -6037,8 +6215,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_WRITE) |
(I40E_MDIO_STCODE) |
(I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
(I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@ -6058,6 +6236,78 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
return status;
}
/**
* i40e_write_phy_register
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
**/
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
status = i40e_write_phy_register_clause22(hw,
reg, phy_addr, value);
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
status = i40e_write_phy_register_clause45(hw,
page, reg, phy_addr, value);
break;
default:
status = I40E_ERR_UNKNOWN_PHY;
break;
}
return status;
}
/**
* i40e_read_phy_register
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
**/
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
status = i40e_read_phy_register_clause45(hw, page, reg,
phy_addr, value);
break;
default:
status = I40E_ERR_UNKNOWN_PHY;
break;
}
return status;
}
/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
@ -6100,14 +6350,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
status = i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
&led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
status = i40e_write_phy_register(hw,
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@ -6119,20 +6371,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
status = i40e_read_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
&led_reg);
status = i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
status = i40e_write_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@ -6140,8 +6390,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, led_ctl);
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@ -6172,8 +6423,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr, &reg_val);
status = i40e_read_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr,
&reg_val);
if (status)
return status;
*val = reg_val;
@ -6206,41 +6459,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, &led_reg);
status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
if (status)
return status;
}
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
status = i40e_write_phy_register(hw,
status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, led_ctl);
status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
return status;
}
@ -6485,10 +6739,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@ -6519,10 +6776,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@ -6569,9 +6829,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
buff_len = sizeof(*filter);
cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@ -6582,6 +6844,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
buff_len = sizeof(*filter);
desc.datalen = CPU_TO_LE16(buff_len);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@ -6618,3 +6886,24 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
/**
* i40e_aq_clear_all_wol_filters
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
*
* Clear all currently configured WoL filters in firmware
**/
enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_all_wol_filters);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}

View File

@ -63,7 +63,6 @@
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \

View File

@ -1240,11 +1240,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
if (NULL == hmc_info) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
goto exit;
}
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");

View File

@ -220,14 +220,14 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
i40e_release_nvm(hw);
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
} else {
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
i40e_release_nvm(hw);
}
return ret_code;
}
@ -886,9 +886,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
/* Clear error status on read */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
return I40E_SUCCESS;
}
/* Clear status even it is not read and log */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
i40e_debug(hw, I40E_DEBUG_NVM,
"Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@ -1247,6 +1258,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
if (hw->aq.arq_last_status) {
hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
return;
}
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@ -1409,7 +1425,8 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
memcpy(buff, &bytes[aq_desc_len], aq_data_len);
i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
I40E_NONDMA_TO_NONDMA);
}
}
@ -1482,7 +1499,7 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
memcpy(bytes, buff, len);
i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@ -1496,7 +1513,7 @@ static enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
memcpy(bytes, buff, remainder);
i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;

View File

@ -189,15 +189,71 @@ void
i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
va_list args;
device_t dev;
if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
return;
dev = ((struct i40e_osdep *)hw->back)->dev;
/* Re-implement device_printf() */
device_print_prettyname(dev);
va_start(args, fmt);
device_printf(((struct i40e_osdep *)hw->back)->dev, fmt, args);
vprintf(fmt, args);
va_end(args);
}
/*
 * Return a human-readable name for a virtchnl opcode, for use in
 * debug/log output.  Unrecognized opcodes map to "UNKNOWN".
 */
const char *
ixl_vc_opcode_str(uint16_t op)
{
	/* Opcode-to-name table; scanned linearly (list is short). */
	static const struct {
		uint16_t	 vc_op;
		const char	*vc_name;
	} vc_op_names[] = {
		{ I40E_VIRTCHNL_OP_VERSION,		"VERSION" },
		{ I40E_VIRTCHNL_OP_RESET_VF,		"RESET_VF" },
		{ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,	"GET_VF_RESOURCES" },
		{ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,	"CONFIG_TX_QUEUE" },
		{ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,	"CONFIG_RX_QUEUE" },
		{ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,	"CONFIG_VSI_QUEUES" },
		{ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,	"CONFIG_IRQ_MAP" },
		{ I40E_VIRTCHNL_OP_ENABLE_QUEUES,	"ENABLE_QUEUES" },
		{ I40E_VIRTCHNL_OP_DISABLE_QUEUES,	"DISABLE_QUEUES" },
		{ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,	"ADD_ETHER_ADDRESS" },
		{ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,	"DEL_ETHER_ADDRESS" },
		{ I40E_VIRTCHNL_OP_ADD_VLAN,		"ADD_VLAN" },
		{ I40E_VIRTCHNL_OP_DEL_VLAN,		"DEL_VLAN" },
		{ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
							"CONFIG_PROMISCUOUS_MODE" },
		{ I40E_VIRTCHNL_OP_GET_STATS,		"GET_STATS" },
		{ I40E_VIRTCHNL_OP_FCOE,		"FCOE" },
		{ I40E_VIRTCHNL_OP_EVENT,		"EVENT" },
		{ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,	"CONFIG_RSS_KEY" },
		{ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,	"CONFIG_RSS_LUT" },
		{ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,	"GET_RSS_HENA_CAPS" },
		{ I40E_VIRTCHNL_OP_SET_RSS_HENA,	"SET_RSS_HENA" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(vc_op_names) / sizeof(vc_op_names[0]); i++) {
		if (vc_op_names[i].vc_op == op)
			return (vc_op_names[i].vc_name);
	}
	return ("UNKNOWN");
}
u16
i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
{

View File

@ -151,6 +151,7 @@ struct i40e_osdep {
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
uint32_t flush_reg;
int i2c_intfc_num;
device_t dev;
};
@ -185,6 +186,8 @@ extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask,
/* Non-busy-wait that uses kern_yield() */
void i40e_msec_pause(int);
const char * ixl_vc_opcode_str(uint16_t op);
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.

View File

@ -166,12 +166,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@ -517,10 +523,20 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
u16 reg, u8 phy_addr, u16 value);
enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);

View File

@ -146,15 +146,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
#define I40E_MDIO_STCODE I40E_MASK(0, \
#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
@ -192,7 +199,6 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
I40E_MAC_X722,
@ -251,6 +257,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@ -317,10 +324,22 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
/*
* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some
* PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
* fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
* a shift is needed to adjust for this with values larger than 31. The
* only affected values are I40E_PHY_TYPE_25GBASE_*.
*/
#define I40E_PHY_TYPE_OFFSET 1
#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
I40E_PHY_TYPE_OFFSET)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@ -330,9 +349,9 @@ enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
#define I40E_WOL_SUPPORT_MASK 1
#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
#define I40E_PROXY_SUPPORT_MASK (1 << 2)
#define I40E_WOL_SUPPORT_MASK 0x1
#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
#define I40E_PROXY_SUPPORT_MASK 0x4
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@ -342,6 +361,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
u32 mng_protocols_over_mctp;
#define I40E_MNG_PROTOCOL_PLDM 0x2
#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@ -457,6 +480,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@ -535,6 +559,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
u16 bus_id;
};
/* Flow control (FC) parameters */
@ -1432,6 +1457,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40

View File

@ -168,6 +168,11 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
struct i40e_virtchnl_vf_resource {
u16 num_vsis;

View File

@ -35,6 +35,11 @@
#include "ixl.h"
#include "ixl_pf.h"
#ifdef IXL_IW
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#endif
#ifdef PCI_IOV
#include "ixl_pf_iov.h"
#endif
@ -42,7 +47,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixl_driver_version[] = "1.6.6-k";
char ixl_driver_version[] = "1.7.12-k";
/*********************************************************************
* PCI Device ID Table
@ -70,6 +75,8 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@ -119,9 +126,11 @@ static driver_t ixl_driver = {
devclass_t ixl_devclass;
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
MODULE_VERSION(ixl, 1);
MODULE_DEPEND(ixl, pci, 1, 1, 1);
MODULE_DEPEND(ixl, ether, 1, 1, 1);
#ifdef DEV_NETMAP
#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000
MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
@ -145,7 +154,7 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
static int ixl_ring_size = DEFAULT_RING;
static int ixl_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixl_ring_size, 0, "Descriptor Ring Size");
@ -206,6 +215,11 @@ TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixl_tx_itr, 0, "TX Interrupt Rate");
#ifdef IXL_IW
int ixl_enable_iwarp = 0;
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp);
#endif
#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
@ -296,12 +310,9 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
/* Save tunable information */
pf->enable_msix = ixl_enable_msix;
pf->max_queues = ixl_max_queues;
pf->ringsz = ixl_ring_size;
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
pf->tx_itr = ixl_tx_itr;
pf->rx_itr = ixl_rx_itr;
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
@ -313,8 +324,35 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
device_printf(dev, "ring_size must be between %d and %d, "
"inclusive, and must be a multiple of %d\n",
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
return (EINVAL);
}
device_printf(dev, "Using default value of %d instead\n",
IXL_DEFAULT_RING);
pf->ringsz = IXL_DEFAULT_RING;
} else
pf->ringsz = ixl_ring_size;
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
device_printf(dev, "Invalid tx_itr value of %d set!\n",
ixl_tx_itr);
device_printf(dev, "tx_itr must be between %d and %d, "
"inclusive\n",
0, IXL_MAX_ITR);
device_printf(dev, "Using default value of %d instead\n",
IXL_ITR_4K);
pf->tx_itr = IXL_ITR_4K;
} else
pf->tx_itr = ixl_tx_itr;
if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) {
device_printf(dev, "Invalid rx_itr value of %d set!\n",
ixl_rx_itr);
device_printf(dev, "rx_itr must be between %d and %d, "
"inclusive\n",
0, IXL_MAX_ITR);
device_printf(dev, "Using default value of %d instead\n",
IXL_ITR_8K);
pf->rx_itr = IXL_ITR_8K;
} else
pf->rx_itr = ixl_rx_itr;
return (0);
}
@ -529,7 +567,7 @@ ixl_attach(device_t dev)
}
/* Get the bus configuration and set the shared code's config */
ixl_get_bus_info(hw, dev);
ixl_get_bus_info(pf);
/*
* In MSI-X mode, initialize the Admin Queue interrupt,
@ -539,20 +577,50 @@ ixl_attach(device_t dev)
if (pf->msix > 1) {
error = ixl_setup_adminq_msix(pf);
if (error) {
device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
error);
goto err_late;
}
error = ixl_setup_adminq_tq(pf);
if (error) {
device_printf(dev, "ixl_setup_adminq_tq error: %d\n",
device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
error);
goto err_late;
}
ixl_configure_intr0_msix(pf);
ixl_enable_adminq(hw);
ixl_enable_intr0(hw);
error = ixl_setup_queue_msix(vsi);
if (error)
device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
error);
error = ixl_setup_queue_tqs(vsi);
if (error)
device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
error);
} else {
error = ixl_setup_legacy(pf);
error = ixl_setup_adminq_tq(pf);
if (error) {
device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
error);
goto err_late;
}
error = ixl_setup_queue_tqs(vsi);
if (error)
device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
error);
}
if (error) {
device_printf(dev, "interrupt setup error: %d\n", error);
}
/* Set initial advertised speed sysctl value */
ixl_get_initial_advertised_speeds(pf);
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
@ -573,6 +641,27 @@ ixl_attach(device_t dev)
#ifdef DEV_NETMAP
ixl_netmap_attach(vsi);
#endif /* DEV_NETMAP */
#ifdef IXL_IW
if (hw->func_caps.iwarp && ixl_enable_iwarp) {
pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
if (pf->iw_enabled) {
error = ixl_iw_pf_attach(pf);
if (error) {
device_printf(dev,
"interfacing to iwarp driver failed: %d\n",
error);
goto err_late;
}
} else
device_printf(dev,
"iwarp disabled on this device (no msix vectors)\n");
} else {
pf->iw_enabled = false;
device_printf(dev, "The device is not iWARP enabled\n");
}
#endif
INIT_DEBUGOUT("ixl_attach: end");
return (0);
@ -609,7 +698,7 @@ ixl_detach(device_t dev)
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
enum i40e_status_code status;
#ifdef PCI_IOV
#if defined(PCI_IOV) || defined(IXL_IW)
int error;
#endif
@ -633,18 +722,19 @@ ixl_detach(device_t dev)
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_stop(pf);
ixl_free_queue_tqs(vsi);
/* Shutdown LAN HMC */
status = i40e_shutdown_lan_hmc(hw);
if (status)
device_printf(dev,
"Shutdown LAN HMC failed with code %d\n", status);
/* Teardown LAN queue resources */
ixl_teardown_queue_msix(vsi);
ixl_free_queue_tqs(vsi);
/* Shutdown admin queue */
ixl_disable_adminq(hw);
ixl_free_adminq_tq(pf);
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
ixl_free_adminq_tq(pf);
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
@ -657,6 +747,17 @@ ixl_detach(device_t dev)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
callout_drain(&pf->timer);
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
error = ixl_iw_pf_detach(pf);
if (error == EBUSY) {
device_printf(dev, "iwarp in use; stop it first.\n");
return (error);
}
}
#endif
#ifdef DEV_NETMAP
netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */

View File

@ -38,7 +38,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
char ixlv_driver_version[] = "1.4.6-k";
char ixlv_driver_version[] = "1.4.12-k";
/*********************************************************************
* PCI Device ID Table
@ -53,10 +53,8 @@ char ixlv_driver_version[] = "1.4.6-k";
static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@ -90,6 +88,7 @@ static void ixlv_add_multi(struct ixl_vsi *);
static void ixlv_del_multi(struct ixl_vsi *);
static void ixlv_free_queues(struct ixl_vsi *);
static int ixlv_setup_interface(device_t, struct ixlv_sc *);
static int ixlv_teardown_adminq_msix(struct ixlv_sc *);
static int ixlv_media_change(struct ifnet *);
static void ixlv_media_status(struct ifnet *, struct ifmediareq *);
@ -170,7 +169,7 @@ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
static int ixlv_ringsz = DEFAULT_RING;
static int ixlv_ringsz = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
&ixlv_ringsz, 0, "Descriptor Ring Size");
@ -485,13 +484,14 @@ ixlv_detach(device_t dev)
{
struct ixlv_sc *sc = device_get_softc(dev);
struct ixl_vsi *vsi = &sc->vsi;
struct i40e_hw *hw = &sc->hw;
enum i40e_status_code status;
INIT_DBG_DEV(dev, "begin");
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
if_printf(vsi->ifp, "Vlan in use, detach first\n");
INIT_DBG_DEV(dev, "end");
return (EBUSY);
}
@ -512,16 +512,25 @@ ixlv_detach(device_t dev)
/* Drain VC mgr */
callout_drain(&sc->vc_mgr.callout);
i40e_shutdown_adminq(&sc->hw);
ixlv_disable_adminq_irq(hw);
ixlv_teardown_adminq_msix(sc);
/* Drain admin queue taskqueue */
taskqueue_free(sc->tq);
status = i40e_shutdown_adminq(&sc->hw);
if (status != I40E_SUCCESS) {
device_printf(dev,
"i40e_shutdown_adminq() failed with status %s\n",
i40e_stat_str(hw, status));
}
if_free(vsi->ifp);
free(sc->vf_res, M_DEVBUF);
ixlv_free_pci_resources(sc);
ixlv_free_queues(vsi);
mtx_destroy(&sc->mtx);
ixlv_free_filters(sc);
bus_generic_detach(dev);
mtx_destroy(&sc->mtx);
INIT_DBG_DEV(dev, "end");
return (0);
}
@ -963,10 +972,10 @@ ixlv_init(void *arg)
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < IXLV_AQ_MAX_ERR) {
&& ++retries < IXLV_MAX_INIT_WAIT) {
i40e_msec_pause(25);
}
if (retries >= IXLV_AQ_MAX_ERR) {
if (retries >= IXLV_MAX_INIT_WAIT) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
}
@ -1177,7 +1186,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
int rid, want, vectors, queues, available;
int auto_max_queues;
rid = PCIR_BAR(IXL_BAR);
rid = PCIR_BAR(IXL_MSIX_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (!sc->msix_mem) {
@ -1263,11 +1272,11 @@ ixlv_init_msix(struct ixlv_sc *sc)
}
/* Next we need to setup the vector for the Admin Queue */
rid = 1; // zero vector + 1
rid = 1; /* zero vector + 1 */
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
&rid, RF_SHAREABLE | RF_ACTIVE);
if (sc->res == NULL) {
device_printf(dev,"Unable to allocate"
device_printf(dev, "Unable to allocate"
" bus resource: AQ interrupt \n");
goto fail;
}
@ -1366,21 +1375,11 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
}
early:
/* Clean the AdminQ interrupt */
if (sc->tag != NULL) {
bus_teardown_intr(dev, sc->res, sc->tag);
sc->tag = NULL;
}
if (sc->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
sc->res = NULL;
}
pci_release_msi(dev);
if (sc->msix_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(IXL_BAR), sc->msix_mem);
PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem);
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
@ -1650,8 +1649,6 @@ ixlv_setup_queues(struct ixlv_sc *sc)
que->num_desc = ixlv_ringsz;
que->me = i;
que->vsi = vsi;
/* mark the queue as active */
vsi->active_queues |= (u64)1 << que->me;
txr = &que->txr;
txr->que = que;
@ -1854,6 +1851,35 @@ ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
return (f);
}
static int
ixlv_teardown_adminq_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int error = 0;
if (sc->tag != NULL) {
bus_teardown_intr(dev, sc->res, sc->tag);
if (error) {
device_printf(dev, "bus_teardown_intr() for"
" interrupt 0 failed\n");
// return (ENXIO);
}
sc->tag = NULL;
}
if (sc->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
if (error) {
device_printf(dev, "bus_release_resource() for"
" interrupt 0 failed\n");
// return (ENXIO);
}
sc->res = NULL;
}
return (0);
}
/*
** Admin Queue interrupt handler
*/
@ -2024,7 +2050,7 @@ ixlv_set_queue_rx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
rx_itr = (10 * rx_itr * rxr->itr) /
((9 * rx_itr) + rxr->itr);
rxr->itr = rx_itr & IXL_MAX_ITR;
rxr->itr = min(rx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
que->me), rxr->itr);
}
@ -2097,7 +2123,7 @@ ixlv_set_queue_tx_itr(struct ixl_queue *que)
/* do an exponential smoothing */
tx_itr = (10 * tx_itr * txr->itr) /
((9 * tx_itr) + txr->itr);
txr->itr = tx_itr & IXL_MAX_ITR;
txr->itr = min(tx_itr, IXL_MAX_ITR);
wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
que->me), txr->itr);
}
@ -2414,8 +2440,10 @@ ixlv_local_timer(void *arg)
struct ixl_vsi *vsi = &sc->vsi;
struct ixl_queue *que = vsi->queues;
device_t dev = sc->dev;
struct tx_ring *txr;
int hung = 0;
u32 mask, val;
s32 timer, new_timer;
IXLV_CORE_LOCK_ASSERT(sc);
@ -2445,41 +2473,40 @@ ixlv_local_timer(void *arg)
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
if (que->busy)
wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
/*
** Each time txeof runs without cleaning, but there
** are uncleaned descriptors it increments busy. If
** we get to 5 we declare it hung.
*/
if (que->busy == IXL_QUEUE_HUNG) {
++hung;
/* Mark the queue as inactive */
vsi->active_queues &= ~((u64)1 << que->me);
continue;
} else {
/* Check if we've come back from hung */
if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
vsi->active_queues |= ((u64)1 << que->me);
}
if (que->busy >= IXL_MAX_TX_BUSY) {
device_printf(dev,"Warning queue %d "
"appears to be hung!\n", i);
que->busy = IXL_QUEUE_HUNG;
++hung;
for (int i = 0; i < vsi->num_queues; i++, que++) {
txr = &que->txr;
timer = atomic_load_acq_32(&txr->watchdog_timer);
if (timer > 0) {
new_timer = timer - hz;
if (new_timer <= 0) {
atomic_store_rel_32(&txr->watchdog_timer, -1);
device_printf(dev, "WARNING: queue %d "
"appears to be hung!\n", que->me);
++hung;
} else {
/*
* If this fails, that means something in the TX path has updated
* the watchdog, so it means the TX path is still working and
* the watchdog doesn't need to countdown.
*/
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer);
/* Any queues with outstanding work get a sw irq */
wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
}
}
}
/* Only reset when all queues show hung */
if (hung == vsi->num_queues)
/* Reset when a queue shows hung */
if (hung)
goto hung;
callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
return;
hung:
device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
sc->watchdog_events++;
ixlv_stop(sc);
ixlv_init_locked(sc);
}
@ -2634,7 +2661,7 @@ ixlv_config_rss_reg(struct ixlv_sc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
set_hena = IXL_DEFAULT_RSS_HENA;
set_hena = IXL_DEFAULT_RSS_HENA_XL710;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@ -2801,6 +2828,7 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
u16 result = 0;
u32 reg, oldreg;
i40e_status ret;
bool aq_error = false;
IXLV_CORE_LOCK_ASSERT(sc);
@ -2823,14 +2851,17 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
device_printf(dev, "ARQ VF Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
device_printf(dev, "ARQ Overflow Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
aq_error = true;
}
if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
device_printf(dev, "ARQ Critical Error detected\n");
reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.arq.len, reg);
@ -2839,18 +2870,28 @@ ixlv_do_adminq_locked(struct ixlv_sc *sc)
if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
device_printf(dev, "ASQ VF Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
device_printf(dev, "ASQ Overflow Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
aq_error = true;
}
if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
device_printf(dev, "ASQ Critical Error detected\n");
reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
aq_error = true;
}
if (oldreg != reg)
wr32(hw, hw->aq.asq.len, reg);
if (aq_error) {
/* Need to reset adapter */
device_printf(dev, "WARNING: Resetting!\n");
sc->init_state = IXLV_RESET_REQUIRED;
ixlv_stop(sc);
ixlv_init_locked(sc);
}
ixlv_enable_adminq_irq(hw);
}
@ -2977,6 +3018,9 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
CTLFLAG_RD, &(txr.watchdog_timer), 0,
"Ticks before watchdog event is triggered");
#endif
}
}

View File

@ -39,6 +39,7 @@
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include "opt_ixl.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -51,6 +52,7 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
#include <sys/syslog.h>
#include <net/if.h>
#include <net/if_var.h>
@ -170,6 +172,7 @@ enum ixl_dbg_mask {
IXL_DBG_IOV_VC = 0x00002000,
IXL_DBG_SWITCH_INFO = 0x00010000,
IXL_DBG_I2C = 0x00020000,
IXL_DBG_ALL = 0xFFFFFFFF
};
@ -184,7 +187,7 @@ enum ixl_dbg_mask {
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
#define DEFAULT_RING 1024
#define IXL_DEFAULT_RING 1024
#define IXL_MAX_RING 8160
#define IXL_MIN_RING 32
#define IXL_RING_INCREMENT 32
@ -216,7 +219,7 @@ enum ixl_dbg_mask {
#define MAX_MULTICAST_ADDR 128
#define IXL_BAR 3
#define IXL_MSIX_BAR 3
#define IXL_ADM_LIMIT 2
#define IXL_TSO_SIZE 65535
#define IXL_AQ_BUF_SZ ((u32) 4096)
@ -231,6 +234,7 @@ enum ixl_dbg_mask {
#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
#define IXL_MIN_TSO_MSS 64
#define IXL_RSS_KEY_SIZE_REG 13
#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
@ -252,13 +256,15 @@ enum ixl_dbg_mask {
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
/*
* Interrupt Moderation parameters
* Interrupt Moderation parameters
* Multiply ITR values by 2 for real ITR value
*/
#define IXL_MAX_ITR 0x07FF
#define IXL_MAX_ITR 0x0FF0
#define IXL_ITR_100K 0x0005
#define IXL_ITR_20K 0x0019
#define IXL_ITR_8K 0x003E
#define IXL_ITR_4K 0x007A
#define IXL_ITR_1K 0x01F4
#define IXL_ITR_DYNAMIC 0x8000
#define IXL_LOW_LATENCY 0
#define IXL_AVE_LATENCY 1
@ -311,7 +317,7 @@ enum ixl_dbg_mask {
#define IXL_END_OF_INTR_LNKLST 0x7FF
#define IXL_DEFAULT_RSS_HENA (\
#define IXL_DEFAULT_RSS_HENA_BASE (\
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
@ -324,6 +330,17 @@ enum ixl_dbg_mask {
BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
#define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE
#define IXL_DEFAULT_RSS_HENA_X722 (\
IXL_DEFAULT_RSS_HENA_BASE | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@ -429,6 +446,7 @@ struct tx_ring {
bus_dma_tag_t tso_tag;
char mtx_name[16];
struct buf_ring *br;
s32 watchdog_timer;
/* Used for Dynamic ITR calculation */
u32 packets;
@ -488,7 +506,6 @@ struct ixl_queue {
struct resource *res;
void *tag;
int num_desc; /* both tx and rx */
int busy;
struct tx_ring txr;
struct rx_ring rxr;
struct task task;
@ -503,6 +520,7 @@ struct ixl_queue {
u64 mbuf_pkt_failed;
u64 tx_dmamap_failed;
u64 dropped_pkts;
u64 mss_too_small;
};
/*
@ -563,7 +581,6 @@ struct ixl_vsi {
u64 hw_filters_add;
/* Misc. */
u64 active_queues;
u64 flags;
struct sysctl_oid *vsi_node;
};

469
sys/dev/ixl/ixl_iw.c Normal file
View File

@ -0,0 +1,469 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl.h"
#include "ixl_pf.h"
#include "ixl_iw.h"
#include "ixl_iw_int.h"
#ifdef IXL_IW
/* First MSIX vector index handed to the iWARP client (the last iw_msix
 * vectors of the PF's allocation). */
#define IXL_IW_VEC_BASE(pf)	((pf)->msix - (pf)->iw_msix)
/* Number of MSIX vectors reserved for the iWARP client. */
#define IXL_IW_VEC_COUNT(pf)	((pf)->iw_msix)
/* One past the last vector index usable by the iWARP client. */
#define IXL_IW_VEC_LIMIT(pf)	((pf)->msix)

/* Tunable controlling iWARP client support; defined elsewhere in the driver. */
extern int ixl_enable_iwarp;

/* Driver-wide iWARP client state shared by all PFs (list, mutex, taskqueue). */
static struct ixl_iw_state ixl_iw;
/* Count of attached PFs; init/uninit of ixl_iw is keyed off this. */
static int ixl_iw_ref_cnt;
/*
 * Detach every iWARP-owned MSIX vector from its interrupt linked list by
 * writing the FIRSTQ_INDX "end of list" pattern into each LNKLSTN register.
 */
static void
ixl_iw_pf_msix_reset(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vec;

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++)
		wr32(hw, I40E_PFINT_LNKLSTN(vec - 1),
		    I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
}
/*
 * Taskqueue handler: reconcile a PF's scheduled iWARP state with its
 * current state.  Calls the registered client's init() when moving
 * OFF -> ON and stop() when moving ON -> OFF; any other combination is
 * a no-op.  The client callbacks run without the global mutex held.
 */
static void
ixl_iw_invoke_op(void *context, int pending)
{
	struct ixl_iw_pf_entry *pf_entry = (struct ixl_iw_pf_entry *)context;
	struct ixl_iw_pf info;
	bool initialize;
	int err;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) &&
	    (pf_entry->state.iw_current == IXL_IW_PF_STATE_OFF))
		initialize = true;
	else if ((pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_OFF) &&
	    (pf_entry->state.iw_current == IXL_IW_PF_STATE_ON))
		initialize = false;
	else {
		/* nothing to be done, so finish here */
		mtx_unlock(&ixl_iw.mtx);
		return;
	}
	/* Snapshot the PF info so the client callback runs unlocked. */
	info = pf_entry->pf_info;
	mtx_unlock(&ixl_iw.mtx);

	if (initialize) {
		err = ixl_iw.ops->init(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
			    "%s: failed to initialize iwarp (err %d)\n",
			    __func__, err);
		else
			/* NOTE(review): iw_current is updated without the
			 * mutex held here - presumably safe because only
			 * this task mutates it; confirm. */
			pf_entry->state.iw_current = IXL_IW_PF_STATE_ON;
	} else {
		err = ixl_iw.ops->stop(&info);
		if (err)
			device_printf(pf_entry->pf->dev,
			    "%s: failed to stop iwarp (err %d)\n",
			    __func__, err);
		else {
			/* Return the iWARP MSIX vectors to a reset state. */
			ixl_iw_pf_msix_reset(pf_entry->pf);
			pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
		}
	}
	return;
}
/*
 * Tear down the driver-wide iWARP state.  The PF list needs no explicit
 * cleanup (it must already be empty); only the mutex is destroyed.
 */
static void
ixl_iw_uninit(void)
{
	INIT_DEBUGOUT("begin");

	mtx_destroy(&ixl_iw.mtx);
}
/*
 * Set up the driver-wide iWARP state: an empty PF list, the mutex that
 * guards it, and the "no client registered yet" flag.
 */
static void
ixl_iw_init(void)
{
	INIT_DEBUGOUT("begin");

	mtx_init(&ixl_iw.mtx, "ixl_iw_pfs", NULL, MTX_DEF);
	LIST_INIT(&ixl_iw.pfs);
	ixl_iw.registered = false;
}
/******************************************************************************
* if_ixl internal API
*****************************************************************************/
/*
 * Publish a PF's resources (ifnet, device, PCI memory, MSIX range and
 * queue-set handles) to the iWARP interface and, if a client driver is
 * already registered, schedule its init() via the taskqueue.
 *
 * Returns 0 on success or ENOENT if the PF was never attached with
 * ixl_iw_pf_attach().
 */
int
ixl_iw_pf_init(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	struct ixl_iw_pf *pf_info;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to initialize PF not yet attached - sth is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	pf_info = &pf_entry->pf_info;
	pf_info->handle = (void *)pf;
	pf_info->ifp = pf->vsi.ifp;
	pf_info->dev = pf->dev;
	pf_info->pci_mem = pf->pci_mem;
	pf_info->pf_id = pf->hw.pf_id;
	pf_info->mtu = pf->vsi.ifp->if_mtu;
	pf_info->iw_msix.count = IXL_IW_VEC_COUNT(pf);
	pf_info->iw_msix.base = IXL_IW_VEC_BASE(pf);
	/* NOTE(review): every user priority gets qs_handle[0] - looks like
	 * all priorities are meant to share the VSI's first queue-set
	 * handle; confirm the [0] (rather than [i]) index is intentional. */
	for (int i = 0; i < IXL_IW_MAX_USER_PRIORITY; i++)
		pf_info->qs_handle[i] = le16_to_cpu(pf->vsi.info.qs_handle[0]);

	pf_entry->state.pf = IXL_IW_PF_STATE_ON;
	if (ixl_iw.registered) {
		/* Client already present: schedule its init() for this PF. */
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
		taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}
/*
 * Mark a PF as stopped for iWARP purposes.  If the client's init() was
 * scheduled or has run, schedule its stop() via the taskqueue.  Logs and
 * returns silently if the PF was never attached.
 */
void
ixl_iw_pf_stop(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to stop PF which has not been attached - sth is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		goto out;
	}

	pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
	if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
		pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
		if (ixl_iw.registered)
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
	}

out:
	mtx_unlock(&ixl_iw.mtx);

	return;
}
/*
 * Register a PF with the iWARP interface: allocate a tracking entry,
 * put it on the global list and initialize its state-change task.
 * The first attach also brings up the driver-wide iWARP state.
 *
 * Returns 0 on success, EEXIST if the PF is already attached, or
 * ENOMEM on allocation failure.
 */
int
ixl_iw_pf_attach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	/* First PF brings up the global state (list + mutex). */
	if (ixl_iw_ref_cnt == 0)
		ixl_iw_init();

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf) {
			device_printf(pf->dev, "%s: PF already exists\n",
			    __func__);
			err = EEXIST;
			goto out;
		}

	pf_entry = malloc(sizeof(struct ixl_iw_pf_entry),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pf_entry == NULL) {
		device_printf(pf->dev,
		    "%s: failed to allocate memory to attach new PF\n",
		    __func__);
		err = ENOMEM;
		goto out;
	}
	pf_entry->pf = pf;
	pf_entry->state.pf = IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
	pf_entry->state.iw_current = IXL_IW_PF_STATE_OFF;
	LIST_INSERT_HEAD(&ixl_iw.pfs, pf_entry, node);
	ixl_iw_ref_cnt++;

	TASK_INIT(&pf_entry->iw_task, 0, ixl_iw_invoke_op, pf_entry);
out:
	mtx_unlock(&ixl_iw.mtx);

	/*
	 * If the very first attach failed, tear the global state back down.
	 * Otherwise the next attach would see ref_cnt == 0 and call
	 * ixl_iw_init() again, re-initializing a live mutex.
	 */
	if (err != 0 && ixl_iw_ref_cnt == 0)
		ixl_iw_uninit();

	return (err);
}
/*
 * Remove a PF from the iWARP interface.  The PF must have been stopped
 * (state OFF) first.  The last detach tears down the driver-wide state.
 *
 * Returns 0 on success, ENOENT if the PF was never attached, or EBUSY
 * if the PF is still active.
 */
int
ixl_iw_pf_detach(struct ixl_pf *pf)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->pf == pf)
			break;
	if (pf_entry == NULL) {
		/* attempt to detach PF which has not been attached - sth is wrong */
		device_printf(pf->dev, "%s: PF not found\n", __func__);
		err = ENOENT;
		goto out;
	}

	if (pf_entry->state.pf != IXL_IW_PF_STATE_OFF) {
		/* attempt to detach PF which has not yet been stopped - sth is wrong */
		device_printf(pf->dev, "%s: failed - PF is still active\n",
		    __func__);
		err = EBUSY;
		goto out;
	}
	LIST_REMOVE(pf_entry, node);
	free(pf_entry, M_DEVBUF);
	ixl_iw_ref_cnt--;

out:
	mtx_unlock(&ixl_iw.mtx);

	/* NOTE(review): ref_cnt is read after dropping the mutex; a racing
	 * attach could interleave here - confirm callers serialize
	 * attach/detach. */
	if (ixl_iw_ref_cnt == 0)
		ixl_iw_uninit();

	return (err);
}
/******************************************************************************
* API exposed to iw_ixl module
*****************************************************************************/
/*
 * Client-visible hook: re-initialize the PF identified by pf_handle by
 * re-running the driver's locked init path.  Always returns 0.
 */
int
ixl_iw_pf_reset(void *pf_handle)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;

	INIT_DEBUGOUT("begin");

	IXL_PF_LOCK(pf);
	ixl_init_locked(pf);
	IXL_PF_UNLOCK(pf);

	return (0);
}
/*
 * Client-visible hook: program the PF's interrupt registers for the
 * client's AEQ and CEQ MSIX assignments.  The AEQ vector must fall in
 * the iWARP vector range; each iWARP vector is either linked to the CEQ
 * mapped to it or marked end-of-list if no CEQ uses it.
 *
 * Returns 0 on success or EINVAL for an out-of-range AEQ vector.
 */
int
ixl_iw_pf_msix_init(void *pf_handle,
    struct ixl_iw_msix_mapping *msix_info)
{
	struct ixl_pf *pf = (struct ixl_pf *)pf_handle;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;
	int vec, i;

	INIT_DEBUGOUT("begin");

	if ((msix_info->aeq_vector < IXL_IW_VEC_BASE(pf)) ||
	    (msix_info->aeq_vector >= IXL_IW_VEC_LIMIT(pf))) {
		printf("%s: invalid MSIX vector (%i) for AEQ\n",
		    __func__, msix_info->aeq_vector);
		return (EINVAL);
	}
	/* Enable the AEQ interrupt cause on the requested vector/ITR. */
	reg = I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
	    (msix_info->aeq_vector << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
	    (msix_info->itr_indx << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_AEQCTL, reg);

	for (vec = IXL_IW_VEC_BASE(pf); vec < IXL_IW_VEC_LIMIT(pf); vec++) {
		/* Find the CEQ (if any) the client assigned to this vector. */
		for (i = 0; i < msix_info->ceq_cnt; i++)
			if (msix_info->ceq_vector[i] == vec)
				break;
		if (i == msix_info->ceq_cnt) {
			/* this vector has no CEQ mapped */
			reg = I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK;
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);
		} else {
			/* Head of this vector's queue list is CEQ i. */
			reg = (i & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			    (I40E_QUEUE_TYPE_PE_CEQ <<
			    I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_PFINT_LNKLSTN(vec - 1), reg);

			/* Enable CEQ i's cause and terminate its list. */
			reg = I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
			    (vec << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
			    (msix_info->itr_indx <<
			    I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
			    (IXL_QUEUE_EOL <<
			    I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT);
			wr32(hw, I40E_PFINT_CEQCTL(i), reg);
		}
	}

	return (0);
}
/*
 * Called by the iWARP client driver to register its init/stop callbacks.
 * Creates the shared taskqueue, copies the ops table, and schedules
 * init() for every PF that is already up.
 *
 * Returns 0 on success, EACCES if iWARP support is administratively
 * disabled, EINVAL for a bad ops table, EBUSY if a client is already
 * registered, or ENOMEM on allocation failure.
 */
int
ixl_iw_register(struct ixl_iw_ops *ops)
{
	struct ixl_iw_pf_entry *pf_entry;
	int err = 0;

	INIT_DEBUGOUT("begin");

	if (ixl_enable_iwarp == 0) {
		printf("%s: enable_iwarp is off, registering dropped\n",
		    __func__);
		return (EACCES);
	}
	/* Both callbacks are mandatory. */
	if ((ops->init == NULL) || (ops->stop == NULL)) {
		printf("%s: invalid iwarp driver ops\n", __func__);
		return (EINVAL);
	}

	mtx_lock(&ixl_iw.mtx);
	if (ixl_iw.registered) {
		printf("%s: iwarp driver already registered\n", __func__);
		err = EBUSY;
		goto out;
	}

	ixl_iw.tq = taskqueue_create("ixl_iw", M_NOWAIT,
	    taskqueue_thread_enqueue, &ixl_iw.tq);
	if (ixl_iw.tq == NULL) {
		printf("%s: failed to create queue\n", __func__);
		err = ENOMEM;
		goto out;
	}
	taskqueue_start_threads(&ixl_iw.tq, 1, PI_NET, "ixl iw");

	/* Private copy of the ops table; the client's copy may go away. */
	ixl_iw.ops = malloc(sizeof(struct ixl_iw_ops),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (ixl_iw.ops == NULL) {
		printf("%s: failed to allocate memory\n", __func__);
		taskqueue_free(ixl_iw.tq);
		err = ENOMEM;
		goto out;
	}

	ixl_iw.ops->init = ops->init;
	ixl_iw.ops->stop = ops->stop;
	ixl_iw.registered = true;

	/* Kick off init() for PFs that came up before the client loaded. */
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.pf == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_ON;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

out:
	mtx_unlock(&ixl_iw.mtx);

	return (err);
}
/*
 * Called by the iWARP client driver on unload.  Schedules stop() for
 * every active PF, then drains and frees the taskqueue and the ops copy
 * outside the mutex (the tasks themselves take it).
 *
 * Returns 0 on success or ENOENT if no client was registered.
 */
int
ixl_iw_unregister(void)
{
	struct ixl_iw_pf_entry *pf_entry;

	INIT_DEBUGOUT("begin");

	mtx_lock(&ixl_iw.mtx);
	if (!ixl_iw.registered) {
		printf("%s: failed - iwarp driver has not been registered\n",
		    __func__);
		mtx_unlock(&ixl_iw.mtx);
		return (ENOENT);
	}

	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		if (pf_entry->state.iw_scheduled == IXL_IW_PF_STATE_ON) {
			pf_entry->state.iw_scheduled = IXL_IW_PF_STATE_OFF;
			taskqueue_enqueue(ixl_iw.tq, &pf_entry->iw_task);
		}

	ixl_iw.registered = false;

	mtx_unlock(&ixl_iw.mtx);

	/* Wait for every scheduled stop() to run before freeing anything. */
	LIST_FOREACH(pf_entry, &ixl_iw.pfs, node)
		taskqueue_drain(ixl_iw.tq, &pf_entry->iw_task);
	taskqueue_free(ixl_iw.tq);
	ixl_iw.tq = NULL;
	free(ixl_iw.ops, M_DEVBUF);
	ixl_iw.ops = NULL;

	return (0);
}
#endif /* IXL_IW */

75
sys/dev/ixl/ixl_iw.h Normal file
View File

@ -0,0 +1,75 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXL_IW_H_
#define _IXL_IW_H_

/* Number of user-priority queue-set handles exported to the client. */
#define IXL_IW_MAX_USER_PRIORITY	8

/* MSIX vector assignment handed from the iWARP client to the PF driver. */
struct ixl_iw_msix_mapping {
	u8		itr_indx;	/* ITR index for AEQ/CEQ causes */
	int		aeq_vector;	/* MSIX vector servicing the AEQ */
	int		ceq_cnt;	/* number of entries in ceq_vector[] */
	int		*ceq_vector;	/* per-CEQ MSIX vector assignment */
};

/* Range of MSIX vectors reserved for the iWARP client. */
struct ixl_iw_msix {
	int		base;	/* first vector index */
	int		count;	/* number of vectors */
};

/* PF resources exposed to the iWARP client driver. */
struct ixl_iw_pf {
	void		*handle;	/* opaque PF handle for callbacks */
	struct ifnet	*ifp;
	device_t	dev;
	struct resource	*pci_mem;
	u8		pf_id;
	u16		mtu;
	struct ixl_iw_msix	iw_msix;
	u16		qs_handle[IXL_IW_MAX_USER_PRIORITY];
};

/* Callbacks the iWARP client registers with the base driver. */
struct ixl_iw_ops {
	int (*init)(struct ixl_iw_pf *pf_info);
	int (*stop)(struct ixl_iw_pf *pf_info);
};

/* API the base driver exposes to the iw_ixl client module. */
int	ixl_iw_pf_reset(void *pf_handle);
int	ixl_iw_pf_msix_init(void *pf_handle,
	    struct ixl_iw_msix_mapping *msix_info);
int	ixl_iw_register(struct ixl_iw_ops *iw_ops);
int	ixl_iw_unregister(void);

#endif /* _IXL_IW_H_ */

71
sys/dev/ixl/ixl_iw_int.h Normal file
View File

@ -0,0 +1,71 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXL_IW_INT_H_
#define _IXL_IW_INT_H_

/* On/off state used for the PF itself and for the iWARP client's view. */
enum ixl_iw_pf_state {
	IXL_IW_PF_STATE_OFF,
	IXL_IW_PF_STATE_ON
};

/* Tracked state for one PF: driver state plus the client state that is
 * scheduled (requested) and the state that is currently in effect. */
struct ixl_iw_pf_entry_state {
	enum ixl_iw_pf_state pf;
	enum ixl_iw_pf_state iw_scheduled;
	enum ixl_iw_pf_state iw_current;
};

/* One node in the global PF list, with the task that reconciles state. */
struct ixl_iw_pf_entry {
	LIST_ENTRY(ixl_iw_pf_entry)	node;
	struct ixl_pf			*pf;
	struct ixl_iw_pf_entry_state	state;
	struct ixl_iw_pf		pf_info;
	struct task			iw_task;
};

LIST_HEAD(ixl_iw_pfs_head, ixl_iw_pf_entry);

/* Driver-wide iWARP client state: registered ops, PF list, lock, taskqueue. */
struct ixl_iw_state {
	struct ixl_iw_ops	*ops;
	bool			registered;
	struct ixl_iw_pfs_head	pfs;
	struct mtx		mtx;
	struct taskqueue	*tq;
};

/* API the base driver uses internally (called from if_ixl attach/detach). */
int	ixl_iw_pf_init(struct ixl_pf *pf);
void	ixl_iw_pf_stop(struct ixl_pf *pf);
int	ixl_iw_pf_attach(struct ixl_pf *pf);
int	ixl_iw_pf_detach(struct ixl_pf *pf);

#endif /* _IXL_IW_INT_H_ */

View File

@ -79,8 +79,14 @@ struct ixl_pf {
struct callout timer;
int msix;
#ifdef IXL_IW
int iw_msix;
bool iw_enabled;
#endif
int if_flags;
int state;
bool init_in_progress;
u8 supported_speeds;
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;
@ -107,6 +113,7 @@ struct ixl_pf {
int advertised_speed;
int fc; /* link flow ctrl setting */
enum ixl_dbg_mask dbg_mask;
bool has_i2c;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@ -145,8 +152,10 @@ struct ixl_pf {
"\t 0x2 - advertise 1G\n" \
"\t 0x4 - advertise 10G\n" \
"\t 0x8 - advertise 20G\n" \
"\t0x10 - advertise 40G\n\n" \
"Set to 0 to disable link."
"\t0x10 - advertise 25G\n" \
"\t0x20 - advertise 40G\n\n" \
"Set to 0 to disable link.\n" \
"Use \"sysctl -x\" to view flags properly."
#define IXL_SYSCTL_HELP_FC \
"\nSet flow control mode using the values below.\n" \
@ -171,10 +180,11 @@ static char *ixl_fc_string[6] = {
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
/*** Functions / Macros ***/
#define I40E_VC_DEBUG(pf, level, ...) \
do { \
if ((pf)->vc_debug_lvl >= (level)) \
device_printf((pf)->dev, __VA_ARGS__); \
/* Adjust the level here to 10 or over to print stats messages */
#define I40E_VC_DEBUG(p, level, ...) \
do { \
if (level < 10) \
ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \
} while (0)
#define i40e_send_vf_nack(pf, vf, op, st) \
@ -187,16 +197,25 @@ static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
/* Debug printing */
#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
/* For stats sysctl naming */
#define QUEUE_NAME_LEN 32
/* For netmap(4) compatibility */
#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
/*
* PF-only function declarations
*/
void ixl_set_busmaster(device_t);
void ixl_set_msix_enable(device_t);
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
void ixl_handle_que(void *context, int pending);
@ -223,13 +242,10 @@ void ixl_media_status(struct ifnet *, struct ifmediareq *);
int ixl_media_change(struct ifnet *);
int ixl_ioctl(struct ifnet *, u_long, caddr_t);
void ixl_enable_adminq(struct i40e_hw *);
void ixl_get_bus_info(struct i40e_hw *, device_t);
void ixl_disable_adminq(struct i40e_hw *);
void ixl_enable_queue(struct i40e_hw *, int);
void ixl_disable_queue(struct i40e_hw *, int);
void ixl_enable_legacy(struct i40e_hw *);
void ixl_disable_legacy(struct i40e_hw *);
void ixl_enable_intr0(struct i40e_hw *);
void ixl_disable_intr0(struct i40e_hw *);
void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
u64 *, u64 *);
@ -239,6 +255,7 @@ void ixl_stat_update32(struct i40e_hw *, u32, bool,
void ixl_stop(struct ixl_pf *);
void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
int ixl_allocate_pci_resources(struct ixl_pf *);
int ixl_setup_stations(struct ixl_pf *);
@ -256,7 +273,7 @@ int ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
int ixl_assign_vsi_legacy(struct ixl_pf *);
int ixl_setup_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
@ -271,7 +288,9 @@ void ixl_handle_mdd_event(struct ixl_pf *);
void ixl_add_hw_stats(struct ixl_pf *);
void ixl_update_stats_counters(struct ixl_pf *);
void ixl_pf_reset_stats(struct ixl_pf *);
void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
void ixl_get_bus_info(struct ixl_pf *pf);
int ixl_aq_get_link_status(struct ixl_pf *,
struct i40e_aqc_get_link_status *);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
void ixl_handle_empr_reset(struct ixl_pf *);
@ -295,10 +314,9 @@ int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
void ixl_disable_intr(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
void ixl_add_ifmedia(struct ixl_vsi *, u32);
void ixl_add_ifmedia(struct ixl_vsi *, u64);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
@ -319,4 +337,13 @@ void ixl_free_mac_filters(struct ixl_vsi *vsi);
void ixl_update_vsi_stats(struct ixl_vsi *);
void ixl_vsi_reset_stats(struct ixl_vsi *);
/*
* I2C Function prototypes
*/
int ixl_find_i2c_interface(struct ixl_pf *);
s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
#endif /* _IXL_PF_H_ */

605
sys/dev/ixl/ixl_pf_i2c.c Normal file
View File

@ -0,0 +1,605 @@
/******************************************************************************
Copyright (c) 2013-2015, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#include "ixl_pf.h"
/* I2C bit-bang timing delays, in microseconds (see the per-call comments
 * in this file for the nominal spec values each one approximates). */
#define IXL_I2C_T_RISE		1	/* SCL/SDA rise time */
#define IXL_I2C_T_FALL		1	/* SCL/SDA fall time */
#define IXL_I2C_T_SU_DATA	1	/* data set-up time */
#define IXL_I2C_T_SU_STA	5	/* start-condition set-up time */
#define IXL_I2C_T_SU_STO	4	/* stop-condition set-up time */
#define IXL_I2C_T_HD_STA	4	/* start-condition hold time */
#define IXL_I2C_T_LOW		5	/* SCL low period */
#define IXL_I2C_T_HIGH		4	/* SCL high period */
#define IXL_I2C_T_BUF		5	/* bus free time between stop/start */
/* Iterations to wait for a clock-stretching slave to release SCL. */
#define IXL_I2C_CLOCK_STRETCHING_TIMEOUT	500

/* GLGEN_I2CPARAMS register for this device's I2C interface. */
#define IXL_I2C_REG(_hw)	\
    I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num)

/* I2C bit-banging functions*/
static s32	ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data);
static bool	ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl);
static void	ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
static void	ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
static s32	ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data);
static s32	ixl_get_i2c_ack(struct ixl_pf *pf);
static s32	ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data);
static s32	ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data);
static s32	ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data);
static void	ixl_i2c_bus_clear(struct ixl_pf *pf);
static void	ixl_i2c_start(struct ixl_pf *pf);
static void	ixl_i2c_stop(struct ixl_pf *pf);
/**
 * ixl_i2c_bus_clear - Clears the I2C bus
 * @pf: pointer to the PF structure
 *
 * Clears the I2C bus by sending nine clock pulses.
 * Used when data line is stuck low.
 **/
static void
ixl_i2c_bus_clear(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
	u32 i;

	DEBUGFUNC("ixl_i2c_bus_clear");

	ixl_i2c_start(pf);

	/* Release SDA so a stuck slave can drive it while we clock. */
	ixl_set_i2c_data(pf, &i2cctl, 1);

	for (i = 0; i < 9; i++) {
		ixl_raise_i2c_clk(pf, &i2cctl);

		/* Min high period of clock is 4us */
		i40e_usec_delay(IXL_I2C_T_HIGH);

		ixl_lower_i2c_clk(pf, &i2cctl);

		/* Min low period of clock is 4.7us*/
		i40e_usec_delay(IXL_I2C_T_LOW);
	}

	ixl_i2c_start(pf);

	/* Put the i2c bus back to default state */
	ixl_i2c_stop(pf);
}
/**
 * ixl_i2c_stop - Sets I2C stop condition
 * @pf: pointer to the PF structure
 *
 * Sets I2C stop condition (Low -> High on SDA while SCL is High)
 **/
static void
ixl_i2c_stop(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));

	DEBUGFUNC("ixl_i2c_stop");

	/* Stop condition must begin with data low and clock high */
	ixl_set_i2c_data(pf, &i2cctl, 0);
	ixl_raise_i2c_clk(pf, &i2cctl);

	/* Setup time for stop condition (4us) */
	i40e_usec_delay(IXL_I2C_T_SU_STO);

	/* SDA rising while SCL stays high is the stop condition itself. */
	ixl_set_i2c_data(pf, &i2cctl, 1);

	/* bus free time between stop and start (4.7us)*/
	i40e_usec_delay(IXL_I2C_T_BUF);
}
/**
 * ixl_clock_in_i2c_byte - Clocks in one byte via I2C
 * @pf: pointer to the PF structure
 * @data: storage for the byte read from the bus
 *
 * Clocks in one data byte via I2C data/clock, MSB first.
 * Always returns I40E_SUCCESS (bit reads cannot fail).
 **/
static s32
ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data)
{
	s32 i;
	bool bit = 0;

	DEBUGFUNC("ixl_clock_in_i2c_byte");

	/*
	 * Start from a clean byte: previously this OR-ed bits into
	 * whatever value the caller passed, silently depending on the
	 * caller having zeroed *data first.
	 */
	*data = 0;
	for (i = 7; i >= 0; i--) {
		ixl_clock_in_i2c_bit(pf, &bit);
		*data |= bit << i;
	}

	return I40E_SUCCESS;
}
/**
 * ixl_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
 * @pf: pointer to the PF structure
 * @data: read data value
 *
 * Clocks in one bit via I2C data/clock
 **/
static s32
ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data)
{
	struct i40e_hw *hw = &pf->hw;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));

	DEBUGFUNC("ixl_clock_in_i2c_bit");

	ixl_raise_i2c_clk(pf, &i2cctl);

	/* Minimum high period of clock is 4us */
	i40e_usec_delay(IXL_I2C_T_HIGH);

	/* Tri-state the SDA output driver so the slave can drive the line. */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	/* Re-read the register to sample the slave-driven data bit. */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	*data = ixl_get_i2c_data(pf, &i2cctl);

	ixl_lower_i2c_clk(pf, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	i40e_usec_delay(IXL_I2C_T_LOW);

	return I40E_SUCCESS;
}
/**
 * ixl_get_i2c_ack - Polls for I2C ACK
 * @pf: pointer to the PF structure
 *
 * Clocks the ACK bit in from the slave.  Returns I40E_SUCCESS if the
 * slave pulled SDA low within the poll window, I40E_ERR_PHY otherwise.
 **/
static s32
ixl_get_i2c_ack(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status = I40E_SUCCESS;
	u32 i = 0;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
	u32 timeout = 10;
	bool ack = 1;

	ixl_raise_i2c_clk(pf, &i2cctl);

	/* Minimum high period of clock is 4us */
	i40e_usec_delay(IXL_I2C_T_HIGH);

	/* Tri-state SDA so the slave can drive the ACK level. */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	/* Poll for ACK. Note that ACK in I2C spec is
	 * transition from 1 to 0 */
	for (i = 0; i < timeout; i++) {
		i2cctl = rd32(hw, IXL_I2C_REG(hw));
		ack = ixl_get_i2c_data(pf, &i2cctl);

		i40e_usec_delay(1);
		if (!ack)
			break;
	}

	if (ack) {
		ixl_dbg(pf, IXL_DBG_I2C, "I2C ack was not received.\n");
		status = I40E_ERR_PHY;
	}

	ixl_lower_i2c_clk(pf, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	i40e_usec_delay(IXL_I2C_T_LOW);

	return status;
}
/**
 * ixl_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
 * @pf: pointer to the PF structure
 * @data: data value to write
 *
 * Clocks out one bit via I2C data/clock.  Returns I40E_SUCCESS, or
 * I40E_ERR_PHY if the data line did not take the requested value.
 **/
static s32
ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));

	status = ixl_set_i2c_data(pf, &i2cctl, data);
	if (status == I40E_SUCCESS) {
		ixl_raise_i2c_clk(pf, &i2cctl);

		/* Minimum high period of clock is 4us */
		i40e_usec_delay(IXL_I2C_T_HIGH);

		ixl_lower_i2c_clk(pf, &i2cctl);

		/* Minimum low period of clock is 4.7 us.
		 * This also takes care of the data hold time.
		 */
		i40e_usec_delay(IXL_I2C_T_LOW);
	} else {
		status = I40E_ERR_PHY;
		ixl_dbg(pf, IXL_DBG_I2C, "I2C data was not set to %#x\n", data);
	}

	return status;
}
/**
 * ixl_clock_out_i2c_byte - Clocks out one byte via I2C
 * @pf: pointer to the PF structure
 * @data: data byte clocked out
 *
 * Clocks out one data byte via I2C data/clock, MSB first, then releases
 * SDA so the slave can drive the ACK.  Returns the first bit-write
 * failure status, or I40E_SUCCESS.
 **/
static s32
ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status = I40E_SUCCESS;
	s32 i;
	u32 i2cctl;
	bool bit;

	DEBUGFUNC("ixl_clock_out_i2c_byte");

	for (i = 7; i >= 0; i--) {
		bit = (data >> i) & 0x1;
		status = ixl_clock_out_i2c_bit(pf, bit);

		if (status != I40E_SUCCESS)
			break;
	}

	/* Release SDA line (set high) */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK;
	i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK);
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	return status;
}
/**
 * ixl_lower_i2c_clk - Lowers the I2C SCL clock
 * @pf: pointer to the PF structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Lowers the I2C clock line '1'->'0'
 **/
static void
ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl)
{
	struct i40e_hw *hw = &pf->hw;

	/* Drive SCL low: clear the clock bit and enable its output driver. */
	*i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_MASK);
	*i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK);

	wr32(hw, IXL_I2C_REG(hw), *i2cctl);
	ixl_flush(hw);

	/* SCL fall time (300ns) */
	i40e_usec_delay(IXL_I2C_T_FALL);
}
/**
 * ixl_raise_i2c_clk - Raises the I2C SCL clock
 * @pf: pointer to the PF structure
 * @i2cctl: Current value of I2CCTL register
 *
 * Raises the I2C clock line '0'->'1'.  Repeats the write until the
 * clock input reads back high, to accommodate a slave that is
 * clock-stretching (holding SCL low).
 **/
static void
ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl)
{
	struct i40e_hw *hw = &pf->hw;
	u32 i = 0;
	u32 timeout = IXL_I2C_CLOCK_STRETCHING_TIMEOUT;
	u32 i2cctl_r = 0;

	for (i = 0; i < timeout; i++) {
		*i2cctl |= I40E_GLGEN_I2CPARAMS_CLK_MASK;
		*i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK);

		wr32(hw, IXL_I2C_REG(hw), *i2cctl);
		ixl_flush(hw);

		/* SCL rise time (1000ns) */
		i40e_usec_delay(IXL_I2C_T_RISE);

		/* Stop once the line actually reads back high. */
		i2cctl_r = rd32(hw, IXL_I2C_REG(hw));
		if (i2cctl_r & I40E_GLGEN_I2CPARAMS_CLK_IN_MASK)
			break;
	}
}
/**
 * ixl_get_i2c_data - Reads the I2C SDA data bit
 * @pf: pointer to the PF structure (unused; kept for interface symmetry)
 * @i2cctl: Current value of I2CCTL register
 *
 * Returns the I2C data bit value as reported by the DATA_IN bit of the
 * latched register value.
 **/
static bool
ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl)
{
	return ((*i2cctl & I40E_GLGEN_I2CPARAMS_DATA_IN_MASK) != 0);
}
/**
 * ixl_set_i2c_data - Sets the I2C data bit
 * @pf: pointer to the PF structure
 * @i2cctl: Current value of I2CCTL register
 * @data: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit and reads it back to verify it took effect.
 * Returns I40E_SUCCESS, or I40E_ERR_PHY if verification failed.
 **/
static s32
ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status = I40E_SUCCESS;

	DEBUGFUNC("ixl_set_i2c_data");

	if (data)
		*i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK;
	else
		*i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK);
	/* Enable the SDA output driver. */
	*i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK);

	wr32(hw, IXL_I2C_REG(hw), *i2cctl);
	ixl_flush(hw);

	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
	i40e_usec_delay(IXL_I2C_T_RISE + IXL_I2C_T_FALL + IXL_I2C_T_SU_DATA);

	/* Verify data was set correctly */
	*i2cctl = rd32(hw, IXL_I2C_REG(hw));
	if (data != ixl_get_i2c_data(pf, i2cctl)) {
		status = I40E_ERR_PHY;
		ixl_dbg(pf, IXL_DBG_I2C, "Error - I2C data was not set to %X.\n", data);
	}

	return status;
}
/**
 * ixl_i2c_start - Sets I2C start condition
 * @pf: pointer to the PF structure
 *
 * Sets I2C start condition (High -> Low on SDA while SCL is High)
 **/
static void
ixl_i2c_start(struct ixl_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));

	DEBUGFUNC("ixl_i2c_start");

	/* Start condition must begin with data and clock high */
	ixl_set_i2c_data(pf, &i2cctl, 1);
	ixl_raise_i2c_clk(pf, &i2cctl);

	/* Setup time for start condition (4.7us) */
	i40e_usec_delay(IXL_I2C_T_SU_STA);

	/* SDA falling while SCL stays high is the start condition itself. */
	ixl_set_i2c_data(pf, &i2cctl, 0);

	/* Hold time for start condition (4us) */
	i40e_usec_delay(IXL_I2C_T_HD_STA);

	ixl_lower_i2c_clk(pf, &i2cctl);

	/* Minimum low period of clock is 4.7 us */
	i40e_usec_delay(IXL_I2C_T_LOW);
}
/**
 * ixl_read_i2c_byte - Reads 8 bit word over I2C
 * @pf: pointer to the PF structure
 * @byte_offset: register offset within the device to read
 * @dev_addr: I2C device address (write form; the read bit is OR-ed in)
 * @data: storage for the byte read
 *
 * Performs a standard I2C combined write-then-read transaction, bit-banged
 * through GLGEN_I2CPARAMS, with up to 10 retries after a bus clear.
 * Returns I40E_SUCCESS or the last failure status.
 **/
s32
ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
    u8 dev_addr, u8 *data)
{
	struct i40e_hw *hw = &pf->hw;
	u32 max_retry = 10;
	u32 retry = 0;
	bool nack = 1;
	s32 status;
	*data = 0;

	/* Enable bit-bang mode on the I2C interface for the transaction. */
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	do {
		ixl_i2c_start(pf);

		/* Device Address and write indication */
		status = ixl_clock_out_i2c_byte(pf, dev_addr);
		if (status != I40E_SUCCESS) {
			ixl_dbg(pf, IXL_DBG_I2C, "dev_addr clock out error\n");
			goto fail;
		}

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS) {
			ixl_dbg(pf, IXL_DBG_I2C, "dev_addr i2c ack error\n");
			goto fail;
		}

		status = ixl_clock_out_i2c_byte(pf, byte_offset);
		if (status != I40E_SUCCESS) {
			ixl_dbg(pf, IXL_DBG_I2C, "byte_offset clock out error\n");
			goto fail;
		}

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS) {
			ixl_dbg(pf, IXL_DBG_I2C, "byte_offset i2c ack error\n");
			goto fail;
		}

		/* Repeated start to switch the bus direction to read. */
		ixl_i2c_start(pf);

		/* Device Address and read indication */
		status = ixl_clock_out_i2c_byte(pf, (dev_addr | 0x1));
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_clock_in_i2c_byte(pf, data);
		if (status != I40E_SUCCESS)
			goto fail;

		/* Master NACKs the single byte to end the read. */
		status = ixl_clock_out_i2c_bit(pf, nack);
		if (status != I40E_SUCCESS)
			goto fail;

		ixl_i2c_stop(pf);
		status = I40E_SUCCESS;
		goto done;

fail:
		/* Clock out a bus clear and back off before retrying. */
		ixl_i2c_bus_clear(pf);
		i40e_msec_delay(100);
		retry++;
		if (retry < max_retry)
			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n");
		else
			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n");

	} while (retry < max_retry);

done:
	/* Disable bit-bang mode again. */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	return status;
}
/**
 * ixl_write_i2c_byte - Writes 8 bit word over I2C
 * @pf: pointer to the PF structure
 * @byte_offset: register offset within the device to write
 * @dev_addr: I2C device address (write form)
 * @data: byte to write
 *
 * Performs a standard I2C write transaction (address, offset, data),
 * bit-banged through GLGEN_I2CPARAMS, with at most one retry after a bus
 * clear.  Returns I40E_SUCCESS or the last failure status.
 **/
s32
ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
    u8 dev_addr, u8 data)
{
	struct i40e_hw *hw = &pf->hw;
	s32 status = I40E_SUCCESS;
	u32 max_retry = 1;
	u32 retry = 0;

	/* Enable bit-bang mode on the I2C interface for the transaction. */
	u32 i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	do {
		ixl_i2c_start(pf);

		status = ixl_clock_out_i2c_byte(pf, dev_addr);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_clock_out_i2c_byte(pf, byte_offset);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_clock_out_i2c_byte(pf, data);
		if (status != I40E_SUCCESS)
			goto fail;

		status = ixl_get_i2c_ack(pf);
		if (status != I40E_SUCCESS)
			goto fail;

		ixl_i2c_stop(pf);
		goto write_byte_out;

fail:
		/* Clock out a bus clear and back off before retrying. */
		ixl_i2c_bus_clear(pf);
		i40e_msec_delay(100);
		retry++;
		if (retry < max_retry)
			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n");
		else
			ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n");
	} while (retry < max_retry);

write_byte_out:
	/* Disable bit-bang mode again. */
	i2cctl = rd32(hw, IXL_I2C_REG(hw));
	i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK;
	wr32(hw, IXL_I2C_REG(hw), i2cctl);
	ixl_flush(hw);

	return status;
}

View File

@ -42,7 +42,6 @@ static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
static bool ixl_zero_mac(const uint8_t *addr);
static bool ixl_bcast_mac(const uint8_t *addr);
static const char * ixl_vc_opcode_str(uint16_t op);
static int ixl_vc_opcode_level(uint16_t opcode);
static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
@ -421,58 +420,6 @@ ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
ixl_flush(hw);
}
/*
 * Return a human-readable name for a virtual channel opcode, for use in
 * debug/error messages.  Opcodes not in the table map to "UNKNOWN".
 */
static const char *
ixl_vc_opcode_str(uint16_t op)
{
	/* One entry per I40E_VIRTCHNL_OP_* value we know how to name. */
	static const struct {
		uint16_t	code;
		const char	*name;
	} vc_op_names[] = {
		{ I40E_VIRTCHNL_OP_VERSION,		"VERSION" },
		{ I40E_VIRTCHNL_OP_RESET_VF,		"RESET_VF" },
		{ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,	"GET_VF_RESOURCES" },
		{ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,	"CONFIG_TX_QUEUE" },
		{ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,	"CONFIG_RX_QUEUE" },
		{ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,	"CONFIG_VSI_QUEUES" },
		{ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,	"CONFIG_IRQ_MAP" },
		{ I40E_VIRTCHNL_OP_ENABLE_QUEUES,	"ENABLE_QUEUES" },
		{ I40E_VIRTCHNL_OP_DISABLE_QUEUES,	"DISABLE_QUEUES" },
		{ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,	"ADD_ETHER_ADDRESS" },
		{ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,	"DEL_ETHER_ADDRESS" },
		{ I40E_VIRTCHNL_OP_ADD_VLAN,		"ADD_VLAN" },
		{ I40E_VIRTCHNL_OP_DEL_VLAN,		"DEL_VLAN" },
		{ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
							"CONFIG_PROMISCUOUS_MODE" },
		{ I40E_VIRTCHNL_OP_GET_STATS,		"GET_STATS" },
		{ I40E_VIRTCHNL_OP_FCOE,		"FCOE" },
		{ I40E_VIRTCHNL_OP_EVENT,		"EVENT" },
		{ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,	"CONFIG_RSS_KEY" },
		{ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,	"CONFIG_RSS_LUT" },
		{ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,	"GET_RSS_HENA_CAPS" },
		{ I40E_VIRTCHNL_OP_SET_RSS_HENA,	"SET_RSS_HENA" },
	};
	size_t i;

	for (i = 0; i < sizeof(vc_op_names) / sizeof(vc_op_names[0]); i++) {
		if (vc_op_names[i].code == op)
			return (vc_op_names[i].name);
	}
	return ("UNKNOWN");
}
static int
ixl_vc_opcode_level(uint16_t opcode)
{
@ -1459,7 +1406,7 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
} else {
for (int i = 0; i < (key->key_len / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
@ -1514,7 +1461,7 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
}
} else {
for (int i = 0; i < (lut->lut_entries / 4); i++)
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
}
DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
@ -1541,8 +1488,8 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hena = msg;
/* Set HENA */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
vf->vf_num, hena->hena);
@ -1768,8 +1715,6 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
goto fail;
}
ixl_enable_adminq(hw);
pf->num_vfs = num_vfs;
IXL_PF_UNLOCK(pf);
return (0);
@ -1811,11 +1756,6 @@ ixl_iov_uninit(device_t dev)
pf->veb_seid = 0;
}
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
ixl_disable_intr(vsi);
ixl_flush(hw);
}
vfs = pf->vfs;
num_vfs = pf->num_vfs;

View File

@ -42,6 +42,9 @@
#include <sys/iov_schema.h>
#include <dev/pci/pci_iov.h>
#define IXL_GLOBAL_VF_NUM(hw, vf) \
(vf->vf_num + hw->func_caps.vf_base_id)
/* Public functions */
/*

File diff suppressed because it is too large Load Diff

View File

@ -218,22 +218,27 @@ static inline bool
ixl_tso_detect_sparse(struct mbuf *mp)
{
struct mbuf *m;
int num = 0, mss;
bool ret = FALSE;
int num, mss;
num = 0;
mss = mp->m_pkthdr.tso_segsz;
for (m = mp->m_next; m != NULL; m = m->m_next) {
num++;
mss -= m->m_len;
if (mss < 1)
break;
if (m->m_next == NULL)
break;
}
if (num > IXL_SPARSE_CHAIN)
ret = TRUE;
return (ret);
/* Exclude first mbuf; assume it contains all headers */
for (m = mp->m_next; m != NULL; m = m->m_next) {
if (m == NULL)
break;
num++;
mss -= m->m_len % mp->m_pkthdr.tso_segsz;
if (mss < 1) {
if (num > IXL_SPARSE_CHAIN)
return (true);
num = (mss == 0) ? 0 : 1;
mss += mp->m_pkthdr.tso_segsz;
}
}
return (false);
}
@ -312,18 +317,12 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
error = bus_dmamap_load_mbuf_sg(tag, map,
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
if (error != 0) {
que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
}
} else if (error == ENOMEM) {
que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
que->tx_dmamap_failed++;
m_freem(*m_headp);
@ -404,8 +403,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
wr32(hw, txr->tail, i);
/* Mark outstanding work */
if (que->busy == 0)
que->busy = 1;
atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG);
return (0);
xmit_fail:
@ -524,12 +522,14 @@ ixl_init_tx_ring(struct ixl_queue *que)
txr->next_avail = 0;
txr->next_to_clean = 0;
/* Reset watchdog status */
txr->watchdog_timer = 0;
#ifdef IXL_FDIR
/* Initialize flow director */
txr->atr_rate = ixl_atr_rate;
txr->atr_count = 0;
#endif
/* Free any existing tx mbufs. */
buf = txr->buffers;
for (int i = 0; i < que->num_desc; i++, buf++) {
@ -818,7 +818,11 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
/* ERJ: this must not be less than 64 */
/* TSO MSS must not be less than 64 */
if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) {
que->mss_too_small++;
mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS;
}
mss = mp->m_pkthdr.tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
@ -878,7 +882,7 @@ ixl_txeof(struct ixl_queue *que)
/* These are not the descriptors you seek, move along :) */
if (txr->avail == que->num_desc) {
que->busy = 0;
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@ -956,26 +960,11 @@ ixl_txeof(struct ixl_queue *que)
txr->next_to_clean = first;
/*
** Hang detection, we know there's
** work outstanding or the first return
** would have been taken, so indicate an
** unsuccessful pass, in local_timer if
** the value is too great the queue will
** be considered hung. If anything has been
** cleaned then reset the state.
*/
if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
++que->busy;
if (processed)
que->busy = 1; /* Note this turns off HUNG */
/*
* If there are no pending descriptors, clear the timeout.
*/
if (txr->avail == que->num_desc) {
que->busy = 0;
atomic_store_rel_32(&txr->watchdog_timer, 0);
return FALSE;
}
@ -1753,8 +1742,16 @@ ixl_rxeof(struct ixl_queue *que, int count)
/*
* Flush any outstanding LRO work
*/
#if __FreeBSD_version >= 1100105
tcp_lro_flush_all(lro);
#else
struct lro_entry *queued;
while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
SLIST_REMOVE_HEAD(&lro->lro_active, next);
tcp_lro_flush(lro, queued);
}
#endif
#endif /* defined(INET6) || defined(INET) */
IXL_RX_UNLOCK(rxr);
return (FALSE);

View File

@ -38,7 +38,8 @@
#include "ixlv_vc_mgr.h"
#define IXLV_AQ_MAX_ERR 200
#define IXLV_AQ_MAX_ERR 30
#define IXLV_MAX_INIT_WAIT 120
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)

View File

@ -178,8 +178,11 @@ ixlv_send_pf_msg(struct ixlv_sc *sc,
err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
if (err)
device_printf(dev, "Unable to send opcode %d to PF, "
"error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
device_printf(dev, "Unable to send opcode %s to PF, "
"status %s, aq error %s\n",
ixl_vc_opcode_str(op),
i40e_stat_str(hw, err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@ -871,7 +874,7 @@ ixlv_set_rss_hena(struct ixlv_sc *sc)
{
struct i40e_virtchnl_rss_hena hena;
hena.hena = IXL_DEFAULT_RSS_HENA;
hena.hena = IXL_DEFAULT_RSS_HENA_X722;
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
(u8 *)&hena, sizeof(hena));
@ -972,8 +975,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Catch-all error response */
if (v_retval) {
device_printf(dev,
"%s: AQ returned error %d to our request %d!\n",
__func__, v_retval, v_opcode);
"%s: AQ returned error %s to our request %s!\n",
__func__, i40e_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode));
}
#ifdef IXL_DEBUG
@ -1055,8 +1058,8 @@ ixlv_vc_completion(struct ixlv_sc *sc,
default:
#ifdef IXL_DEBUG
device_printf(dev,
"%s: Received unexpected message %d from PF.\n",
__func__, v_opcode);
"%s: Received unexpected message %s from PF.\n",
__func__, ixl_vc_opcode_str(v_opcode));
#endif
break;
}

View File

@ -4,9 +4,9 @@
KMOD = if_ixl
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c i40e_osdep.c
SRCS += ixl_pf_iov.c
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS += if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
SRCS += ixl_pf_iov.c ixl_iw.c
# Shared source
SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c

View File

@ -3,8 +3,8 @@
.PATH: ${.CURDIR}/../../dev/ixl
KMOD = if_ixlv
SRCS = device_if.h bus_if.h pci_if.h pci_iov_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h
SRCS = device_if.h bus_if.h pci_if.h
SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c
# Shared source