From 61ae650d55553d48c55fbe023706dfa4b97483bb Mon Sep 17 00:00:00 2001 From: Jack F Vogel Date: Fri, 22 Aug 2014 18:59:19 +0000 Subject: [PATCH] Update to the Intel Base driver for the Intel XL710 Ethernet Controller Family - It was decided to change the driver name to if_ixl for FreeBSD - This release adds the VF Driver to the tree, it can be built into the kernel or as the if_ixlv module - The VF driver is independent for the first time, this will be desireable when full SRIOV capability is added to the OS. - Thanks to my new coworker Eric Joyner for his superb work in both the core and vf driver code. Enjoy everyone! Submitted by: jack.vogel@intel.com and eric.joyner@intel.com MFC after: 3 days (hoping to make 10.1) --- sys/conf/files | 36 +- sys/dev/i40e/i40e_adminq_cmd.h | 2180 ------------- sys/dev/{i40e => ixl}/README | 22 +- sys/dev/{i40e => ixl}/i40e_adminq.c | 154 +- sys/dev/{i40e => ixl}/i40e_adminq.h | 5 +- sys/dev/ixl/i40e_adminq_cmd.h | 2180 +++++++++++++ sys/dev/{i40e => ixl}/i40e_alloc.h | 0 sys/dev/{i40e => ixl}/i40e_common.c | 101 +- sys/dev/{i40e => ixl}/i40e_hmc.c | 0 sys/dev/{i40e => ixl}/i40e_hmc.h | 0 sys/dev/{i40e => ixl}/i40e_lan_hmc.c | 0 sys/dev/{i40e => ixl}/i40e_lan_hmc.h | 0 sys/dev/{i40e => ixl}/i40e_nvm.c | 0 sys/dev/{i40e => ixl}/i40e_osdep.c | 2 +- sys/dev/{i40e => ixl}/i40e_osdep.h | 51 +- sys/dev/{i40e => ixl}/i40e_prototype.h | 6 +- sys/dev/{i40e => ixl}/i40e_register.h | 0 .../{i40e => ixl}/i40e_register_x710_int.h | 0 sys/dev/{i40e => ixl}/i40e_status.h | 0 sys/dev/{i40e => ixl}/i40e_type.h | 6 +- sys/dev/{i40e => ixl}/i40e_virtchnl.h | 1 + sys/dev/{i40e/if_i40e.c => ixl/if_ixl.c} | 1585 +++++----- sys/dev/ixl/if_ixlv.c | 2742 +++++++++++++++++ sys/dev/{i40e/i40e.h => ixl/ixl.h} | 280 +- sys/dev/{i40e/i40e_pf.h => ixl/ixl_pf.h} | 24 +- sys/dev/{i40e/i40e_txrx.c => ixl/ixl_txrx.c} | 327 +- sys/dev/ixl/ixlv.h | 205 ++ sys/dev/ixl/ixlvc.c | 976 ++++++ sys/modules/{i40e => ixl}/Makefile | 11 +- sys/modules/ixlv/Makefile | 20 + 30 
files changed, 7527 insertions(+), 3387 deletions(-) delete mode 100755 sys/dev/i40e/i40e_adminq_cmd.h rename sys/dev/{i40e => ixl}/README (92%) rename sys/dev/{i40e => ixl}/i40e_adminq.c (87%) rename sys/dev/{i40e => ixl}/i40e_adminq.h (98%) create mode 100755 sys/dev/ixl/i40e_adminq_cmd.h rename sys/dev/{i40e => ixl}/i40e_alloc.h (100%) rename sys/dev/{i40e => ixl}/i40e_common.c (97%) rename sys/dev/{i40e => ixl}/i40e_hmc.c (100%) rename sys/dev/{i40e => ixl}/i40e_hmc.h (100%) rename sys/dev/{i40e => ixl}/i40e_lan_hmc.c (100%) rename sys/dev/{i40e => ixl}/i40e_lan_hmc.h (100%) rename sys/dev/{i40e => ixl}/i40e_nvm.c (100%) rename sys/dev/{i40e => ixl}/i40e_osdep.c (99%) rename sys/dev/{i40e => ixl}/i40e_osdep.h (84%) rename sys/dev/{i40e => ixl}/i40e_prototype.h (99%) rename sys/dev/{i40e => ixl}/i40e_register.h (100%) rename sys/dev/{i40e => ixl}/i40e_register_x710_int.h (100%) rename sys/dev/{i40e => ixl}/i40e_status.h (100%) rename sys/dev/{i40e => ixl}/i40e_type.h (99%) rename sys/dev/{i40e => ixl}/i40e_virtchnl.h (99%) rename sys/dev/{i40e/if_i40e.c => ixl/if_ixl.c} (76%) create mode 100644 sys/dev/ixl/if_ixlv.c rename sys/dev/{i40e/i40e.h => ixl/ixl.h} (64%) mode change 100755 => 100644 rename sys/dev/{i40e/i40e_pf.h => ixl/ixl_pf.h} (84%) mode change 100755 => 100644 rename sys/dev/{i40e/i40e_txrx.c => ixl/ixl_txrx.c} (84%) create mode 100644 sys/dev/ixl/ixlv.h create mode 100644 sys/dev/ixl/ixlvc.c rename sys/modules/{i40e => ixl}/Makefile (58%) create mode 100755 sys/modules/ixlv/Makefile diff --git a/sys/conf/files b/sys/conf/files index 82b55af383b5..ace863316c08 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -1424,22 +1424,26 @@ dev/hptiop/hptiop.c optional hptiop scbus dev/hwpmc/hwpmc_logging.c optional hwpmc dev/hwpmc/hwpmc_mod.c optional hwpmc dev/hwpmc/hwpmc_soft.c optional hwpmc -dev/i40e/if_i40e.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_txrx.c optional i40e inet \ - compile-with "${NORMAL_C} 
-I$S/dev/i40e -DSMP" -dev/i40e/i40e_osdep.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_nvm.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_lan_hmc.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_hmc.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_common.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" -dev/i40e/i40e_adminq.c optional i40e inet \ - compile-with "${NORMAL_C} -I$S/dev/i40e -DSMP" +dev/ixl/if_ixl.c optional ixl inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/if_ixlv.c optional ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/ixlvc.c optional ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/ixl_txrx.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_osdep.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_lan_hmc.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_hmc.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_common.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_nvm.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" +dev/ixl/i40e_adminq.c optional ixl ixlv inet \ + compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ichsmb/ichsmb.c optional ichsmb dev/ichsmb/ichsmb_pci.c optional ichsmb pci dev/ida/ida.c optional ida diff --git a/sys/dev/i40e/i40e_adminq_cmd.h b/sys/dev/i40e/i40e_adminq_cmd.h deleted file mode 100755 index 09b5887512d4..000000000000 --- a/sys/dev/i40e/i40e_adminq_cmd.h +++ /dev/null @@ -1,2180 +0,0 @@ -/****************************************************************************** - - Copyright (c) 2013-2014, Intel Corporation - All rights reserved. 
- - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. - -******************************************************************************/ -/*$FreeBSD$*/ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. - * - * This file needs to comply with the Linux Kernel coding style. 
- */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0002 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - 
I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue command opcodes */ -enum i40e_admin_queue_opc { - /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, - - /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, - - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, - - i40e_aqc_opc_set_cppm_configuration = 0x0103, - i40e_aqc_opc_set_arp_proxy_entry = 0x0104, - i40e_aqc_opc_set_ns_proxy_entry = 0x0105, - - /* LAA */ - i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, - - /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, - - /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - 
i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, - - /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, - - /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config 
= 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, - - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - - /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, - - /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, - - /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, - - /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, - - /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, - - /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_tunnel_key_structure = 0x0B10, - - /* Async Events 
*/ - i40e_aqc_opc_event_lan_overflow = 0x1001, - - /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, - - /* debug commands */ - i40e_aqc_opc_debug_get_deviceid = 0xFF00, - i40e_aqc_opc_debug_set_mode = 0xFF01, - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, - i40e_aqc_opc_debug_modify_internals = 0xFF09, -}; - -/* command structures and indirect data structures */ - -/* Structure naming conventions: - * - no suffix for direct command descriptor structures - * - _data for indirect sent data - * - _resp for indirect return data (data which is both will use _data) - * - _completion for direct return data - * - _element_ for repeated elements (may also be _data or _resp) - * - * Command structures are expected to overlay the params.raw member of the basic - * descriptor, and as such cannot exceed 16 bytes in length. - */ - -/* This macro is used to generate a compilation error if a structure - * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. - */ -#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ - { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } - -/* This macro is used extensively to ensure that command structures are 16 - * bytes in length as they have to map to the raw array of that size. 
- */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) - -/* internal (0x00XX) commands */ - -/* Get version (direct 0x0001) */ -struct i40e_aqc_get_version { - __le32 rom_ver; - __le32 fw_build; - __le16 fw_major; - __le16 fw_minor; - __le16 api_major; - __le16 api_minor; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); - -/* Send driver version (indirect 0x0002) */ -struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); - -/* Queue Shutdown (direct 0x0003) */ -struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); - -/* Set PF context (0x0004, direct) */ -struct i40e_aqc_set_pf_context { - u8 pf_id; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); - -/* Request resource ownership (direct 0x0008) - * Release resource ownership (direct 0x0009) - */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - -struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); - -/* Get function capabilities (indirect 0x000A) - * Get device capabilities (indirect 0x000B) - */ -struct i40e_aqc_list_capabilites { - u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 - u8 pf_index; - u8 reserved[2]; - __le32 count; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); - -struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - 
__le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; -}; - -/* list of caps */ - -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 - -/* Set CPPM Configuration (direct 0x0103) */ -struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); - -/* Set ARP Proxy command / response (indirect 0x0104) */ -struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 - __le16 table_id; - __le32 pfpm_proxyfc; - __le32 ip_addr; - u8 mac_addr[6]; -}; - -/* Set NS Proxy Table Entry Command 
(indirect 0x0105) */ -struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; -}; - -/* Manage LAA Command (0x0106) - obsolete */ -struct i40e_aqc_mng_laa { - __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; -}; - -/* Manage MAC Address Read Command (indirect 0x0107) */ -struct i40e_aqc_mac_address_read { - __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_ADDR_VALID_MASK 0xf0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); - -struct i40e_aqc_mac_address_read_data { - u8 pf_lan_mac[6]; - u8 pf_san_mac[6]; - u8 port_mac[6]; - u8 pf_wol_mac[6]; -}; - -I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); - -/* Manage MAC Address Write Command (0x0108) */ -struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_MASK 0xc000 - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); - -/* PXE commands (0x011x) */ - -/* Clear PXE Command and response (direct 0x0110) */ -struct i40e_aqc_clear_pxe { - u8 rx_cnt; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); - -/* Switch configuration commands (0x02xx) */ - -/* Used by many indirect commands that only pass an seid and a buffer in the - * command - */ -struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); - -/* Get Switch Configuration command (indirect 0x0200) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; -}; - -struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; -}; - -/* Get Switch Configuration (indirect 0x0200) - * an array of elements are returned in the response buffer - * the first in the array is the header, remainder are elements - */ -struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; -}; - -/* Add Statistics (direct 0x0201) - * Remove Statistics (direct 0x0202) - */ -struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 
reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); - -/* Set Port Parameters command (direct 0x0203) */ -struct i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); - -/* Get Switch Resource Allocation (indirect 0x0204) */ -struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); - -/* expect an array of these structs in the response buffer */ -struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; -}; - -/* Add VSI (indirect 0x0210) - * this indirect command uses struct 
i40e_aqc_vsi_properties_data - * as the indirect buffer (128 bytes) - * - * Update VSI (indirect 0x211) - * uses the same data structure as Add VSI - * - * Get VSI (indirect 0x0212) - * uses the same completion and data structure as Add VSI - */ -struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); - -struct i40e_aqc_add_get_update_vsi_completion { - __le16 seid; - __le16 vsi_number; - __le16 vsi_used; - __le16 vsi_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); - -struct i40e_aqc_vsi_properties_data { - /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 - /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 
0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; - /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; - /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; - /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define 
I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ - /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; - /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) - /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 - u8 queueing_opt_reserved[3]; - /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; - /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ - u8 cmd_reserved[8]; - /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; -#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; -}; - -I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); - -/* Add Port Virtualizer (direct 0x0220) - * also used for update PV (direct 0x0221) but only flags are used - * (IS_CTRL_PORT only works on add PV) - */ -struct 
i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); - -struct i40e_aqc_add_update_pv_completion { - /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); - -/* Get PV Params (direct 0x0222) - * uses i40e_aqc_switch_seid for the descriptor - */ - -struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); - -/* Add VEB (direct 0x0230) */ -struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 - u8 enable_tcs; - u8 reserved[9]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); - -struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; - /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define 
I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); - -/* Get VEB Parameters (direct 0x0232) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); - -/* Delete Element (direct 0x0243) - * uses the generic i40e_aqc_switch_seid - */ - -/* Add MAC-VLAN (indirect 0x0250) */ - -/* used for the command for most vlan commands */ -struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); - -/* indirect data for command and response */ -struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) - /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_macvlan_completion { - __le16 perfect_mac_used; - __le16 perfect_mac_free; - __le16 unicast_hash_free; - __le16 multicast_hash_free; - __le32 addr_high; - 
__le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); - -/* Remove MAC-VLAN (indirect 0x0251) - * uses i40e_aqc_macvlan for the descriptor - * data points to an array of num_addresses of elements - */ - -struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; - /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; -}; - -/* Add VLAN (indirect 0x0252) - * Remove VLAN (indirect 0x0253) - * use the generic i40e_aqc_macvlan for the command - */ -struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 
vlans_free; - __le32 addr_high; - __le32 addr_low; -}; - -/* Set VSI Promiscuous Modes (direct 0x0254) */ -struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; -/* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); - -/* Add S/E-tag command (direct 0x0255) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); - -struct i40e_aqc_add_remove_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); - -/* Remove S/E-tag command (direct 0x0256) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; -}; - -/* Add multicast E-Tag (direct 0x0257) - * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields - * and no external data - */ -struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 
addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); - -struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; - -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); - -/* Update S/E-Tag (direct 0x0259) */ -struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); - -struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); - -/* Add Control Packet filter (direct 0x025A) - * Remove Control Packet filter (direct 0x025B) - * uses the i40e_aqc_add_oveb_cloud, - * and the generic direct completion structure - */ -struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); - -struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); - -/* Add Cloud filters (indirect 0x025C) - * Remove Cloud 
filters (indirect 0x025D) - * uses the i40e_aqc_add_remove_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); - -struct i40e_aqc_add_remove_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; - union { - struct { - u8 reserved[12]; - u8 data[4]; - } v4; - struct { - u8 data[16]; - } v6; - } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -/* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 -/* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 -/* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 -/* 0x0007 reserved */ -/* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; - /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; -}; - -struct i40e_aqc_remove_cloud_filters_completion { - __le16 perfect_ovlan_used; - __le16 perfect_ovlan_free; - __le16 vlan_used; - __le16 vlan_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); - -/* Add Mirror Rule (indirect or direct 0x0260) - * Delete Mirror Rule (indirect or direct 0x0261) - * note: some rule types (4,5) do not use an external buffer. - * take care to set the flags correctly. - */ -struct i40e_aqc_add_delete_mirror_rule { - __le16 seid; - __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ - I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 - __le16 num_entries; - __le16 destination; /* VSI for add, rule id for delete */ - __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); - -struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); - -/* DCB 0x03xx*/ - -/* PFC Ignore (direct 0x0301) - * the command and response use the same descriptor structure - */ -struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - 
u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); - -/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure - * with no parameters - */ - -/* TX scheduler 0x04xx */ - -/* Almost all the indirect commands use - * this generic struct to pass the SEID in param0 - */ -struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); - -/* Several commands respond with a set of queue set handles */ -struct i40e_aqc_qs_handles_resp { - __le16 qs_handles[8]; -}; - -/* Configure VSI BW limits (direct 0x0400) */ -struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); - -/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; -}; - -/* Query vsi bw configuration (indirect 0x0408) */ -struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; -}; - -/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ -struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 
share_credits[8]; - __le16 credits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ -struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); - -/* Enable Physical Port ETS (indirect 0x0413) - * Modify Physical Port ETS (indirect 0x0414) - * Disable Physical Port ETS (indirect 0x0415) - */ -struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; -}; - -/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ -struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -/* Configure Switching Component Bandwidth Allocation per Tc - * (indirect 0x0417) - */ -struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; -}; - -/* Query Switching Component Configuration (indirect 0x0418) */ -struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; -}; - -/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ -struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per 
tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; -}; - -/* Query Switching Component Bandwidth Allocation per Traffic Type - * (indirect 0x041A) - */ -struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -/* Suspend/resume port TX traffic - * (direct 0x041B and 0x041C) uses the generic SEID struct - */ - -/* Configure partition BW - * (indirect 0x041D) - */ -struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ -}; - -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - -/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ - -/* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 - -enum i40e_aq_phy_type { - I40E_PHY_TYPE_SGMII = 0x0, - I40E_PHY_TYPE_1000BASE_KX = 0x1, - I40E_PHY_TYPE_10GBASE_KX4 = 0x2, - I40E_PHY_TYPE_10GBASE_KR = 0x3, - I40E_PHY_TYPE_40GBASE_KR4 = 0x4, - I40E_PHY_TYPE_XAUI = 0x5, - I40E_PHY_TYPE_XFI = 0x6, - I40E_PHY_TYPE_SFI = 0x7, - I40E_PHY_TYPE_XLAUI = 0x8, - I40E_PHY_TYPE_XLPPI = 0x9, - I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, - 
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, - I40E_PHY_TYPE_10GBASE_AOC = 0xC, - I40E_PHY_TYPE_40GBASE_AOC = 0xD, - I40E_PHY_TYPE_100BASE_TX = 0x11, - I40E_PHY_TYPE_1000BASE_T = 0x12, - I40E_PHY_TYPE_10GBASE_T = 0x13, - I40E_PHY_TYPE_10GBASE_SR = 0x14, - I40E_PHY_TYPE_10GBASE_LR = 0x15, - I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, - I40E_PHY_TYPE_10GBASE_CR1 = 0x17, - I40E_PHY_TYPE_40GBASE_CR4 = 0x18, - I40E_PHY_TYPE_40GBASE_SR4 = 0x19, - I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, - I40E_PHY_TYPE_1000BASE_SX = 0x1B, - I40E_PHY_TYPE_1000BASE_LX = 0x1C, - I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, - I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, - I40E_PHY_TYPE_MAX -}; - -#define I40E_LINK_SPEED_100MB_SHIFT 0x1 -#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 -#define I40E_LINK_SPEED_10GB_SHIFT 0x3 -#define I40E_LINK_SPEED_40GB_SHIFT 0x4 -#define I40E_LINK_SPEED_20GB_SHIFT 0x5 - -enum i40e_aq_link_speed { - I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) -}; - -struct i40e_aqc_module_desc { - u8 oui[3]; - u8 reserved1; - u8 part_number[16]; - u8 revision[4]; - u8 reserved2[8]; -}; - -struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define 
I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; -}; - -/* Set PHY Config (direct 0x0601) */ -struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; -/* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 -#define I40E_AQ_PHY_ENABLE_AN 0x10 -#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 reserved[3]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); - -/* Set MAC Config command data structure (direct 0x0603) */ -struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); - -/* Restart Auto-Negotiation (direct 0x605) */ -struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); - -/* Get Link Status cmd & response data structure (direct 0x0607) */ -struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 -/* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); - -/* Set event mask command (direct 0x613) */ -struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define 
I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); - -/* Get Local AN advt register (direct 0x0614) - * Set Local AN advt register (direct 0x0615) - * Get Link Partner AN advt register (direct 0x0616) - */ -struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); - -/* Set Loopback mode (0x0618) */ -struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); - -/* Set PHY Debug command (0x0622) */ -struct i40e_aqc_set_phy_debug { - u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 -#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); - -enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 -}; - -/* NVM Read command (indirect 0x0701) - * NVM Erase commands (direct 0x0702) - * NVM Update commands (indirect 0x0703) - */ -struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); - -/* NVM Config Read (indirect 0x0704) */ -struct i40e_aqc_nvm_config_read 
{ - __le16 cmd_flags; -#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define ANVM_READ_SINGLE_FEATURE 0 -#define ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - u8 reserved[2]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); - -/* NVM Config Write (indirect 0x0705) */ -struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); - -struct i40e_aqc_nvm_config_data_feature { - __le16 feature_id; - __le16 instance_id; - __le16 feature_options; - __le16 feature_selection; -}; - -struct i40e_aqc_nvm_config_data_immediate_field { -#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 - __le16 field_id; - __le16 instance_id; - __le16 field_options; - __le16 field_value; -}; - -/* Send to PF command (indirect 0x0801) id is only used by PF - * Send to VF command (indirect 0x0802) id is only used by PF - * Send to Peer PF command (indirect 0x0803) - */ -struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); - -/* Alternate structure */ - -/* Direct write (direct 0x0900) - * Direct read (direct 0x0902) - */ -struct i40e_aqc_alternate_write { - __le32 address0; - __le32 data0; - __le32 address1; - __le32 data1; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); - -/* Indirect write (indirect 0x0901) - * Indirect read (indirect 0x0903) - */ - -struct i40e_aqc_alternate_ind_write { - __le32 address; - __le32 length; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); - -/* Done alternate write (direct 0x0904) - * uses i40e_aq_desc - */ -struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define 
I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); - -/* Set OEM mode (direct 0x0905) */ -struct i40e_aqc_alternate_set_mode { - __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); - -/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ - -/* async events 0x10xx */ - -/* Lan Queue Overflow Event (direct, 0x1001) */ -struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); - -/* Get LLDP MIB (indirect 0x0A00) */ -struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) -/* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); - -/* Configure LLDP MIB Change Event (direct 0x0A01) - * also used for the event (with type in the command field) - */ -struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); - -/* Add LLDP TLV (indirect 0x0A02) - * Delete LLDP TLV (indirect 0x0A04) - */ -struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 
reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); - -/* Update LLDP TLV (indirect 0x0A03) */ -struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); - -/* Stop LLDP (direct 0x0A05) */ -struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); - -/* Start LLDP (direct 0x0A06) */ - -struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); - -/* Apply MIB changes (0x0A07) - * uses the generic struc as it contains no data - */ - -/* Add Udp Tunnel command and completion (direct 0x0B00) */ -struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; -#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 -#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 - u8 reserved1[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); - -struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; - u8 filter_entry_index; - u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 - u8 total_filters; - u8 reserved[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); - -/* remove UDP Tunnel command (0x0B01) */ -struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 reserved2[13]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); - -struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved1[11]; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); - -/* tunnel key structure 0x0B10 */ - -struct i40e_aqc_tunnel_key_structure { - u8 key1_off; - u8 key2_off; - u8 key1_len; /* 0 to 15 */ - u8 key2_len; /* 0 to 15 */ - u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); - -/* OEM mode commands (direct 0xFE0x) */ -struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - u8 param_value2[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); - -struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); - -/* debug commands */ - -/* get device id (0xFF00) uses the generic structure */ - -/* set test more (0xFF01, internal) */ - -struct i40e_acq_set_test_mode { - u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); - -/* Debug Read Register command (0xFF03) - * Debug Write Register command (0xFF04) - */ -struct i40e_aqc_debug_reg_read_write { - __le32 reserved; - __le32 address; - __le32 value_high; - __le32 value_low; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); - -/* Scatter/gather Reg Read (indirect 0xFF05) - * Scatter/gather Reg Write (indirect 0xFF06) - */ - -/* i40e_aq_desc is used for the command */ -struct i40e_aqc_debug_reg_sg_element_data { - __le32 address; - __le32 value; -}; - -/* Debug Modify register (direct 0xFF07) */ -struct i40e_aqc_debug_modify_reg { - __le32 address; - __le32 value; - __le32 clear_mask; - __le32 set_mask; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); - -/* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - -struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); - -struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); - -#endif diff --git a/sys/dev/i40e/README b/sys/dev/ixl/README similarity index 92% rename from sys/dev/i40e/README rename to sys/dev/ixl/README index e52b09962cbd..066e4e4c9c0f 100644 --- a/sys/dev/i40e/README +++ b/sys/dev/ixl/README @@ -1,7 +1,7 @@ -FreeBSD Base Driver for the Intel® XL710 Ethernet Controller Family -================================================================ +ixl FreeBSD* Base Driver for the Intel® XL710 Ethernet Controller Family /*$FreeBSD$*/ +================================================================ July 21, 2014 @@ -19,7 +19,7 @@ Contents Overview ======== 
-This file describes the i40e FreeBSD* Base driver for the XL710 Ethernet Family of Adapters. The Driver has been developed for use with FreeBSD 10.0 or later, but should be compatible with any supported release. +This file describes the IXL FreeBSD* Base driver for the XL710 Ethernet Family of Adapters. The Driver has been developed for use with FreeBSD 10.0 or later, but should be compatible with any supported release. For questions related to hardware requirements, refer to the documentation supplied with your Intel XL710 adapter. All hardware requirements listed apply for use with FreeBSD. @@ -60,17 +60,17 @@ NOTE: You must have kernel sources installed to compile the driver module. In the instructions below, x.x.x is the driver version as indicated in thename of the driver tar. -1. Move the base driver tar file to the directory of your choice. For example, use /home/username/i40e or /usr/local/src/i40e. +1. Move the base driver tar file to the directory of your choice. For example, use /home/username/ixl or /usr/local/src/ixl. 2. Untar/unzip the archive: - tar xfz i40e-x.x.x.tar.gz + tar xfz ixl-x.x.x.tar.gz 3. To install man page: - cd i40e-x.x.x - gzip -c i40e.4 > /usr/share/man/man4/i40e.4.gz + cd ixl-x.x.x + gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz 4. To load the driver onto a running system: - cd i40e-x.x.x/src + cd ixl-x.x.x/src make load 5. To assign an IP address to the interface, enter the following: @@ -82,12 +82,12 @@ as indicated in thename of the driver tar. 7. 
If you want the driver to load automatically when the system is booted: - cd i40e-x.x.x/src + cd ixl-x.x.x/src make make install Edit /boot/loader.conf, and add the following line: - if_i40e_load="YES" + if_ixl_load="YES" Edit /etc/rc.conf, and create the appropriate ifconfig_ixl entry: @@ -304,7 +304,7 @@ Also, increasing the follwing in /etc/sysctl.conf could help increase network UDP Stress Test Dropped Packet Issue ------------------------------------ - Under small packet UDP stress test with the i40e driver, the FreeBSD system will drop UDP packets due to the fullness of socket buffers. You may want to change the driver's Flow Control variables to the minimum value for controlling packet reception. + Under small packet UDP stress test with the ixl driver, the FreeBSD system will drop UDP packets due to the fullness of socket buffers. You may want to change the driver's Flow Control variables to the minimum value for controlling packet reception. Disable LRO when routing/bridging diff --git a/sys/dev/i40e/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c similarity index 87% rename from sys/dev/i40e/i40e_adminq.c rename to sys/dev/ixl/i40e_adminq.c index cca01a6d0b35..e0f87252e5a8 100755 --- a/sys/dev/i40e/i40e_adminq.c +++ b/sys/dev/ixl/i40e_adminq.c @@ -57,7 +57,7 @@ static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) static void i40e_adminq_init_regs(struct i40e_hw *hw) { /* set head and tail registers in our local struct */ - if (hw->mac.type == I40E_MAC_VF) { + if (i40e_is_vf(hw)) { hw->aq.asq.tail = I40E_VF_ATQT1; hw->aq.asq.head = I40E_VF_ATQH1; hw->aq.asq.len = I40E_VF_ATQLEN1; @@ -68,19 +68,6 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw) hw->aq.arq.len = I40E_VF_ARQLEN1; hw->aq.arq.bal = I40E_VF_ARQBAL1; hw->aq.arq.bah = I40E_VF_ARQBAH1; -#ifdef I40E_QV - } else if (hw->aq_dbg_ena) { - hw->aq.asq.tail = I40E_GL_ATQT; - hw->aq.asq.head = I40E_GL_ATQH; - hw->aq.asq.len = I40E_GL_ATQLEN; - hw->aq.asq.bal = I40E_GL_ATQBAL; - 
hw->aq.asq.bah = I40E_GL_ATQBAH; - hw->aq.arq.tail = I40E_GL_ARQT; - hw->aq.arq.head = I40E_GL_ARQH; - hw->aq.arq.len = I40E_GL_ARQLEN; - hw->aq.arq.bal = I40E_GL_ARQBAL; - hw->aq.arq.bah = I40E_GL_ARQBAH; -#endif } else { hw->aq.asq.tail = I40E_PF_ATQT; hw->aq.asq.head = I40E_PF_ATQH; @@ -169,10 +156,6 @@ void i40e_free_adminq_arq(struct i40e_hw *hw) **/ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) { -#ifdef I40E_QV - struct i40e_aq_desc qv_desc; - struct i40e_aq_desc *qv_desc_on_ring; -#endif enum i40e_status_code ret_code; struct i40e_aq_desc *desc; struct i40e_dma_mem *bi; @@ -201,13 +184,6 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) /* now configure the descriptors for use */ desc = I40E_ADMINQ_DESC(hw->aq.arq, i); -#ifdef I40E_QV - /* swap the descriptor with userspace version */ - i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc), - I40E_DMA_TO_NONDMA); - qv_desc_on_ring = desc; - desc = &qv_desc; -#endif desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF); if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF) @@ -226,11 +202,6 @@ static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw) CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); desc->params.external.param0 = 0; desc->params.external.param1 = 0; -#ifdef I40E_QV - /* put the initialized descriptor back to the ring */ - i40e_memcpy(qv_desc_on_ring, desc, sizeof(struct i40e_aq_desc), - I40E_NONDMA_TO_DMA); -#endif } alloc_arq_bufs: @@ -521,22 +492,11 @@ enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw) return I40E_ERR_NOT_READY; /* Stop firmware AdminQ processing */ -#ifdef I40E_QV - /* Do not reset registers, as Tools AQ is shared resource for QV */ - if (!hw->aq_dbg_ena) { - wr32(hw, hw->aq.asq.head, 0); - wr32(hw, hw->aq.asq.tail, 0); - wr32(hw, hw->aq.asq.len, 0); - wr32(hw, hw->aq.asq.bal, 0); - wr32(hw, hw->aq.asq.bah, 0); - } -#else wr32(hw, hw->aq.asq.head, 0); wr32(hw, hw->aq.asq.tail, 0); wr32(hw, hw->aq.asq.len, 0); wr32(hw, hw->aq.asq.bal, 0); 
wr32(hw, hw->aq.asq.bah, 0); -#endif /* make sure spinlock is available */ i40e_acquire_spinlock(&hw->aq.asq_spinlock); @@ -565,22 +525,11 @@ enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw) return I40E_ERR_NOT_READY; /* Stop firmware AdminQ processing */ -#ifdef I40E_QV - /* Do not reset registers, as Tools AQ is shared resource for QV */ - if (!hw->aq_dbg_ena) { - wr32(hw, hw->aq.arq.head, 0); - wr32(hw, hw->aq.arq.tail, 0); - wr32(hw, hw->aq.arq.len, 0); - wr32(hw, hw->aq.arq.bal, 0); - wr32(hw, hw->aq.arq.bah, 0); - } -#else wr32(hw, hw->aq.arq.head, 0); wr32(hw, hw->aq.arq.tail, 0); wr32(hw, hw->aq.arq.len, 0); wr32(hw, hw->aq.arq.bal, 0); wr32(hw, hw->aq.arq.bah, 0); -#endif /* make sure spinlock is available */ i40e_acquire_spinlock(&hw->aq.arq_spinlock); @@ -611,7 +560,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) enum i40e_status_code ret_code; u16 eetrack_lo, eetrack_hi; int retry = 0; - /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.num_asq_entries == 0) || @@ -641,7 +589,10 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) if (ret_code != I40E_SUCCESS) goto init_adminq_free_asq; - /* There are some cases where the firmware may not be quite ready + if (i40e_is_vf(hw)) /* VF has no need of firmware */ + goto init_adminq_exit; + +/* There are some cases where the firmware may not be quite ready * for AdminQ operations, so we retry the AdminQ setup a few times * if we see timeouts in this first AQ call. 
*/ @@ -667,19 +618,10 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi); hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo; -#ifdef I40E_QV - if (!hw->qv_force_init) { - if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { - ret_code = I40E_ERR_FIRMWARE_API_VERSION; - goto init_adminq_free_arq; - } - } -#else if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; goto init_adminq_free_arq; } -#endif /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); @@ -714,16 +656,8 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) { enum i40e_status_code ret_code = I40E_SUCCESS; -#ifdef I40E_QV - /* This command is not supported for Tools AQ */ - if (!hw->aq_dbg_ena) { - if (i40e_check_asq_alive(hw)) - i40e_aq_queue_shutdown(hw, TRUE); - } -#else if (i40e_check_asq_alive(hw)) i40e_aq_queue_shutdown(hw, TRUE); -#endif i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); @@ -743,10 +677,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) **/ u16 i40e_clean_asq(struct i40e_hw *hw) { -#ifdef I40E_QV - struct i40e_aq_desc qv_desc = {0}; - struct i40e_aq_desc *qv_desc_on_ring; -#endif /* I40E_QV */ struct i40e_adminq_ring *asq = &(hw->aq.asq); struct i40e_asq_cmd_details *details; u16 ntc = asq->next_to_clean; @@ -755,13 +685,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw) desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); -#ifdef I40E_QV - /* copy the descriptor from ring to userspace buffer */ - i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc), - I40E_DMA_TO_NONDMA); - qv_desc_on_ring = desc; - desc = &qv_desc; -#endif /* I40E_QV */ while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "%s: ntc %d head %d.\n", __FUNCTION__, ntc, @@ -776,23 +699,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw) } i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM); 
i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM); -#ifdef I40E_QV - /* copy the descriptor from userspace buffer to ring */ - i40e_memcpy(qv_desc_on_ring, desc, - sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA); -#endif /* I40E_QV */ ntc++; if (ntc == asq->count) ntc = 0; desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); -#ifdef I40E_QV - /* copy the descriptor from ring to userspace buffer */ - i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc), - I40E_DMA_TO_NONDMA); - qv_desc_on_ring = desc; - desc = &qv_desc; -#endif /* I40E_QV */ } asq->next_to_clean = ntc; @@ -833,10 +744,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, u16 buff_size, struct i40e_asq_cmd_details *cmd_details) { -#ifdef I40E_QV - struct i40e_aq_desc qv_desc = {0}; - struct i40e_aq_desc *qv_desc_on_ring; -#endif /* I40E_QV */ enum i40e_status_code status = I40E_SUCCESS; struct i40e_dma_mem *dma_buff = NULL; struct i40e_asq_cmd_details *details; @@ -933,13 +840,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, /* if the desc is available copy the temp desc to the right place */ i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA); -#ifdef I40E_QV - /* copy the descriptor from ring to userspace buffer */ - i40e_memcpy(&qv_desc, desc_on_ring, sizeof(struct i40e_aq_desc), - I40E_DMA_TO_NONDMA); - qv_desc_on_ring = desc_on_ring; - desc_on_ring = &qv_desc; -#endif /* I40E_QV */ /* if buff is not NULL assume indirect command */ if (buff != NULL) { @@ -956,11 +856,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa)); desc_on_ring->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa)); -#ifdef I40E_QV - /* copy the descriptor from userspace buffer to ring */ - i40e_memcpy(qv_desc_on_ring, desc_on_ring, - sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA); -#endif /* I40E_QV */ } /* bump the tail */ @@ -978,31 +873,21 @@ enum 
i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, */ if (!details->async && !details->postpone) { u32 total_delay = 0; - u32 delay_len = 10; do { -#ifdef I40E_QV - /* copy the descriptor from ring to user buffer */ - i40e_memcpy(desc_on_ring, qv_desc_on_ring, - sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); -#endif /* I40E_QV */ /* AQ designers suggest use of head for better * timing reliability than DD bit */ if (i40e_asq_done(hw)) break; /* ugh! delay while spin_lock */ - i40e_usec_delay(delay_len); - total_delay += delay_len; + i40e_msec_delay(1); + total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); } /* if ready, copy the desc back to temp */ if (i40e_asq_done(hw)) { -#ifdef I40E_QV - /* Swap pointer back */ - desc_on_ring = qv_desc_on_ring; -#endif /* I40E_QV */ i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); if (buff != NULL) @@ -1079,10 +964,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, struct i40e_arq_event_info *e, u16 *pending) { -#ifdef I40E_QV - struct i40e_aq_desc qv_desc = {0}; - struct i40e_aq_desc *qv_desc_on_ring; -#endif /* I40E_QV */ enum i40e_status_code ret_code = I40E_SUCCESS; u16 ntc = hw->aq.arq.next_to_clean; struct i40e_aq_desc *desc; @@ -1099,22 +980,12 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQRX: Queue is empty.\n"); ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; goto clean_arq_element_out; } /* now clean the next descriptor */ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); -#ifdef I40E_QV - /* copy the descriptor from ring to userspace buffer */ - i40e_memcpy(&qv_desc, desc, sizeof(struct i40e_aq_desc), - I40E_DMA_TO_NONDMA); - qv_desc_on_ring = desc; - desc = &qv_desc; -#endif /* I40E_QV */ desc_idx = ntc; flags = LE16_TO_CPU(desc->flags); @@ -1131,11 
+1002,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA); datalen = LE16_TO_CPU(desc->datalen); - e->msg_size = min(datalen, e->msg_size); - if (e->msg_buf != NULL && (e->msg_size != 0)) + e->msg_len = min(datalen, e->buf_len); + if (e->msg_buf != NULL && (e->msg_len != 0)) i40e_memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va, - e->msg_size, I40E_DMA_TO_NONDMA); + e->msg_len, I40E_DMA_TO_NONDMA); i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n"); i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf, @@ -1154,11 +1025,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, desc->datalen = CPU_TO_LE16((u16)bi->size); desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa)); desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa)); -#ifdef I40E_QV - /* copy the descriptor from userspace buffer to ring */ - i40e_memcpy(qv_desc_on_ring, desc, - sizeof(struct i40e_aq_desc), I40E_NONDMA_TO_DMA); -#endif /* I40E_QV */ /* set tail = the last cleaned desc index. 
*/ wr32(hw, hw->aq.arq.tail, ntc); diff --git a/sys/dev/i40e/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h similarity index 98% rename from sys/dev/i40e/i40e_adminq.h rename to sys/dev/ixl/i40e_adminq.h index f5a33ea04113..bebbebc9711a 100755 --- a/sys/dev/i40e/i40e_adminq.h +++ b/sys/dev/ixl/i40e_adminq.h @@ -84,7 +84,8 @@ struct i40e_asq_cmd_details { /* ARQ event information */ struct i40e_arq_event_info { struct i40e_aq_desc desc; - u16 msg_size; + u16 msg_len; + u16 buf_len; u8 *msg_buf; }; @@ -114,7 +115,7 @@ struct i40e_adminq_info { /* general information */ #define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */ +#define I40E_ASQ_CMD_TIMEOUT 100 /* msecs */ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h new file mode 100755 index 000000000000..431463d428b1 --- /dev/null +++ b/sys/dev/ixl/i40e_adminq_cmd.h @@ -0,0 +1,2180 @@ +/****************************************************************************** + + Copyright (c) 2013-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. 
+ */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR 0x0002 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + 
I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + i40e_aqc_opc_set_cppm_configuration = 0x0103, + i40e_aqc_opc_set_arp_proxy_entry = 0x0104, + i40e_aqc_opc_set_ns_proxy_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mng_laa = 0x0106, /* AQ obsolete */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + 
i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config 
= 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_tunnel_key_structure = 0x0B10, + + /* Async Events 
*/ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + + /* debug commands */ + i40e_aqc_opc_debug_get_deviceid = 0xFF00, + i40e_aqc_opc_debug_set_mode = 0xFF01, + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, + i40e_aqc_opc_debug_modify_internals = 0xFF09, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* internal (0x00XX) commands */ + +/* Get version (direct 0x0001) */ +struct i40e_aqc_get_version { + __le32 rom_ver; + __le32 fw_build; + __le16 fw_major; + __le16 fw_minor; + __le16 api_major; + __le16 api_minor; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); + +/* Send driver version (indirect 0x0002) */ +struct i40e_aqc_driver_version { + u8 driver_major_ver; + u8 driver_minor_ver; + u8 driver_build_ver; + u8 driver_subbuild_ver; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +/* Set PF context (0x0004, direct) */ +struct i40e_aqc_set_pf_context { + u8 pf_id; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); + +/* Request resource ownership (direct 0x0008) + * Release resource ownership (direct 0x0009) + */ +#define I40E_AQ_RESOURCE_NVM 1 +#define I40E_AQ_RESOURCE_SDP 2 +#define I40E_AQ_RESOURCE_ACCESS_READ 1 +#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 +#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 +#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 + +struct i40e_aqc_request_resource { + __le16 resource_id; + __le16 access_type; + __le32 timeout; + __le32 resource_number; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); + +/* Get function capabilities (indirect 0x000A) + * Get device capabilities (indirect 0x000B) + */ +struct i40e_aqc_list_capabilites { + u8 command_flags; +#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 + u8 pf_index; + u8 reserved[2]; + __le32 count; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); + +struct i40e_aqc_list_capabilities_element_resp { + __le16 id; + u8 major_rev; + u8 minor_rev; + 
__le32 number; + __le32 logical_id; + __le32 phys_id; + u8 reserved[16]; +}; + +/* list of caps */ + +#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 +#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 +#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 +#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 +#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 +#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_SRIOV 0x0012 +#define I40E_AQ_CAP_ID_VF 0x0013 +#define I40E_AQ_CAP_ID_VMDQ 0x0014 +#define I40E_AQ_CAP_ID_8021QBG 0x0015 +#define I40E_AQ_CAP_ID_8021QBR 0x0016 +#define I40E_AQ_CAP_ID_VSI 0x0017 +#define I40E_AQ_CAP_ID_DCB 0x0018 +#define I40E_AQ_CAP_ID_FCOE 0x0021 +#define I40E_AQ_CAP_ID_RSS 0x0040 +#define I40E_AQ_CAP_ID_RXQ 0x0041 +#define I40E_AQ_CAP_ID_TXQ 0x0042 +#define I40E_AQ_CAP_ID_MSIX 0x0043 +#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 +#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 +#define I40E_AQ_CAP_ID_1588 0x0046 +#define I40E_AQ_CAP_ID_IWARP 0x0051 +#define I40E_AQ_CAP_ID_LED 0x0061 +#define I40E_AQ_CAP_ID_SDP 0x0062 +#define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_FLEX10 0x00F1 +#define I40E_AQ_CAP_ID_CEM 0x00F2 + +/* Set CPPM Configuration (direct 0x0103) */ +struct i40e_aqc_cppm_configuration { + __le16 command_flags; +#define I40E_AQ_CPPM_EN_LTRC 0x0800 +#define I40E_AQ_CPPM_EN_DMCTH 0x1000 +#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 +#define I40E_AQ_CPPM_EN_HPTC 0x4000 +#define I40E_AQ_CPPM_EN_DMARC 0x8000 + __le16 ttlx; + __le32 dmacr; + __le16 dmcth; + u8 hptc; + u8 reserved; + __le32 pfltrc; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); + +/* Set ARP Proxy command / response (indirect 0x0104) */ +struct i40e_aqc_arp_proxy_data { + __le16 command_flags; +#define I40E_AQ_ARP_INIT_IPV4 0x0008 +#define I40E_AQ_ARP_UNSUP_CTL 0x0010 +#define I40E_AQ_ARP_ENA 0x0020 +#define I40E_AQ_ARP_ADD_IPV4 0x0040 +#define I40E_AQ_ARP_DEL_IPV4 0x0080 + __le16 table_id; + __le32 pfpm_proxyfc; + __le32 ip_addr; + u8 mac_addr[6]; +}; + +/* Set NS Proxy Table Entry Command 
(indirect 0x0105) */ +struct i40e_aqc_ns_proxy_data { + __le16 table_idx_mac_addr_0; + __le16 table_idx_mac_addr_1; + __le16 table_idx_ipv6_0; + __le16 table_idx_ipv6_1; + __le16 control; +#define I40E_AQ_NS_PROXY_ADD_0 0x0100 +#define I40E_AQ_NS_PROXY_DEL_0 0x0200 +#define I40E_AQ_NS_PROXY_ADD_1 0x0400 +#define I40E_AQ_NS_PROXY_DEL_1 0x0800 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 +#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 + u8 mac_addr_0[6]; + u8 mac_addr_1[6]; + u8 local_mac_addr[6]; + u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ + u8 ipv6_addr_1[16]; +}; + +/* Manage LAA Command (0x0106) - obsolete */ +struct i40e_aqc_mng_laa { + __le16 command_flags; +#define I40E_AQ_LAA_FLAG_WR 0x8000 + u8 reserved[2]; + __le32 sal; + __le16 sah; + u8 reserved2[6]; +}; + +/* Manage MAC Address Read Command (indirect 0x0107) */ +struct i40e_aqc_mac_address_read { + __le16 command_flags; +#define I40E_AQC_LAN_ADDR_VALID 0x10 +#define I40E_AQC_SAN_ADDR_VALID 0x20 +#define I40E_AQC_PORT_ADDR_VALID 0x40 +#define I40E_AQC_WOL_ADDR_VALID 0x80 +#define I40E_AQC_ADDR_VALID_MASK 0xf0 + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); + +struct i40e_aqc_mac_address_read_data { + u8 pf_lan_mac[6]; + u8 pf_san_mac[6]; + u8 port_mac[6]; + u8 pf_wol_mac[6]; +}; + +I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); + +/* Manage MAC Address Write Command (0x0108) */ +struct i40e_aqc_mac_address_write { + __le16 command_flags; +#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 +#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 +#define I40E_AQC_WRITE_TYPE_PORT 0x8000 +#define I40E_AQC_WRITE_TYPE_MASK 0xc000 + __le16 mac_sah; + __le32 mac_sal; + u8 reserved[8]; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); + +/* PXE commands (0x011x) */ + +/* Clear PXE Command and response (direct 0x0110) */ +struct i40e_aqc_clear_pxe { + u8 rx_cnt; + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); + +/* Switch configuration commands (0x02xx) */ + +/* Used by many indirect commands that only pass an seid and a buffer in the + * command + */ +struct i40e_aqc_switch_seid { + __le16 seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); + +/* Get Switch Configuration command (indirect 0x0200) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_switch_config_header_resp { + __le16 num_reported; + __le16 num_total; + u8 reserved[12]; +}; + +struct i40e_aqc_switch_config_element_resp { + u8 element_type; +#define I40E_AQ_SW_ELEM_TYPE_MAC 1 +#define I40E_AQ_SW_ELEM_TYPE_PF 2 +#define I40E_AQ_SW_ELEM_TYPE_VF 3 +#define I40E_AQ_SW_ELEM_TYPE_EMP 4 +#define I40E_AQ_SW_ELEM_TYPE_BMC 5 +#define I40E_AQ_SW_ELEM_TYPE_PV 16 +#define I40E_AQ_SW_ELEM_TYPE_VEB 17 +#define I40E_AQ_SW_ELEM_TYPE_PA 18 +#define I40E_AQ_SW_ELEM_TYPE_VSI 19 + u8 revision; +#define I40E_AQ_SW_ELEM_REV_1 1 + __le16 seid; + __le16 uplink_seid; + __le16 downlink_seid; + u8 reserved[3]; + u8 connection_type; +#define I40E_AQ_CONN_TYPE_REGULAR 0x1 +#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_CONN_TYPE_CASCADED 0x3 + __le16 scheduler_id; + __le16 element_info; +}; + +/* Get Switch Configuration (indirect 0x0200) + * an array of elements are returned in the response buffer + * the first in the array is the header, remainder are elements + */ +struct i40e_aqc_get_switch_config_resp { + struct i40e_aqc_get_switch_config_header_resp header; + struct i40e_aqc_switch_config_element_resp element[1]; +}; + +/* Add Statistics (direct 0x0201) + * Remove Statistics (direct 0x0202) + */ +struct i40e_aqc_add_remove_statistics { + __le16 seid; + __le16 vlan; + __le16 stat_index; + u8 
reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); + +/* Set Port Parameters command (direct 0x0203) */ +struct i40e_aqc_set_port_parameters { + __le16 command_flags; +#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 +#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ +#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 + __le16 bad_frame_vsi; + __le16 default_seid; /* reserved for command */ + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); + +/* Get Switch Resource Allocation (indirect 0x0204) */ +struct i40e_aqc_get_switch_resource_alloc { + u8 num_entries; /* reserved for command */ + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); + +/* expect an array of these structs in the response buffer */ +struct i40e_aqc_switch_resource_alloc_element_resp { + u8 resource_type; +#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 +#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 +#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 +#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 +#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 +#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 +#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 +#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 +#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 +#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 +#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA +#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB +#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC +#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD +#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF +#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 +#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 +#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 +#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 + u8 reserved1; + __le16 guaranteed; + __le16 total; + __le16 used; + __le16 total_unalloced; + u8 reserved2[6]; +}; + +/* Add VSI (indirect 0x0210) + * this indirect command uses struct 
i40e_aqc_vsi_properties_data + * as the indirect buffer (128 bytes) + * + * Update VSI (indirect 0x211) + * uses the same data structure as Add VSI + * + * Get VSI (indirect 0x0212) + * uses the same completion and data structure as Add VSI + */ +struct i40e_aqc_add_get_update_vsi { + __le16 uplink_seid; + u8 connection_type; +#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 +#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 +#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 + u8 reserved1; + u8 vf_id; + u8 reserved2; + __le16 vsi_flags; +#define I40E_AQ_VSI_TYPE_SHIFT 0x0 +#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) +#define I40E_AQ_VSI_TYPE_VF 0x0 +#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 +#define I40E_AQ_VSI_TYPE_PF 0x2 +#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 +#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); + +struct i40e_aqc_add_get_update_vsi_completion { + __le16 seid; + __le16 vsi_number; + __le16 vsi_used; + __le16 vsi_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 
0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define 
I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress table */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Add Port Virtualizer (direct 0x0220) + * also used for update PV (direct 0x0221) but only flags are used + * (IS_CTRL_PORT only works on add PV) + */ +struct 
i40e_aqc_add_update_pv { + __le16 command_flags; +#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 +#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 +#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 + __le16 uplink_seid; + __le16 connected_seid; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); + +struct i40e_aqc_add_update_pv_completion { + /* reserved for update; for add also encodes error if rc == ENOSPC */ + __le16 pv_seid; +#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 +#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 +#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); + +/* Get PV Params (direct 0x0222) + * uses i40e_aqc_switch_seid for the descriptor + */ + +struct i40e_aqc_get_pv_params_completion { + __le16 seid; + __le16 default_stag; + __le16 pv_flags; /* same flags as add_pv */ +#define I40E_AQC_GET_PV_PV_TYPE 0x1 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 +#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 + u8 reserved[8]; + __le16 default_port_seid; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); + +/* Add VEB (direct 0x0230) */ +struct i40e_aqc_add_veb { + __le16 uplink_seid; + __le16 downlink_seid; + __le16 veb_flags; +#define I40E_AQC_ADD_VEB_FLOATING 0x1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 +#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ + I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) +#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 +#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 + u8 enable_tcs; + u8 reserved[9]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); + +struct i40e_aqc_add_veb_completion { + u8 reserved[6]; + __le16 switch_seid; + /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ + __le16 veb_seid; +#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 +#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 +#define 
I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 +#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +/* Delete Element (direct 0x0243) + * uses the generic i40e_aqc_switch_seid + */ + +/* Add MAC-VLAN (indirect 0x0250) */ + +/* used for the command for most vlan commands */ +struct i40e_aqc_macvlan { + __le16 num_addresses; + __le16 seid[3]; +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) +#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); + +/* indirect data for command and response */ +struct i40e_aqc_add_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + __le16 flags; +#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 +#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 +#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 +#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 + __le16 queue_number; +#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 +#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ + I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) + /* response section */ + u8 match_method; +#define I40E_AQC_MM_PERFECT_MATCH 0x01 +#define I40E_AQC_MM_HASH_MATCH 0x02 +#define I40E_AQC_MM_ERR_NO_RES 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_macvlan_completion { + __le16 perfect_mac_used; + __le16 perfect_mac_free; + __le16 unicast_hash_free; + __le16 multicast_hash_free; + __le32 addr_high; + 
__le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); + +/* Remove MAC-VLAN (indirect 0x0251) + * uses i40e_aqc_macvlan for the descriptor + * data points to an array of num_addresses of elements + */ + +struct i40e_aqc_remove_macvlan_element_data { + u8 mac_addr[6]; + __le16 vlan_tag; + u8 flags; +#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 +#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 +#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 +#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 + u8 reserved[3]; + /* reply section */ + u8 error_code; +#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF + u8 reply_reserved[3]; +}; + +/* Add VLAN (indirect 0x0252) + * Remove VLAN (indirect 0x0253) + * use the generic i40e_aqc_macvlan for the command + */ +struct i40e_aqc_add_remove_vlan_element_data { + __le16 vlan_tag; + u8 vlan_flags; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_LOCAL 0x1 +#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 +#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) +#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 +#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 +#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 +#define I40E_AQC_VLAN_PTYPE_SHIFT 3 +#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) +#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 +#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 +#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 +#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_ALL 0x1 + u8 reserved; + u8 result; +/* flags for add VLAN */ +#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 +#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE +#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF +/* flags for remove VLAN */ +#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 +#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF + u8 reserved1[3]; +}; + +struct i40e_aqc_add_remove_vlan_completion { + u8 reserved[4]; + __le16 vlans_used; + __le16 
vlans_free; + __le32 addr_high; + __le32 addr_low; +}; + +/* Set VSI Promiscuous Modes (direct 0x0254) */ +struct i40e_aqc_set_vsi_promiscuous_modes { + __le16 promiscuous_flags; + __le16 valid_flags; +/* flags used for both fields above */ +#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 +#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 +#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 +#define I40E_AQC_SET_VSI_DEFAULT 0x08 +#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 + __le16 seid; +#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF + __le16 vlan_tag; +#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); + +/* Add S/E-tag command (direct 0x0255) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_add_tag { + __le16 flags; +#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 + __le16 seid; +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + __le16 queue_number; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); + +struct i40e_aqc_add_remove_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); + +/* Remove S/E-tag command (direct 0x0256) + * Uses generic i40e_aqc_add_remove_tag_completion for completion + */ +struct i40e_aqc_remove_tag { + __le16 seid; +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) + __le16 tag; + u8 reserved[12]; +}; + +/* Add multicast E-Tag (direct 0x0257) + * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields + * and no external data + */ +struct i40e_aqc_add_remove_mcast_etag { + __le16 pv_seid; + __le16 etag; + u8 num_unicast_etags; + u8 reserved[3]; + __le32 addr_high; /* address of array of 2-byte s-tags */ + __le32 
addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); + +struct i40e_aqc_add_remove_mcast_etag_completion { + u8 reserved[4]; + __le16 mcast_etags_used; + __le16 mcast_etags_free; + __le32 addr_high; + __le32 addr_low; + +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); + +/* Update S/E-Tag (direct 0x0259) */ +struct i40e_aqc_update_tag { + __le16 seid; +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) + __le16 old_tag; + __le16 new_tag; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); + +struct i40e_aqc_update_tag_completion { + u8 reserved[12]; + __le16 tags_used; + __le16 tags_free; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); + +/* Add Control Packet filter (direct 0x025A) + * Remove Control Packet filter (direct 0x025B) + * uses the i40e_aqc_add_oveb_cloud, + * and the generic direct completion structure + */ +struct i40e_aqc_add_remove_control_packet_filter { + u8 mac[6]; + __le16 etype; + __le16 flags; +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 +#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 + __le16 seid; +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) + __le16 queue; + u8 reserved[2]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); + +struct i40e_aqc_add_remove_control_packet_filter_completion { + __le16 mac_etype_used; + __le16 etype_used; + __le16 mac_etype_free; + __le16 etype_free; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); + +/* Add Cloud filters (indirect 0x025C) + * Remove Cloud 
filters (indirect 0x025D) + * uses the i40e_aqc_add_remove_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_aqc_add_remove_cloud_filters { + u8 num_filters; + u8 reserved; + __le16 seid; +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ + I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); + +struct i40e_aqc_add_remove_cloud_filters_element_data { + u8 outer_mac[6]; + u8 inner_mac[6]; + __le16 inner_vlan; + union { + struct { + u8 reserved[12]; + u8 data[4]; + } v4; + struct { + u8 data[16]; + } v6; + } ipaddr; + __le16 flags; +#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 +#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_FILTER_SHIFT) +/* 0x0000 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 +/* 0x0002 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 +/* 0x0005 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 +/* 0x0007 reserved */ +/* 0x0008 reserved */ +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 +#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A +#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B +#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C + +#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 +#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 +#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 +#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 + +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 + + __le32 tenant_id; + u8 reserved[4]; + __le16 queue_number; +#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \ + I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) + u8 reserved2[14]; + /* response section */ + u8 allocation_result; +#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 +#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF + u8 response_reserved[7]; +}; + +struct i40e_aqc_remove_cloud_filters_completion { + __le16 perfect_ovlan_used; + __le16 perfect_ovlan_free; + __le16 vlan_used; + __le16 vlan_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); + +/* Add Mirror Rule (indirect or direct 0x0260) + * Delete Mirror Rule (indirect or direct 0x0261) + * note: some rule types (4,5) do not use an external buffer. + * take care to set the flags correctly. + */ +struct i40e_aqc_add_delete_mirror_rule { + __le16 seid; + __le16 rule_type; +#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 +#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ + I40E_AQC_MIRROR_RULE_TYPE_SHIFT) +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 +#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 +#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 +#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 + __le16 num_entries; + __le16 destination; /* VSI for add, rule id for delete */ + __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); + +struct i40e_aqc_add_delete_mirror_rule_completion { + u8 reserved[2]; + __le16 rule_id; /* only used on add */ + __le16 mirror_rules_used; + __le16 mirror_rules_free; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); + +/* DCB 0x03xx*/ + +/* PFC Ignore (direct 0x0301) + * the command and response use the same descriptor structure + */ +struct i40e_aqc_pfc_ignore { + u8 tc_bitmap; + u8 command_flags; /* unused on response */ +#define I40E_AQC_PFC_IGNORE_SET 0x80 +#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 + 
u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); + +/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure + * with no parameters + */ + +/* TX scheduler 0x04xx */ + +/* Almost all the indirect commands use + * this generic struct to pass the SEID in param0 + */ +struct i40e_aqc_tx_sched_ind { + __le16 vsi_seid; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); + +/* Several commands respond with a set of queue set handles */ +struct i40e_aqc_qs_handles_resp { + __le16 qs_handles[8]; +}; + +/* Configure VSI BW limits (direct 0x0400) */ +struct i40e_aqc_configure_vsi_bw_limit { + __le16 vsi_seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_credit; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); + +/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_ets_sla_bw_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) + * responds with i40e_aqc_qs_handles_resp + */ +struct i40e_aqc_configure_vsi_tc_bw_data { + u8 tc_valid_bits; + u8 reserved[3]; + u8 tc_bw_credits[8]; + u8 reserved1[4]; + __le16 qs_handles[8]; +}; + +/* Query vsi bw configuration (indirect 0x0408) */ +struct i40e_aqc_query_vsi_bw_config_resp { + u8 tc_valid_bits; + u8 tc_suspended_bits; + u8 reserved[14]; + __le16 qs_handles[8]; + u8 reserved1[4]; + __le16 port_bw_limit; + u8 reserved2[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved3[23]; +}; + +/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ +struct i40e_aqc_query_vsi_ets_sla_config_resp { + u8 tc_valid_bits; + u8 reserved[3]; + u8 
share_credits[8]; + __le16 credits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ +struct i40e_aqc_configure_switching_comp_bw_limit { + __le16 seid; + u8 reserved[2]; + __le16 credit; + u8 reserved1[2]; + u8 max_bw; /* 0-3, limit = 2^max */ + u8 reserved2[7]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); + +/* Enable Physical Port ETS (indirect 0x0413) + * Modify Physical Port ETS (indirect 0x0414) + * Disable Physical Port ETS (indirect 0x0415) + */ +struct i40e_aqc_configure_switching_comp_ets_data { + u8 reserved[4]; + u8 tc_valid_bits; + u8 seepage; +#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 + u8 tc_strict_priority_flags; + u8 reserved1[17]; + u8 tc_bw_share_credits[8]; + u8 reserved2[96]; +}; + +/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ +struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { + u8 tc_valid_bits; + u8 reserved[15]; + __le16 tc_bw_credit[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved1[28]; +}; + +/* Configure Switching Component Bandwidth Allocation per Tc + * (indirect 0x0417) + */ +struct i40e_aqc_configure_switching_comp_bw_config_data { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits; /* bool */ + u8 tc_bw_share_credits[8]; + u8 reserved1[20]; +}; + +/* Query Switching Component Configuration (indirect 0x0418) */ +struct i40e_aqc_query_switching_comp_ets_config_resp { + u8 tc_valid_bits; + u8 reserved[35]; + __le16 port_bw_limit; + u8 reserved1[2]; + u8 tc_bw_max; /* 0-3, limit = 2^max */ + u8 reserved2[23]; +}; + +/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ +struct i40e_aqc_query_port_ets_config_resp { + u8 reserved[4]; + u8 tc_valid_bits; + u8 reserved1; + u8 tc_strict_priority_bits; + u8 reserved2; + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per 
tc 0-7, 4th bit reserved, limit = 2^max */ + __le16 tc_bw_max[2]; + u8 reserved3[32]; +}; + +/* Query Switching Component Bandwidth Allocation per Traffic Type + * (indirect 0x041A) + */ +struct i40e_aqc_query_switching_comp_bw_config_resp { + u8 tc_valid_bits; + u8 reserved[2]; + u8 absolute_credits_enable; /* bool */ + u8 tc_bw_share_credits[8]; + __le16 tc_bw_limits[8]; + + /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ + __le16 tc_bw_max[2]; +}; + +/* Suspend/resume port TX traffic + * (direct 0x041B and 0x041C) uses the generic SEID struct + */ + +/* Configure partition BW + * (indirect 0x041D) + */ +struct i40e_aqc_configure_partition_bw_data { + __le16 pf_valid_bits; + u8 min_bw[16]; /* guaranteed bandwidth */ + u8 max_bw[16]; /* bandwidth limit */ +}; + +/* Get and set the active HMC resource profile and status. + * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF +#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F + +/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ + +/* set in param0 for get phy abilities to report qualified modules */ +#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 +#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 + +enum i40e_aq_phy_type { + I40E_PHY_TYPE_SGMII = 0x0, + I40E_PHY_TYPE_1000BASE_KX = 0x1, + I40E_PHY_TYPE_10GBASE_KX4 = 0x2, + I40E_PHY_TYPE_10GBASE_KR = 0x3, + I40E_PHY_TYPE_40GBASE_KR4 = 0x4, + I40E_PHY_TYPE_XAUI = 0x5, + I40E_PHY_TYPE_XFI = 0x6, + I40E_PHY_TYPE_SFI = 0x7, + I40E_PHY_TYPE_XLAUI = 0x8, + I40E_PHY_TYPE_XLPPI = 0x9, + I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, + 
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, + I40E_PHY_TYPE_10GBASE_AOC = 0xC, + I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_100BASE_TX = 0x11, + I40E_PHY_TYPE_1000BASE_T = 0x12, + I40E_PHY_TYPE_10GBASE_T = 0x13, + I40E_PHY_TYPE_10GBASE_SR = 0x14, + I40E_PHY_TYPE_10GBASE_LR = 0x15, + I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, + I40E_PHY_TYPE_10GBASE_CR1 = 0x17, + I40E_PHY_TYPE_40GBASE_CR4 = 0x18, + I40E_PHY_TYPE_40GBASE_SR4 = 0x19, + I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, + I40E_PHY_TYPE_1000BASE_SX = 0x1B, + I40E_PHY_TYPE_1000BASE_LX = 0x1C, + I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, + I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_MAX +}; + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) +}; + +struct i40e_aqc_module_desc { + u8 oui[3]; + u8 reserved1; + u8 part_number[16]; + u8 revision[4]; + u8 reserved2[8]; +}; + +struct i40e_aq_get_phy_abilities_resp { + __le32 phy_type; /* bitmap using the above enum for offsets */ + u8 link_speed; /* bitmap using the above enum bit patterns */ + u8 abilities; +#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 +#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 +#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 +#define I40E_AQ_PHY_LINK_ENABLED 0x08 +#define I40E_AQ_PHY_AN_ENABLED 0x10 +#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 + __le16 eee_capability; +#define I40E_AQ_EEE_100BASE_TX 0x0002 +#define I40E_AQ_EEE_1000BASE_T 0x0004 +#define I40E_AQ_EEE_10GBASE_T 0x0008 +#define I40E_AQ_EEE_1000BASE_KX 0x0010 +#define I40E_AQ_EEE_10GBASE_KX4 0x0020 +#define 
I40E_AQ_EEE_10GBASE_KR 0x0040 + __le32 eeer_val; + u8 d3_lpan; +#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 + u8 reserved[3]; + u8 phy_id[4]; + u8 module_type[3]; + u8 qualified_module_count; +#define I40E_AQ_PHY_MAX_QMS 16 + struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; +}; + +/* Set PHY Config (direct 0x0601) */ +struct i40e_aq_set_phy_config { /* same bits as above in all */ + __le32 phy_type; + u8 link_speed; + u8 abilities; +/* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 +#define I40E_AQ_PHY_ENABLE_AN 0x10 +#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 + __le16 eee_capability; + __le32 eeer; + u8 low_power_ctrl; + u8 reserved[3]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); + +/* Set MAC Config command data structure (direct 0x0603) */ +struct i40e_aq_set_mac_config { + __le16 max_frame_size; + u8 params; +#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 +#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 +#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 +#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 +#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 +#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 +#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 +#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 +#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 + u8 tx_timer_priority; /* bitmap */ + __le16 tx_timer_value; + __le16 fc_refresh_threshold; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); + +/* Restart Auto-Negotiation (direct 0x605) */ +struct i40e_aqc_set_link_restart_an { + u8 command; +#define I40E_AQ_PHY_RESTART_AN 0x02 +#define I40E_AQ_PHY_LINK_ENABLE 0x04 + u8 reserved[15]; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); + +/* Get Link Status cmd & response data structure (direct 0x0607) */ +struct i40e_aqc_get_link_status { + __le16 command_flags; /* only field set on command */ +#define I40E_AQ_LSE_MASK 0x3 +#define I40E_AQ_LSE_NOP 0x0 +#define I40E_AQ_LSE_DISABLE 0x2 +#define I40E_AQ_LSE_ENABLE 0x3 +/* only response uses this flag */ +#define I40E_AQ_LSE_IS_ENABLED 0x1 + u8 phy_type; /* i40e_aq_phy_type */ + u8 link_speed; /* i40e_aq_link_speed */ + u8 link_info; +#define I40E_AQ_LINK_UP 0x01 +#define I40E_AQ_LINK_FAULT 0x02 +#define I40E_AQ_LINK_FAULT_TX 0x04 +#define I40E_AQ_LINK_FAULT_RX 0x08 +#define I40E_AQ_LINK_FAULT_REMOTE 0x10 +#define I40E_AQ_MEDIA_AVAILABLE 0x40 +#define I40E_AQ_SIGNAL_DETECT 0x80 + u8 an_info; +#define I40E_AQ_AN_COMPLETED 0x01 +#define I40E_AQ_LP_AN_ABILITY 0x02 +#define I40E_AQ_PD_FAULT 0x04 +#define I40E_AQ_FEC_EN 0x08 +#define I40E_AQ_PHY_LOW_POWER 0x10 +#define I40E_AQ_LINK_PAUSE_TX 0x20 +#define I40E_AQ_LINK_PAUSE_RX 0x40 +#define I40E_AQ_QUALIFIED_MODULE 0x80 + u8 ext_info; +#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 +#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 +#define I40E_AQ_LINK_TX_SHIFT 0x02 +#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) +#define I40E_AQ_LINK_TX_ACTIVE 0x00 +#define I40E_AQ_LINK_TX_DRAINED 0x01 +#define I40E_AQ_LINK_TX_FLUSHED 0x03 +#define I40E_AQ_LINK_FORCED_40G 0x10 + u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ + __le16 max_frame_size; + u8 config; +#define I40E_AQ_CONFIG_CRC_ENA 0x04 +#define I40E_AQ_CONFIG_PACING_MASK 0x78 + u8 reserved[5]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); + +/* Set event mask command (direct 0x613) */ +struct i40e_aqc_set_phy_int_mask { + u8 reserved[8]; + __le16 event_mask; +#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 +#define I40E_AQ_EVENT_MEDIA_NA 0x0004 +#define I40E_AQ_EVENT_LINK_FAULT 0x0008 +#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 +#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 +#define 
I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 +#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 +#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 +#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 + u8 reserved1[6]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); + +/* Get Local AN advt register (direct 0x0614) + * Set Local AN advt register (direct 0x0615) + * Get Link Partner AN advt register (direct 0x0616) + */ +struct i40e_aqc_an_advt_reg { + __le32 local_an_reg0; + __le16 local_an_reg1; + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); + +/* Set Loopback mode (0x0618) */ +struct i40e_aqc_set_lb_mode { + __le16 lb_mode; +#define I40E_AQ_LB_PHY_LOCAL 0x01 +#define I40E_AQ_LB_PHY_REMOTE 0x02 +#define I40E_AQ_LB_MAC_LOCAL 0x04 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); + +/* Set PHY Debug command (0x0622) */ +struct i40e_aqc_set_phy_debug { + u8 command_flags; +#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ + I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 +#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); + +enum i40e_aq_phy_reg_type { + I40E_AQC_PHY_REG_INTERNAL = 0x1, + I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, + I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 +}; + +/* NVM Read command (indirect 0x0701) + * NVM Erase commands (direct 0x0702) + * NVM Update commands (indirect 0x0703) + */ +struct i40e_aqc_nvm_update { + u8 command_flags; +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 + u8 module_pointer; + __le16 length; + __le32 offset; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); + +/* NVM Config Read (indirect 0x0704) */ +struct 
i40e_aqc_nvm_config_read { + __le16 cmd_flags; +#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 +#define ANVM_READ_SINGLE_FEATURE 0 +#define ANVM_READ_MULTIPLE_FEATURES 1 + __le16 element_count; + __le16 element_id; /* Feature/field ID */ + u8 reserved[2]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); + +/* NVM Config Write (indirect 0x0705) */ +struct i40e_aqc_nvm_config_write { + __le16 cmd_flags; + __le16 element_count; + u8 reserved[4]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); + +struct i40e_aqc_nvm_config_data_feature { + __le16 feature_id; + __le16 instance_id; + __le16 feature_options; + __le16 feature_selection; +}; + +struct i40e_aqc_nvm_config_data_immediate_field { +#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2 + __le16 field_id; + __le16 instance_id; + __le16 field_options; + __le16 field_value; +}; + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +/* Alternate structure */ + +/* Direct write (direct 0x0900) + * Direct read (direct 0x0902) + */ +struct i40e_aqc_alternate_write { + __le32 address0; + __le32 data0; + __le32 address1; + __le32 data1; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); + +/* Indirect write (indirect 0x0901) + * Indirect read (indirect 0x0903) + */ + +struct i40e_aqc_alternate_ind_write { + __le32 address; + __le32 length; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); + +/* Done alternate write (direct 0x0904) + * uses i40e_aq_desc + */ +struct i40e_aqc_alternate_write_done { + __le16 cmd_flags; +#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 +#define 
I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 +#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 +#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); + +/* Set OEM mode (direct 0x0905) */ +struct i40e_aqc_alternate_set_mode { + __le32 mode; +#define I40E_AQ_ALTERNATE_MODE_NONE 0 +#define I40E_AQ_ALTERNATE_MODE_OEM 1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); + +/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ + +/* async events 0x10xx */ + +/* Lan Queue Overflow Event (direct, 0x1001) */ +struct i40e_aqc_lan_overflow { + __le32 prtdcb_rupto; + __le32 otx_ctl; + u8 reserved[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); + +/* Get LLDP MIB (indirect 0x0A00) */ +struct i40e_aqc_lldp_get_mib { + u8 type; + u8 reserved1; +#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 +#define I40E_AQ_LLDP_MIB_LOCAL 0x0 +#define I40E_AQ_LLDP_MIB_REMOTE 0x1 +#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC +#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 +#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 +#define I40E_AQ_LLDP_TX_SHIFT 0x4 +#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) +/* TX pause flags use I40E_AQ_LINK_TX_* above */ + __le16 local_len; + __le16 remote_len; + u8 reserved2[2]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); + +/* Configure LLDP MIB Change Event (direct 0x0A01) + * also used for the event (with type in the command field) + */ +struct i40e_aqc_lldp_update_mib { + u8 command; +#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 +#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); + +/* Add LLDP TLV (indirect 0x0A02) + * Delete LLDP TLV (indirect 0x0A04) + */ +struct i40e_aqc_lldp_add_tlv { + u8 type; /* only nearest 
bridge and non-TPMR from 0x0A00 */ + u8 reserved1[1]; + __le16 len; + u8 reserved2[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); + +/* Update LLDP TLV (indirect 0x0A03) */ +struct i40e_aqc_lldp_update_tlv { + u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ + u8 reserved; + __le16 old_len; + __le16 new_offset; + __le16 new_len; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); + +/* Stop LLDP (direct 0x0A05) */ +struct i40e_aqc_lldp_stop { + u8 command; +#define I40E_AQ_LLDP_AGENT_STOP 0x0 +#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); + +/* Start LLDP (direct 0x0A06) */ + +struct i40e_aqc_lldp_start { + u8 command; +#define I40E_AQ_LLDP_AGENT_START 0x1 + u8 reserved[15]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); + +/* Apply MIB changes (0x0A07) + * uses the generic struc as it contains no data + */ + +/* Add Udp Tunnel command and completion (direct 0x0B00) */ +struct i40e_aqc_add_udp_tunnel { + __le16 udp_port; + u8 reserved0[3]; + u8 protocol_type; +#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 +#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 +#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 + u8 reserved1[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); + +struct i40e_aqc_add_udp_tunnel_completion { + __le16 udp_port; + u8 filter_entry_index; + u8 multiple_pfs; +#define I40E_AQC_SINGLE_PF 0x0 +#define I40E_AQC_MULTIPLE_PFS 0x1 + u8 total_filters; + u8 reserved[11]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); + +/* remove UDP Tunnel command (0x0B01) */ +struct i40e_aqc_remove_udp_tunnel { + u8 reserved[2]; + u8 index; /* 0 to 15 */ + u8 reserved2[13]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); + +struct i40e_aqc_del_udp_tunnel_completion { + __le16 udp_port; + u8 index; /* 0 to 15 */ + u8 multiple_pfs; + u8 total_filters_used; + u8 reserved1[11]; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); + +/* tunnel key structure 0x0B10 */ + +struct i40e_aqc_tunnel_key_structure { + u8 key1_off; + u8 key2_off; + u8 key1_len; /* 0 to 15 */ + u8 key2_len; /* 0 to 15 */ + u8 flags; +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 +/* response flags */ +#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 +#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 +#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 + u8 network_key_index; +#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 +#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 +#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 +#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 + u8 reserved[10]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); + +/* OEM mode commands (direct 0xFE0x) */ +struct i40e_aqc_oem_param_change { + __le32 param_type; +#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 +#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 +#define I40E_AQ_OEM_PARAM_MAC 2 + __le32 param_value1; + u8 param_value2[8]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); + +struct i40e_aqc_oem_state_change { + __le32 state; +#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 +#define I40E_AQ_OEM_STATE_LINK_UP 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); + +/* debug commands */ + +/* get device id (0xFF00) uses the generic structure */ + +/* set test more (0xFF01, internal) */ + +struct i40e_acq_set_test_mode { + u8 mode; +#define I40E_AQ_TEST_PARTIAL 0 +#define I40E_AQ_TEST_FULL 1 +#define I40E_AQ_TEST_NVM 2 + u8 reserved[3]; + u8 command; +#define I40E_AQ_TEST_OPEN 0 +#define I40E_AQ_TEST_CLOSE 1 +#define I40E_AQ_TEST_INC 2 + u8 reserved2[3]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); + +/* Debug Read Register command (0xFF03) + * Debug Write Register command (0xFF04) + */ +struct i40e_aqc_debug_reg_read_write { + __le32 reserved; + __le32 address; + __le32 value_high; + __le32 value_low; +}; + 
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); + +/* Scatter/gather Reg Read (indirect 0xFF05) + * Scatter/gather Reg Write (indirect 0xFF06) + */ + +/* i40e_aq_desc is used for the command */ +struct i40e_aqc_debug_reg_sg_element_data { + __le32 address; + __le32 value; +}; + +/* Debug Modify register (direct 0xFF07) */ +struct i40e_aqc_debug_modify_reg { + __le32 address; + __le32 value; + __le32 clear_mask; + __le32 set_mask; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); + +/* dump internal data (0xFF08, indirect) */ + +#define I40E_AQ_CLUSTER_ID_AUX 0 +#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 +#define I40E_AQ_CLUSTER_ID_TXSCHED 2 +#define I40E_AQ_CLUSTER_ID_HMC 3 +#define I40E_AQ_CLUSTER_ID_MAC0 4 +#define I40E_AQ_CLUSTER_ID_MAC1 5 +#define I40E_AQ_CLUSTER_ID_MAC2 6 +#define I40E_AQ_CLUSTER_ID_MAC3 7 +#define I40E_AQ_CLUSTER_ID_DCB 8 +#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 +#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 +#define I40E_AQ_CLUSTER_ID_ALTRAM 11 + +struct i40e_aqc_debug_dump_internals { + u8 cluster_id; + u8 table_id; + __le16 data_size; + __le32 idx; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); + +struct i40e_aqc_debug_modify_internals { + u8 cluster_id; + u8 cluster_specific_params[7]; + __le32 address_high; + __le32 address_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); + +#endif diff --git a/sys/dev/i40e/i40e_alloc.h b/sys/dev/ixl/i40e_alloc.h similarity index 100% rename from sys/dev/i40e/i40e_alloc.h rename to sys/dev/ixl/i40e_alloc.h diff --git a/sys/dev/i40e/i40e_common.c b/sys/dev/ixl/i40e_common.c similarity index 97% rename from sys/dev/i40e/i40e_common.c rename to sys/dev/ixl/i40e_common.c index ec0fd0cfb2bc..ad1f9457c98e 100755 --- a/sys/dev/i40e/i40e_common.c +++ b/sys/dev/ixl/i40e_common.c @@ -44,7 +44,7 @@ * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
**/ -static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) +enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) { enum i40e_status_code status = I40E_SUCCESS; @@ -60,6 +60,7 @@ static enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_10G_BASE_T: hw->mac.type = I40E_MAC_XL710; break; case I40E_DEV_ID_VF: @@ -4686,3 +4687,101 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, return status; } + +/** + * i40e_aq_send_msg_to_pf + * @hw: pointer to the hardware structure + * @v_opcode: opcodes for VF-PF communication + * @v_retval: return error code + * @msg: pointer to the msg buffer + * @msglen: msg length + * @cmd_details: pointer to command details + * + * Send message to PF driver using admin queue. By default, this message + * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for + * completion before returning. + **/ +enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum i40e_virtchnl_ops v_opcode, + enum i40e_status_code v_retval, + u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_asq_cmd_details details; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI); + desc.cookie_high = CPU_TO_LE32(v_opcode); + desc.cookie_low = CPU_TO_LE32(v_retval); + if (msglen) { + desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF + | I40E_AQ_FLAG_RD)); + if (msglen > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.datalen = CPU_TO_LE16(msglen); + } + if (!cmd_details) { + i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM); + details.async = TRUE; + cmd_details = &details; + } + status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg, + msglen, cmd_details); + return status; +} + +/** + * 
i40e_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct i40e_virtchnl_vf_resource *msg) +{ + struct i40e_virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + hw->dev_caps.dcb = msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_L2; + hw->dev_caps.fcoe = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0; + hw->dev_caps.iwarp = (msg->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + i40e_memcpy(hw->mac.perm_addr, + vsi_res->default_mac_addr, + I40E_ETH_LENGTH_OF_ADDRESS, + I40E_NONDMA_TO_NONDMA); + i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, + I40E_ETH_LENGTH_OF_ADDRESS, + I40E_NONDMA_TO_NONDMA); + } + vsi_res++; + } +} + +/** + * i40e_vf_reset + * @hw: pointer to the hardware structure + * + * Send a VF_RESET message to the PF. Does not wait for response from PF + * as none will be forthcoming. Immediately after calling this function, + * the admin queue should be shut down and (optionally) reinitialized. 
+ **/ +enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) +{ + return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + I40E_SUCCESS, NULL, 0, NULL); +} diff --git a/sys/dev/i40e/i40e_hmc.c b/sys/dev/ixl/i40e_hmc.c similarity index 100% rename from sys/dev/i40e/i40e_hmc.c rename to sys/dev/ixl/i40e_hmc.c diff --git a/sys/dev/i40e/i40e_hmc.h b/sys/dev/ixl/i40e_hmc.h similarity index 100% rename from sys/dev/i40e/i40e_hmc.h rename to sys/dev/ixl/i40e_hmc.h diff --git a/sys/dev/i40e/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c similarity index 100% rename from sys/dev/i40e/i40e_lan_hmc.c rename to sys/dev/ixl/i40e_lan_hmc.c diff --git a/sys/dev/i40e/i40e_lan_hmc.h b/sys/dev/ixl/i40e_lan_hmc.h similarity index 100% rename from sys/dev/i40e/i40e_lan_hmc.h rename to sys/dev/ixl/i40e_lan_hmc.h diff --git a/sys/dev/i40e/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c similarity index 100% rename from sys/dev/i40e/i40e_nvm.c rename to sys/dev/ixl/i40e_nvm.c diff --git a/sys/dev/i40e/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c similarity index 99% rename from sys/dev/i40e/i40e_osdep.c rename to sys/dev/ixl/i40e_osdep.c index cea801cbfeec..30e2e57fcef3 100755 --- a/sys/dev/i40e/i40e_osdep.c +++ b/sys/dev/ixl/i40e_osdep.c @@ -34,7 +34,7 @@ #include -#include "i40e.h" +#include "ixl.h" /******************************************************************** * Manage DMA'able memory. 
diff --git a/sys/dev/i40e/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h similarity index 84% rename from sys/dev/i40e/i40e_osdep.h rename to sys/dev/ixl/i40e_osdep.h index 2a81cda5575d..5479dd2e5523 100755 --- a/sys/dev/i40e/i40e_osdep.h +++ b/sys/dev/ixl/i40e_osdep.h @@ -54,9 +54,8 @@ #include #include -#define ASSERT(x) if(!(x)) panic("I40E: x") +#define ASSERT(x) if(!(x)) panic("IXL: x") -/* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */ #define i40e_usec_delay(x) DELAY(x) #define i40e_msec_delay(x) DELAY(1000*(x)) @@ -146,9 +145,10 @@ void prefetch(void *x) struct i40e_osdep { - bus_space_tag_t mem_bus_space_tag; - bus_space_handle_t mem_bus_space_handle; - struct device *dev; + bus_space_tag_t mem_bus_space_tag; + bus_space_handle_t mem_bus_space_handle; + bus_size_t mem_bus_space_size; + struct device *dev; }; struct i40e_dma_mem { @@ -166,8 +166,6 @@ struct i40e_hw; /* forward decl */ u16 i40e_read_pci_cfg(struct i40e_hw *, u32); void i40e_write_pci_cfg(struct i40e_hw *, u32, u16); -#define I40E_READ_PCIE_WORD i40e_read_pci_cfg - #define i40e_allocate_dma_mem(h, m, unused, s, a) i40e_allocate_dma(h, m, s, a) #define i40e_free_dma_mem(h, m) i40e_free_dma(h, m) @@ -181,17 +179,38 @@ struct i40e_virt_mem { #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt(h, m, s) #define i40e_free_virt_mem(h, m) i40e_free_virt(h, m) +/* +** This hardware supports either 16 or 32 byte rx descriptors +** we default here to the larger size. 
+*/ #define i40e_rx_desc i40e_32byte_rx_desc -#define rd32(a, reg) (\ - bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ - ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ - reg)) +static __inline uint32_t +rd32_osdep(struct i40e_osdep *osdep, uint32_t reg) +{ -#define wr32(a, reg, value) (\ - bus_space_write_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ - ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ - reg, value)) + KASSERT(reg < osdep->mem_bus_space_size, + ("ixl: register offset %#jx too large (max is %#jx)", + (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); + + return (bus_space_read_4(osdep->mem_bus_space_tag, + osdep->mem_bus_space_handle, reg)); +} + +static __inline void +wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value) +{ + + KASSERT(reg < osdep->mem_bus_space_size, + ("ixl: register offset %#jx too large (max is %#jx)", + (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); + + bus_space_write_4(osdep->mem_bus_space_tag, + osdep->mem_bus_space_handle, reg, value); +} + +#define rd32(a, reg) rd32_osdep((a)->back, (reg)) +#define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value)) #define rd64(a, reg) (\ bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ reg)) -#define i40e_flush(a) (\ +#define ixl_flush(a) (\ bus_space_read_4( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ I40E_GLGEN_STAT)) diff --git a/sys/dev/i40e/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h similarity index 99% rename from sys/dev/i40e/i40e_prototype.h rename to sys/dev/ixl/i40e_prototype.h index 9b2d7fc33144..db9e3cc5ebe8 100755 --- a/sys/dev/i40e/i40e_prototype.h +++ b/sys/dev/ixl/i40e_prototype.h @@ -104,11 +104,11 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, struct 
i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, bool atomic_reset); +enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size, bool crc_en, u16 pacing, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask, - struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw, u64 *advt_reg, struct i40e_asq_cmd_details *cmd_details); @@ -393,10 +393,8 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, u8 *bytes, int *); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); -#if defined(I40E_QV) || defined(VF_DRIVER) enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw); -#endif extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) diff --git a/sys/dev/i40e/i40e_register.h b/sys/dev/ixl/i40e_register.h similarity index 100% rename from sys/dev/i40e/i40e_register.h rename to sys/dev/ixl/i40e_register.h diff --git a/sys/dev/i40e/i40e_register_x710_int.h b/sys/dev/ixl/i40e_register_x710_int.h similarity index 100% rename from sys/dev/i40e/i40e_register_x710_int.h rename to sys/dev/ixl/i40e_register_x710_int.h diff --git a/sys/dev/i40e/i40e_status.h b/sys/dev/ixl/i40e_status.h similarity index 100% rename from sys/dev/i40e/i40e_status.h rename to sys/dev/ixl/i40e_status.h diff --git a/sys/dev/i40e/i40e_type.h b/sys/dev/ixl/i40e_type.h similarity index 99% rename from sys/dev/i40e/i40e_type.h rename to sys/dev/ixl/i40e_type.h index 093cfbd09606..c1d13f24ca8b 100755 --- a/sys/dev/i40e/i40e_type.h +++ b/sys/dev/ixl/i40e_type.h @@ -56,6 +56,7 @@ #define I40E_DEV_ID_QSFP_A 0x1583 #define I40E_DEV_ID_QSFP_B 0x1584 #define I40E_DEV_ID_QSFP_C 0x1585 +#define 
I40E_DEV_ID_10G_BASE_T 0x1586 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -532,10 +533,6 @@ struct i40e_hw { /* Admin Queue info */ struct i40e_adminq_info aq; -#ifdef I40E_QV - bool aq_dbg_ena; /* use Tools AQ instead of PF AQ */ - bool qv_force_init; -#endif /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; @@ -553,6 +550,7 @@ struct i40e_hw { /* debug mask */ u32 debug_mask; }; +#define i40e_is_vf(_hw) ((_hw)->mac.type == I40E_MAC_VF) struct i40e_driver_version { u8 major_version; diff --git a/sys/dev/i40e/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h similarity index 99% rename from sys/dev/i40e/i40e_virtchnl.h rename to sys/dev/ixl/i40e_virtchnl.h index 87e48aa99eb1..034d276265f4 100755 --- a/sys/dev/i40e/i40e_virtchnl.h +++ b/sys/dev/ixl/i40e_virtchnl.h @@ -87,6 +87,7 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_VIRTCHNL_OP_GET_STATS, I40E_VIRTCHNL_OP_FCOE, + I40E_VIRTCHNL_OP_CONFIG_RSS, /* PF sends status change events to vfs using * the following op. 
*/ diff --git a/sys/dev/i40e/if_i40e.c b/sys/dev/ixl/if_ixl.c similarity index 76% rename from sys/dev/i40e/if_i40e.c rename to sys/dev/ixl/if_ixl.c index 74e64bf698c4..de3f81770409 100755 --- a/sys/dev/i40e/if_i40e.c +++ b/sys/dev/ixl/if_ixl.c @@ -32,30 +32,27 @@ ******************************************************************************/ /*$FreeBSD$*/ -#ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_inet.h" #include "opt_inet6.h" -#endif - -#include "i40e.h" -#include "i40e_pf.h" +#include "ixl.h" +#include "ixl_pf.h" /********************************************************************* * Driver version *********************************************************************/ -char i40e_driver_version[] = "1.0.0"; +char ixl_driver_version[] = "1.2.2"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on - * Last field stores an index into i40e_strings + * Last field stores an index into ixl_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ -static i40e_vendor_info_t i40e_vendor_info_array[] = +static ixl_vendor_info_t ixl_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0}, @@ -64,6 +61,7 @@ static i40e_vendor_info_t i40e_vendor_info_array[] = {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; @@ -72,7 +70,7 @@ static i40e_vendor_info_t i40e_vendor_info_array[] = * Table of branding strings *********************************************************************/ -static char *i40e_strings[] = { +static char *ixl_strings[] = { "Intel(R) Ethernet Connection XL710 Driver" 
}; @@ -80,182 +78,209 @@ static char *i40e_strings[] = { /********************************************************************* * Function prototypes *********************************************************************/ -static int i40e_probe(device_t); -static int i40e_attach(device_t); -static int i40e_detach(device_t); -static int i40e_shutdown(device_t); -static int i40e_get_hw_capabilities(struct i40e_pf *); -static void i40e_cap_txcsum_tso(struct i40e_vsi *, struct ifnet *, int); -static int i40e_ioctl(struct ifnet *, u_long, caddr_t); -static void i40e_init(void *); -static void i40e_init_locked(struct i40e_pf *); -static void i40e_stop(struct i40e_pf *); -static void i40e_media_status(struct ifnet *, struct ifmediareq *); -static int i40e_media_change(struct ifnet *); -static void i40e_update_link_status(struct i40e_pf *); -static int i40e_allocate_pci_resources(struct i40e_pf *); -static u16 i40e_get_bus_info(struct i40e_hw *, device_t); -static int i40e_setup_stations(struct i40e_pf *); -static int i40e_setup_vsi(struct i40e_vsi *); -static int i40e_initialize_vsi(struct i40e_vsi *); -static int i40e_assign_vsi_msix(struct i40e_pf *); -static int i40e_assign_vsi_legacy(struct i40e_pf *); -static int i40e_init_msix(struct i40e_pf *); -static void i40e_configure_msix(struct i40e_pf *); -static void i40e_configure_itr(struct i40e_pf *); -static void i40e_configure_legacy(struct i40e_pf *); -static void i40e_free_pci_resources(struct i40e_pf *); -static void i40e_local_timer(void *); -static int i40e_setup_interface(device_t, struct i40e_vsi *); -static bool i40e_config_link(struct i40e_hw *); -static void i40e_config_rss(struct i40e_vsi *); -static void i40e_set_queue_rx_itr(struct i40e_queue *); -static void i40e_set_queue_tx_itr(struct i40e_queue *); +static int ixl_probe(device_t); +static int ixl_attach(device_t); +static int ixl_detach(device_t); +static int ixl_shutdown(device_t); +static int ixl_get_hw_capabilities(struct ixl_pf *); +static void 
ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); +static int ixl_ioctl(struct ifnet *, u_long, caddr_t); +static void ixl_init(void *); +static void ixl_init_locked(struct ixl_pf *); +static void ixl_stop(struct ixl_pf *); +static void ixl_media_status(struct ifnet *, struct ifmediareq *); +static int ixl_media_change(struct ifnet *); +static void ixl_update_link_status(struct ixl_pf *); +static int ixl_allocate_pci_resources(struct ixl_pf *); +static u16 ixl_get_bus_info(struct i40e_hw *, device_t); +static int ixl_setup_stations(struct ixl_pf *); +static int ixl_setup_vsi(struct ixl_vsi *); +static int ixl_initialize_vsi(struct ixl_vsi *); +static int ixl_assign_vsi_msix(struct ixl_pf *); +static int ixl_assign_vsi_legacy(struct ixl_pf *); +static int ixl_init_msix(struct ixl_pf *); +static void ixl_configure_msix(struct ixl_pf *); +static void ixl_configure_itr(struct ixl_pf *); +static void ixl_configure_legacy(struct ixl_pf *); +static void ixl_free_pci_resources(struct ixl_pf *); +static void ixl_local_timer(void *); +static int ixl_setup_interface(device_t, struct ixl_vsi *); +static bool ixl_config_link(struct i40e_hw *); +static void ixl_config_rss(struct ixl_vsi *); +static void ixl_set_queue_rx_itr(struct ixl_queue *); +static void ixl_set_queue_tx_itr(struct ixl_queue *); -static void i40e_enable_rings(struct i40e_vsi *); -static void i40e_disable_rings(struct i40e_vsi *); -static void i40e_enable_intr(struct i40e_vsi *); -static void i40e_disable_intr(struct i40e_vsi *); +static void ixl_enable_rings(struct ixl_vsi *); +static void ixl_disable_rings(struct ixl_vsi *); +static void ixl_enable_intr(struct ixl_vsi *); +static void ixl_disable_intr(struct ixl_vsi *); -static void i40e_enable_adminq(struct i40e_hw *); -static void i40e_disable_adminq(struct i40e_hw *); -static void i40e_enable_queue(struct i40e_hw *, int); -static void i40e_disable_queue(struct i40e_hw *, int); -static void i40e_enable_legacy(struct i40e_hw *); -static void 
i40e_disable_legacy(struct i40e_hw *); +static void ixl_enable_adminq(struct i40e_hw *); +static void ixl_disable_adminq(struct i40e_hw *); +static void ixl_enable_queue(struct i40e_hw *, int); +static void ixl_disable_queue(struct i40e_hw *, int); +static void ixl_enable_legacy(struct i40e_hw *); +static void ixl_disable_legacy(struct i40e_hw *); -static void i40e_set_promisc(struct i40e_vsi *); -static void i40e_add_multi(struct i40e_vsi *); -static void i40e_del_multi(struct i40e_vsi *); -static void i40e_register_vlan(void *, struct ifnet *, u16); -static void i40e_unregister_vlan(void *, struct ifnet *, u16); -static void i40e_setup_vlan_filters(struct i40e_vsi *); +static void ixl_set_promisc(struct ixl_vsi *); +static void ixl_add_multi(struct ixl_vsi *); +static void ixl_del_multi(struct ixl_vsi *); +static void ixl_register_vlan(void *, struct ifnet *, u16); +static void ixl_unregister_vlan(void *, struct ifnet *, u16); +static void ixl_setup_vlan_filters(struct ixl_vsi *); -static void i40e_init_filters(struct i40e_vsi *); -static void i40e_add_filter(struct i40e_vsi *, u8 *, s16 vlan); -static void i40e_del_filter(struct i40e_vsi *, u8 *, s16 vlan); -static void i40e_add_hw_filters(struct i40e_vsi *, int, int); -static void i40e_del_hw_filters(struct i40e_vsi *, int); -static struct i40e_mac_filter * - i40e_find_filter(struct i40e_vsi *, u8 *, s16); -static void i40e_add_mc_filter(struct i40e_vsi *, u8 *); +static void ixl_init_filters(struct ixl_vsi *); +static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); +static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); +static void ixl_add_hw_filters(struct ixl_vsi *, int, int); +static void ixl_del_hw_filters(struct ixl_vsi *, int); +static struct ixl_mac_filter * + ixl_find_filter(struct ixl_vsi *, u8 *, s16); +static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); /* Sysctl debug interface */ -static int i40e_debug_info(SYSCTL_HANDLER_ARGS); -static void i40e_print_debug_info(struct i40e_pf 
*); +static int ixl_debug_info(SYSCTL_HANDLER_ARGS); +static void ixl_print_debug_info(struct ixl_pf *); /* The MSI/X Interrupt handlers */ -static void i40e_intr(void *); -static void i40e_msix_que(void *); -static void i40e_msix_adminq(void *); -static void i40e_handle_mdd_event(struct i40e_pf *); +static void ixl_intr(void *); +static void ixl_msix_que(void *); +static void ixl_msix_adminq(void *); +static void ixl_handle_mdd_event(struct ixl_pf *); /* Deferred interrupt tasklets */ -static void i40e_do_adminq(void *, int); +static void ixl_do_adminq(void *, int); /* Sysctl handlers */ -static int i40e_set_flowcntl(SYSCTL_HANDLER_ARGS); -static int i40e_set_advertise(SYSCTL_HANDLER_ARGS); +static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); +static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); +static int ixl_current_speed(SYSCTL_HANDLER_ARGS); /* Statistics */ -static void i40e_add_hw_stats(struct i40e_pf *); -static void i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *, +static void ixl_add_hw_stats(struct ixl_pf *); +static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); -static void i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *, +static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_eth_stats *); -static void i40e_update_stats_counters(struct i40e_pf *); -static void i40e_update_eth_stats(struct i40e_vsi *); -static void i40e_pf_reset_stats(struct i40e_pf *); -static void i40e_vsi_reset_stats(struct i40e_vsi *); -static void i40e_stat_update48(struct i40e_hw *, u32, u32, bool, +static void ixl_update_stats_counters(struct ixl_pf *); +static void ixl_update_eth_stats(struct ixl_vsi *); +static void ixl_pf_reset_stats(struct ixl_pf *); +static void ixl_vsi_reset_stats(struct ixl_vsi *); +static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, u64 *, u64 *); -static void i40e_stat_update32(struct i40e_hw *, u32, bool, +static void 
ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); -#ifdef I40E_DEBUG -static int i40e_sysctl_link_status(SYSCTL_HANDLER_ARGS); -static int i40e_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); -static int i40e_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); -static int i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS); -static int i40e_sysctl_dump_txd(SYSCTL_HANDLER_ARGS); +#ifdef IXL_DEBUG +static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS); #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ -static device_method_t i40e_methods[] = { +static device_method_t ixl_methods[] = { /* Device interface */ - DEVMETHOD(device_probe, i40e_probe), - DEVMETHOD(device_attach, i40e_attach), - DEVMETHOD(device_detach, i40e_detach), - DEVMETHOD(device_shutdown, i40e_shutdown), + DEVMETHOD(device_probe, ixl_probe), + DEVMETHOD(device_attach, ixl_attach), + DEVMETHOD(device_detach, ixl_detach), + DEVMETHOD(device_shutdown, ixl_shutdown), {0, 0} }; -static driver_t i40e_driver = { - "ixl", i40e_methods, sizeof(struct i40e_pf), +static driver_t ixl_driver = { + "ixl", ixl_methods, sizeof(struct ixl_pf), }; -devclass_t i40e_devclass; -DRIVER_MODULE(i40e, pci, i40e_driver, i40e_devclass, 0, 0); +devclass_t ixl_devclass; +DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); -MODULE_DEPEND(i40e, pci, 1, 1, 1); -MODULE_DEPEND(i40e, ether, 1, 1, 1); +MODULE_DEPEND(ixl, pci, 1, 1, 1); +MODULE_DEPEND(ixl, ether, 1, 1, 1); /* ** Global reset mutex */ -static struct mtx i40e_reset_mtx; +static struct mtx ixl_reset_mtx; + +/* +** TUNEABLE PARAMETERS: +*/ + +static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, + "IXL driver 
parameters"); /* * MSIX should be the default for best performance, * but this allows it to be forced off for testing. */ -static int i40e_enable_msix = 1; -TUNABLE_INT("hw.i40e.enable_msix", &i40e_enable_msix); +static int ixl_enable_msix = 1; +TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); +SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, + "Enable MSI-X interrupts"); /* ** Number of descriptors per ring: ** - TX and RX are the same size */ -static int i40e_ringsz = DEFAULT_RING; -TUNABLE_INT("hw.i40e.ringsz", &i40e_ringsz); +static int ixl_ringsz = DEFAULT_RING; +TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz); +SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN, + &ixl_ringsz, 0, "Descriptor Ring Size"); /* ** This can be set manually, if left as 0 the ** number of queues will be calculated based ** on cpus and msix vectors available. */ -int i40e_max_queues = 0; -TUNABLE_INT("hw.i40e.max_queues", &i40e_max_queues); +int ixl_max_queues = 0; +TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); +SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, + &ixl_max_queues, 0, "Number of Queues"); /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ -int i40e_dynamic_rx_itr = 0; -TUNABLE_INT("hw.i40e.dynamic_rx_itr", &i40e_dynamic_rx_itr); -int i40e_dynamic_tx_itr = 0; -TUNABLE_INT("hw.i40e.dynamic_tx_itr", &i40e_dynamic_tx_itr); +int ixl_dynamic_rx_itr = 0; +TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); +SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, + &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); -int i40e_rx_itr = I40E_ITR_8K; -TUNABLE_INT("hw.i40e.rx_itr", &i40e_rx_itr); -int i40e_tx_itr = I40E_ITR_4K; -TUNABLE_INT("hw.i40e.tx_itr", &i40e_tx_itr); +int ixl_dynamic_tx_itr = 0; +TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); +SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, + &ixl_dynamic_tx_itr, 0, "Dynamic TX 
Interrupt Rate"); -#ifdef I40E_FDIR -static int i40e_enable_fdir = 1; -TUNABLE_INT("hw.i40e.enable_fdir", &i40e_enable_fdir); +int ixl_rx_itr = IXL_ITR_8K; +TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); +SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, + &ixl_rx_itr, 0, "RX Interrupt Rate"); + +int ixl_tx_itr = IXL_ITR_4K; +TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); +SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, + &ixl_tx_itr, 0, "TX Interrupt Rate"); + +#ifdef IXL_FDIR +static int ixl_enable_fdir = 1; +TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir); /* Rate at which we sample */ -int i40e_atr_rate = 20; -TUNABLE_INT("hw.i40e.atr_rate", &i40e_atr_rate); +int ixl_atr_rate = 20; +TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate); #endif +#ifdef DEV_NETMAP +#include +#endif /* DEV_NETMAP */ -static char *i40e_fc_string[6] = { +static char *ixl_fc_string[6] = { "None", "Rx", "Tx", @@ -268,23 +293,23 @@ static char *i40e_fc_string[6] = { /********************************************************************* * Device identification routine * - * i40e_probe determines if the driver should be loaded on + * ixl_probe determines if the driver should be loaded on * the hardware based on PCI vendor/device id of the device. 
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int -i40e_probe(device_t dev) +ixl_probe(device_t dev) { - i40e_vendor_info_t *ent; + ixl_vendor_info_t *ent; u16 pci_vendor_id, pci_device_id; u16 pci_subvendor_id, pci_subdevice_id; char device_name[256]; static bool lock_init = FALSE; - INIT_DEBUGOUT("i40e_probe: begin"); + INIT_DEBUGOUT("ixl_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != I40E_INTEL_VENDOR_ID) @@ -294,7 +319,7 @@ i40e_probe(device_t dev) pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); - ent = i40e_vendor_info_array; + ent = ixl_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && @@ -305,15 +330,15 @@ i40e_probe(device_t dev) ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == 0))) { sprintf(device_name, "%s, Version - %s", - i40e_strings[ent->index], - i40e_driver_version); + ixl_strings[ent->index], + ixl_driver_version); device_set_desc_copy(dev, device_name); /* One shot mutex init */ if (lock_init == FALSE) { lock_init = TRUE; - mtx_init(&i40e_reset_mtx, - "i40e_reset", - "I40E RESET Lock", MTX_DEF); + mtx_init(&ixl_reset_mtx, + "ixl_reset", + "IXL RESET Lock", MTX_DEF); } return (BUS_PROBE_DEFAULT); } @@ -333,15 +358,15 @@ i40e_probe(device_t dev) *********************************************************************/ static int -i40e_attach(device_t dev) +ixl_attach(device_t dev) { - struct i40e_pf *pf; + struct ixl_pf *pf; struct i40e_hw *hw; - struct i40e_vsi *vsi; + struct ixl_vsi *vsi; u16 bus; int error = 0; - INIT_DEBUGOUT("i40e_attach: begin"); + INIT_DEBUGOUT("ixl_attach: begin"); /* Allocate, clear, and link in our primary soft structure */ pf = device_get_softc(dev); @@ -356,7 +381,7 @@ i40e_attach(device_t dev) vsi->dev = pf->dev; /* Core Lock Init*/ - I40E_PF_LOCK_INIT(pf, 
device_get_nameunit(dev)); + IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); /* Set up the timer callout */ callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); @@ -365,58 +390,63 @@ i40e_attach(device_t dev) SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, i40e_set_flowcntl, "I", "Flow Control"); + pf, 0, ixl_set_flowcntl, "I", "Flow Control"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, i40e_set_advertise, "I", "Advertised Speed"); + pf, 0, ixl_set_advertise, "I", "Advertised Speed"); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, + pf, 0, ixl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, - &i40e_rx_itr, I40E_ITR_8K, "RX ITR"); + &ixl_rx_itr, IXL_ITR_8K, "RX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dynamic_rx_itr", CTLTYPE_INT | CTLFLAG_RW, - &i40e_dynamic_rx_itr, 0, "Dynamic RX ITR"); + &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, - &i40e_tx_itr, I40E_ITR_4K, "TX ITR"); + &ixl_tx_itr, IXL_ITR_4K, "TX ITR"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dynamic_tx_itr", CTLTYPE_INT | CTLFLAG_RW, - &i40e_dynamic_tx_itr, 0, "Dynamic TX ITR"); + &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR"); -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD, - pf, 0, 
i40e_sysctl_link_status, "A", "Current Link Status"); + pf, 0, ixl_sysctl_link_status, "A", "Current Link Status"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, - pf, 0, i40e_sysctl_phy_abilities, "A", "PHY Abilities"); + pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, - pf, 0, i40e_sysctl_sw_filter_list, "A", "SW Filter List"); + pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_res_info", CTLTYPE_STRING | CTLFLAG_RD, - pf, 0, i40e_sysctl_hw_res_info, "A", "HW Resource Allocation"); + pf, 0, ixl_sysctl_hw_res_info, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dump_desc", CTLTYPE_INT | CTLFLAG_WR, - pf, 0, i40e_sysctl_dump_txd, "I", "Desc dump"); + pf, 0, ixl_sysctl_dump_txd, "I", "Desc dump"); #endif /* Save off the information about this board */ @@ -432,7 +462,7 @@ i40e_attach(device_t dev) hw->bus.func = pci_get_function(dev); /* Do PCI setup - map BAR0, etc */ - if (i40e_allocate_pci_resources(pf)) { + if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_out; @@ -442,7 +472,7 @@ i40e_attach(device_t dev) SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0, - i40e_debug_info, "I", "Debug Information"); + ixl_debug_info, "I", "Debug Information"); /* Establish a clean starting point */ @@ -456,29 +486,29 @@ i40e_attach(device_t dev) /* For now always do an initial CORE reset on first device */ { - static int i40e_dev_count; - static int i40e_dev_track[32]; + static 
int ixl_dev_count; + static int ixl_dev_track[32]; u32 my_dev; int i, found = FALSE; u16 bus = pci_get_bus(dev); - mtx_lock(&i40e_reset_mtx); + mtx_lock(&ixl_reset_mtx); my_dev = (bus << 8) | hw->bus.device; - for (i = 0; i < i40e_dev_count; i++) { - if (i40e_dev_track[i] == my_dev) + for (i = 0; i < ixl_dev_count; i++) { + if (ixl_dev_track[i] == my_dev) found = TRUE; } if (!found) { u32 reg; - i40e_dev_track[i40e_dev_count] = my_dev; - i40e_dev_count++; + ixl_dev_track[ixl_dev_count] = my_dev; + ixl_dev_count++; - device_printf(dev, "Initial CORE RESET\n"); + INIT_DEBUGOUT("Initial CORE RESET\n"); wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); - i40e_flush(hw); + ixl_flush(hw); i = 50; do { i40e_msec_delay(50); @@ -493,14 +523,14 @@ i40e_attach(device_t dev) wr32(hw, I40E_PF_ATQBAH, 0); i40e_clear_pxe_mode(hw); } - mtx_unlock(&i40e_reset_mtx); + mtx_unlock(&ixl_reset_mtx); } /* Set admin queue parameters */ - hw->aq.num_arq_entries = I40E_AQ_LEN; - hw->aq.num_asq_entries = I40E_AQ_LEN; - hw->aq.arq_buf_size = I40E_AQ_BUFSZ; - hw->aq.asq_buf_size = I40E_AQ_BUFSZ; + hw->aq.num_arq_entries = IXL_AQ_LEN; + hw->aq.num_asq_entries = IXL_AQ_LEN; + hw->aq.arq_buf_size = IXL_AQ_BUFSZ; + hw->aq.asq_buf_size = IXL_AQ_BUFSZ; /* Initialize the shared code */ error = i40e_init_shared_code(hw); @@ -519,7 +549,7 @@ i40e_attach(device_t dev) " the network driver.\n"); goto err_out; } - device_printf(dev, "%s\n", i40e_fw_version_str(hw)); + device_printf(dev, "%s\n", ixl_fw_version_str(hw)); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) @@ -536,7 +566,7 @@ i40e_attach(device_t dev) i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ - error = i40e_get_hw_capabilities(pf); + error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "HW capabilities failure!\n"); goto err_get_cap; @@ -567,7 +597,7 @@ i40e_attach(device_t dev) bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); 
i40e_get_port_mac_addr(hw, hw->mac.port_addr); - if (i40e_setup_stations(pf) != 0) { + if (ixl_setup_stations(pf) != 0) { device_printf(dev, "setup stations failed!\n"); error = ENOMEM; goto err_mac_hmc; @@ -578,14 +608,21 @@ i40e_attach(device_t dev) /* Set up interrupt routing here */ if (pf->msix > 1) - error = i40e_assign_vsi_msix(pf); + error = ixl_assign_vsi_msix(pf); else - error = i40e_assign_vsi_legacy(pf); + error = ixl_assign_vsi_legacy(pf); if (error) goto err_late; + i40e_msec_delay(75); + error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); + if (error) { + device_printf(dev, "link restart failed, aq_err=%d\n", + pf->hw.aq.asq_last_status); + } + /* Determine link state */ - vsi->link_up = i40e_config_link(hw); + vsi->link_up = ixl_config_link(hw); /* Report if Unqualified modules are found */ if ((vsi->link_up == FALSE) && @@ -597,30 +634,33 @@ i40e_attach(device_t dev) "an unqualified module was detected\n"); /* Setup OS specific network interface */ - if (i40e_setup_interface(dev, vsi) != 0) + if (ixl_setup_interface(dev, vsi) != 0) goto err_late; /* Get the bus configuration and set the shared code */ - bus = i40e_get_bus_info(hw, dev); + bus = ixl_get_bus_info(hw, dev); i40e_set_pci_config_data(hw, bus); /* Initialize statistics */ - i40e_pf_reset_stats(pf); - i40e_update_stats_counters(pf); - i40e_add_hw_stats(pf); + ixl_pf_reset_stats(pf); + ixl_update_stats_counters(pf); + ixl_add_hw_stats(pf); /* Register for VLAN events */ vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - i40e_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); + ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - i40e_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); + ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); +#ifdef DEV_NETMAP + ixl_netmap_attach(pf); +#endif /* DEV_NETMAP */ - INIT_DEBUGOUT("i40e_attach: end"); + INIT_DEBUGOUT("ixl_attach: end"); return (0); err_late: - i40e_free_vsi(vsi); + ixl_free_vsi(vsi); 
err_mac_hmc: i40e_shutdown_lan_hmc(hw); err_get_cap: @@ -628,8 +668,8 @@ i40e_attach(device_t dev) err_out: if (vsi->ifp != NULL) if_free(vsi->ifp); - i40e_free_pci_resources(pf); - I40E_PF_LOCK_DESTROY(pf); + ixl_free_pci_resources(pf); + IXL_PF_LOCK_DESTROY(pf); return (error); } @@ -644,16 +684,15 @@ i40e_attach(device_t dev) *********************************************************************/ static int -i40e_detach(device_t dev) +ixl_detach(device_t dev) { - struct i40e_pf *pf = device_get_softc(dev); + struct ixl_pf *pf = device_get_softc(dev); struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; i40e_status status; - u32 reg; - INIT_DEBUGOUT("i40e_detach: begin"); + INIT_DEBUGOUT("ixl_detach: begin"); /* Make sure VLANS are not using driver */ if (vsi->ifp->if_vlantrunk != NULL) { @@ -661,9 +700,9 @@ i40e_detach(device_t dev) return (EBUSY); } - I40E_PF_LOCK(pf); - i40e_stop(pf); - I40E_PF_UNLOCK(pf); + IXL_PF_LOCK(pf); + ixl_stop(pf); + IXL_PF_UNLOCK(pf); for (int i = 0; i < vsi->num_queues; i++, que++) { if (que->tq) { @@ -673,8 +712,6 @@ i40e_detach(device_t dev) } } - /* Drain other tasks here */ - /* Shutdown LAN HMC */ status = i40e_shutdown_lan_hmc(hw); if (status) @@ -687,13 +724,6 @@ i40e_detach(device_t dev) device_printf(dev, "Shutdown Admin queue failed with code %d\n", status); - /* Now force a pf reset */ - reg = rd32(hw, I40E_PFGEN_CTRL); - reg |= I40E_PFGEN_CTRL_PFSWR_MASK; - wr32(hw, I40E_PFGEN_CTRL, reg); - //i40e_pf_reset(hw); - i40e_flush(hw); - /* Unregister VLAN events */ if (vsi->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); @@ -703,12 +733,15 @@ i40e_detach(device_t dev) ether_ifdetach(vsi->ifp); callout_drain(&pf->timer); +#ifdef DEV_NETMAP + netmap_detach(vsi->ifp); +#endif /* DEV_NETMAP */ - i40e_free_pci_resources(pf); + ixl_free_pci_resources(pf); bus_generic_detach(dev); 
if_free(vsi->ifp); - i40e_free_vsi(vsi); - I40E_PF_LOCK_DESTROY(pf); + ixl_free_vsi(vsi); + IXL_PF_LOCK_DESTROY(pf); return (0); } @@ -719,12 +752,12 @@ i40e_detach(device_t dev) **********************************************************************/ static int -i40e_shutdown(device_t dev) +ixl_shutdown(device_t dev) { - struct i40e_pf *pf = device_get_softc(dev); - I40E_PF_LOCK(pf); - i40e_stop(pf); - I40E_PF_UNLOCK(pf); + struct ixl_pf *pf = device_get_softc(dev); + IXL_PF_LOCK(pf); + ixl_stop(pf); + IXL_PF_UNLOCK(pf); return (0); } @@ -736,7 +769,7 @@ i40e_shutdown(device_t dev) **********************************************************************/ static int -i40e_get_hw_capabilities(struct i40e_pf *pf) +ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; @@ -772,7 +805,7 @@ i40e_get_hw_capabilities(struct i40e_pf *pf) /* Capture this PF's starting queue pair */ pf->qbase = hw->func_caps.base_queue; -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, " "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", hw->pf_id, hw->func_caps.num_vfs, @@ -788,7 +821,7 @@ i40e_get_hw_capabilities(struct i40e_pf *pf) } static void -i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) +ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) { device_t dev = vsi->dev; @@ -798,14 +831,14 @@ i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) if (mask & IFCAP_TXCSUM) { ifp->if_capenable |= IFCAP_TXCSUM; /* enable TXCSUM, restore TSO if previously enabled */ - if (vsi->flags & I40E_FLAGS_KEEP_TSO4) { - vsi->flags &= ~I40E_FLAGS_KEEP_TSO4; + if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { + vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; ifp->if_capenable |= IFCAP_TSO4; } } else if (mask & IFCAP_TSO4) { ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); - vsi->flags &= ~I40E_FLAGS_KEEP_TSO4; + vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; 
device_printf(dev, "TSO4 requires txcsum, enabling both...\n"); } @@ -818,7 +851,7 @@ i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) } else if((ifp->if_capenable & IFCAP_TXCSUM) && (ifp->if_capenable & IFCAP_TSO4)) { if (mask & IFCAP_TXCSUM) { - vsi->flags |= I40E_FLAGS_KEEP_TSO4; + vsi->flags |= IXL_FLAGS_KEEP_TSO4; ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); device_printf(dev, "TSO4 requires txcsum, disabling both...\n"); @@ -831,13 +864,13 @@ i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) && !(ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; - if (vsi->flags & I40E_FLAGS_KEEP_TSO6) { - vsi->flags &= ~I40E_FLAGS_KEEP_TSO6; + if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { + vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; ifp->if_capenable |= IFCAP_TSO6; } } else if (mask & IFCAP_TSO6) { ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - vsi->flags &= ~I40E_FLAGS_KEEP_TSO6; + vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; device_printf(dev, "TSO6 requires txcsum6, enabling both...\n"); } @@ -850,7 +883,7 @@ i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) && (ifp->if_capenable & IFCAP_TSO6)) { if (mask & IFCAP_TXCSUM_IPV6) { - vsi->flags |= I40E_FLAGS_KEEP_TSO6; + vsi->flags |= IXL_FLAGS_KEEP_TSO6; ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); device_printf(dev, "TSO6 requires txcsum6, disabling both...\n"); @@ -862,17 +895,17 @@ i40e_cap_txcsum_tso(struct i40e_vsi *vsi, struct ifnet *ifp, int mask) /********************************************************************* * Ioctl entry point * - * i40e_ioctl is called when the user wants to configure the + * ixl_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure **********************************************************************/ static int -i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) +ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { - struct i40e_vsi *vsi = ifp->if_softc; - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct ifreq *ifr = (struct ifreq *) data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; @@ -899,7 +932,7 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - i40e_init(pf); + ixl_init(pf); if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); } else @@ -908,54 +941,54 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) #endif case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (ifr->ifr_mtu > I40E_MAX_FRAME - + if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; } else { - I40E_PF_LOCK(pf); + IXL_PF_LOCK(pf); ifp->if_mtu = ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; - i40e_init_locked(pf); - I40E_PF_UNLOCK(pf); + ixl_init_locked(pf); + IXL_PF_UNLOCK(pf); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); - I40E_PF_LOCK(pf); + IXL_PF_LOCK(pf); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ pf->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { - i40e_set_promisc(vsi); + ixl_set_promisc(vsi); } } else - i40e_init_locked(pf); + ixl_init_locked(pf); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) - i40e_stop(pf); + ixl_stop(pf); pf->if_flags = ifp->if_flags; - I40E_PF_UNLOCK(pf); + IXL_PF_UNLOCK(pf); break; case SIOCADDMULTI: IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); if 
(ifp->if_drv_flags & IFF_DRV_RUNNING) { - I40E_PF_LOCK(pf); - i40e_disable_intr(vsi); - i40e_add_multi(vsi); - i40e_enable_intr(vsi); - I40E_PF_UNLOCK(pf); + IXL_PF_LOCK(pf); + ixl_disable_intr(vsi); + ixl_add_multi(vsi); + ixl_enable_intr(vsi); + IXL_PF_UNLOCK(pf); } break; case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - I40E_PF_LOCK(pf); - i40e_disable_intr(vsi); - i40e_del_multi(vsi); - i40e_enable_intr(vsi); - I40E_PF_UNLOCK(pf); + IXL_PF_LOCK(pf); + ixl_disable_intr(vsi); + ixl_del_multi(vsi); + ixl_enable_intr(vsi); + IXL_PF_UNLOCK(pf); } break; case SIOCSIFMEDIA: @@ -968,7 +1001,7 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); - i40e_cap_txcsum_tso(vsi, ifp, mask); + ixl_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; @@ -983,9 +1016,9 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - I40E_PF_LOCK(pf); - i40e_init_locked(pf); - I40E_PF_UNLOCK(pf); + IXL_PF_LOCK(pf); + ixl_init_locked(pf); + IXL_PF_UNLOCK(pf); } VLAN_CAPABILITIES(ifp); @@ -993,7 +1026,7 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) } default: - IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); + IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); error = ether_ioctl(ifp, command, data); break; } @@ -1014,10 +1047,10 @@ i40e_ioctl(struct ifnet * ifp, u_long command, caddr_t data) **********************************************************************/ static void -i40e_init_locked(struct i40e_pf *pf) +ixl_init_locked(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; struct i40e_filter_control_settings 
filter; @@ -1025,8 +1058,8 @@ i40e_init_locked(struct i40e_pf *pf) int ret; mtx_assert(&pf->pf_mtx, MA_OWNED); - INIT_DEBUGOUT("i40e_init: begin"); - i40e_stop(pf); + INIT_DEBUGOUT("ixl_init: begin"); + ixl_stop(pf); /* Get the latest mac address... User might use a LAA */ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, @@ -1058,63 +1091,56 @@ i40e_init_locked(struct i40e_pf *pf) bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; -#ifdef I40E_FDIR +#ifdef IXL_FDIR filter.enable_fdir = TRUE; #endif if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "set_filter_control() failed\n"); /* Set up RSS */ - i40e_config_rss(vsi); + ixl_config_rss(vsi); /* Setup the VSI */ - i40e_setup_vsi(vsi); + ixl_setup_vsi(vsi); /* ** Prepare the rings, hmc contexts, etc... */ - if (i40e_initialize_vsi(vsi)) { - device_printf(dev,"initialize vsi failed!!\n"); + if (ixl_initialize_vsi(vsi)) { + device_printf(dev, "initialize vsi failed!!\n"); return; } /* Add protocol filters to list */ - i40e_init_filters(vsi); + ixl_init_filters(vsi); /* Setup vlan's if needed */ - i40e_setup_vlan_filters(vsi); + ixl_setup_vlan_filters(vsi); /* Start the local timer */ - callout_reset(&pf->timer, hz, i40e_local_timer, pf); + callout_reset(&pf->timer, hz, ixl_local_timer, pf); /* Set up MSI/X routing and the ITR settings */ - if (i40e_enable_msix) { - i40e_configure_msix(pf); - i40e_configure_itr(pf); + if (ixl_enable_msix) { + ixl_configure_msix(pf); + ixl_configure_itr(pf); } else - i40e_configure_legacy(pf); + ixl_configure_legacy(pf); - i40e_enable_rings(vsi); + ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); - /* Flow control setup */ - /* NOTE: flow control currently doesn't work correctly */ - // i40e_set_fc_mode(pf, I40E_FC_FULL); - /* Set MTU in hardware*/ - if (ifp->if_mtu > ETHERMTU) { - int aq_error = - i40e_aq_set_mac_config(hw, vsi->max_frame_size, - TRUE, 0, NULL); - if (aq_error) - device_printf(vsi->dev, - "aq_set_mac_config in 
init error, code %d\n", - aq_error); - } + int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, + TRUE, 0, NULL); + if (aq_error) + device_printf(vsi->dev, + "aq_set_mac_config in init error, code %d\n", + aq_error); /* And now turn on interrupts */ - i40e_enable_intr(vsi); + ixl_enable_intr(vsi); /* Now inform the stack we're ready */ ifp->if_drv_flags |= IFF_DRV_RUNNING; @@ -1124,13 +1150,13 @@ i40e_init_locked(struct i40e_pf *pf) } static void -i40e_init(void *arg) +ixl_init(void *arg) { - struct i40e_pf *pf = arg; + struct ixl_pf *pf = arg; - I40E_PF_LOCK(pf); - i40e_init_locked(pf); - I40E_PF_UNLOCK(pf); + IXL_PF_LOCK(pf); + ixl_init_locked(pf); + IXL_PF_UNLOCK(pf); return; } @@ -1140,22 +1166,22 @@ i40e_init(void *arg) ** */ static void -i40e_handle_que(void *context, int pending) +ixl_handle_que(void *context, int pending) { - struct i40e_queue *que = context; - struct i40e_vsi *vsi = que->vsi; + struct ixl_queue *que = context; + struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - more = i40e_rxeof(que, I40E_RX_LIMIT); - I40E_TX_LOCK(txr); - i40e_txeof(que); + more = ixl_rxeof(que, IXL_RX_LIMIT); + IXL_TX_LOCK(txr); + ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) - i40e_mq_start_locked(ifp, txr); - I40E_TX_UNLOCK(txr); + ixl_mq_start_locked(ifp, txr); + IXL_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(que->tq, &que->task); return; @@ -1163,7 +1189,7 @@ i40e_handle_que(void *context, int pending) } /* Reenable this interrupt - hmmm */ - i40e_enable_queue(hw, que->me); + ixl_enable_queue(hw, que->me); return; } @@ -1174,12 +1200,12 @@ i40e_handle_que(void *context, int pending) * **********************************************************************/ void -i40e_intr(void *arg) +ixl_intr(void *arg) { - struct i40e_pf *pf = arg; + struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; - 
struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; struct ifnet *ifp = vsi->ifp; struct tx_ring *txr = &que->txr; u32 reg, icr0, mask; @@ -1204,13 +1230,13 @@ i40e_intr(void *arg) return; } - more_rx = i40e_rxeof(que, I40E_RX_LIMIT); + more_rx = ixl_rxeof(que, IXL_RX_LIMIT); - I40E_TX_LOCK(txr); - more_tx = i40e_txeof(que); + IXL_TX_LOCK(txr); + more_tx = ixl_txeof(que); if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; - I40E_TX_UNLOCK(txr); + IXL_TX_UNLOCK(txr); /* re-enable other interrupt causes */ wr32(hw, I40E_PFINT_ICR0_ENA, mask); @@ -1225,7 +1251,7 @@ i40e_intr(void *arg) reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK; wr32(hw, I40E_QINT_TQCTL(0), reg); - i40e_enable_legacy(hw); + ixl_enable_legacy(hw); return; } @@ -1237,10 +1263,10 @@ i40e_intr(void *arg) * **********************************************************************/ void -i40e_msix_que(void *arg) +ixl_msix_que(void *arg) { - struct i40e_queue *que = arg; - struct i40e_vsi *vsi = que->vsi; + struct ixl_queue *que = arg; + struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; bool more_tx, more_rx; @@ -1251,10 +1277,10 @@ i40e_msix_que(void *arg) ++que->irqs; - more_rx = i40e_rxeof(que, I40E_RX_LIMIT); + more_rx = ixl_rxeof(que, IXL_RX_LIMIT); - I40E_TX_LOCK(txr); - more_tx = i40e_txeof(que); + IXL_TX_LOCK(txr); + more_tx = ixl_txeof(que); /* ** Make certain that if the stack ** has anything queued the task gets @@ -1262,15 +1288,15 @@ i40e_msix_que(void *arg) */ if (!drbr_empty(vsi->ifp, txr->br)) more_tx = 1; - I40E_TX_UNLOCK(txr); + IXL_TX_UNLOCK(txr); - i40e_set_queue_rx_itr(que); - i40e_set_queue_tx_itr(que); + ixl_set_queue_rx_itr(que); + ixl_set_queue_tx_itr(que); if (more_tx || more_rx) taskqueue_enqueue(que->tq, &que->task); else - i40e_enable_queue(hw, que->me); + ixl_enable_queue(hw, que->me); return; } @@ -1282,9 +1308,9 @@ i40e_msix_que(void *arg) * 
**********************************************************************/ static void -i40e_msix_adminq(void *arg) +ixl_msix_adminq(void *arg) { - struct i40e_pf *pf = arg; + struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; u32 reg, mask; @@ -1298,7 +1324,7 @@ i40e_msix_adminq(void *arg) mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { - i40e_handle_mdd_event(pf); + ixl_handle_mdd_event(pf); mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; } @@ -1322,22 +1348,22 @@ i40e_msix_adminq(void *arg) * **********************************************************************/ static void -i40e_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) +ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { - struct i40e_vsi *vsi = ifp->if_softc; - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; - INIT_DEBUGOUT("i40e_media_status: begin"); - I40E_PF_LOCK(pf); + INIT_DEBUGOUT("ixl_media_status: begin"); + IXL_PF_LOCK(pf); - i40e_update_link_status(pf); + ixl_update_link_status(pf); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!vsi->link_up) { - I40E_PF_UNLOCK(pf); + IXL_PF_UNLOCK(pf); return; } @@ -1371,6 +1397,9 @@ i40e_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; + case I40E_PHY_TYPE_10GBASE_T: + ifmr->ifm_active |= IFM_10G_T; + break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: @@ -1392,7 +1421,7 @@ i40e_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; - I40E_PF_UNLOCK(pf); + IXL_PF_UNLOCK(pf); return; } @@ -1406,12 +1435,12 @@ i40e_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) * **********************************************************************/ static 
int -i40e_media_change(struct ifnet * ifp) +ixl_media_change(struct ifnet * ifp) { - struct i40e_vsi *vsi = ifp->if_softc; + struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; - INIT_DEBUGOUT("i40e_media_change: begin"); + INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); @@ -1422,7 +1451,7 @@ i40e_media_change(struct ifnet * ifp) } -#ifdef I40E_FDIR +#ifdef IXL_FDIR /* ** ATR: Application Targetted Receive - creates a filter ** based on TX flow info that will keep the receive @@ -1430,16 +1459,16 @@ i40e_media_change(struct ifnet * ifp) ** implementation this is only available for TCP connections */ void -i40e_atr(struct i40e_queue *que, struct tcphdr *th, int etype) +ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct tx_ring *txr = &que->txr; struct i40e_filter_program_desc *FDIR; u32 ptype, dtype; int idx; /* check if ATR is enabled and sample rate */ - if ((!i40e_enable_fdir) || (!txr->atr_rate)) + if ((!ixl_enable_fdir) || (!txr->atr_rate)) return; /* ** We sample all TCP SYN/FIN packets, @@ -1496,7 +1525,7 @@ i40e_atr(struct i40e_queue *que, struct tcphdr *th, int etype) static void -i40e_set_promisc(struct i40e_vsi *vsi) +ixl_set_promisc(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; @@ -1537,14 +1566,14 @@ i40e_set_promisc(struct i40e_vsi *vsi) * *********************************************************************/ static void -i40e_add_multi(struct i40e_vsi *vsi) +ixl_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; - IOCTL_DEBUGOUT("i40e_add_multi: begin"); + IOCTL_DEBUGOUT("ixl_add_multi: begin"); if_maddr_rlock(ifp); /* @@ -1560,7 +1589,7 @@ i40e_add_multi(struct i40e_vsi *vsi) if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ - 
i40e_del_hw_filters(vsi, mcnt); + ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; @@ -1571,35 +1600,35 @@ i40e_add_multi(struct i40e_vsi *vsi) TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; - i40e_add_mc_filter(vsi, + ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { - flags = (I40E_FILTER_ADD | I40E_FILTER_USED | I40E_FILTER_MC); - i40e_add_hw_filters(vsi, flags, mcnt); + flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); + ixl_add_hw_filters(vsi, flags, mcnt); } - IOCTL_DEBUGOUT("i40e_add_multi: end"); + IOCTL_DEBUGOUT("ixl_add_multi: end"); return; } static void -i40e_del_multi(struct i40e_vsi *vsi) +ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ifmultiaddr *ifma; - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; int mcnt = 0; bool match = FALSE; - IOCTL_DEBUGOUT("i40e_del_multi: begin"); + IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, &vsi->ftl, next) { - if ((f->flags & I40E_FILTER_USED) && (f->flags & I40E_FILTER_MC)) { + if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) @@ -1611,7 +1640,7 @@ i40e_del_multi(struct i40e_vsi *vsi) } } if (match == FALSE) { - f->flags |= I40E_FILTER_DEL; + f->flags |= IXL_FILTER_DEL; mcnt++; } } @@ -1619,7 +1648,7 @@ i40e_del_multi(struct i40e_vsi *vsi) if_maddr_runlock(ifp); if (mcnt > 0) - i40e_del_hw_filters(vsi, mcnt); + ixl_del_hw_filters(vsi, mcnt); } @@ -1632,12 +1661,12 @@ i40e_del_multi(struct i40e_vsi *vsi) **********************************************************************/ static void -i40e_local_timer(void *arg) +ixl_local_timer(void *arg) { - struct i40e_pf *pf = arg; + struct 
ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; int hung = 0; u32 mask; @@ -1648,7 +1677,7 @@ i40e_local_timer(void *arg) taskqueue_enqueue(pf->tq, &pf->adminq); /* Update stats */ - i40e_update_stats_counters(pf); + ixl_update_stats_counters(pf); /* ** Check status of the queues @@ -1665,7 +1694,7 @@ i40e_local_timer(void *arg) ** are uncleaned descriptors it increments busy. If ** we get to 5 we declare it hung. */ - if (que->busy == I40E_QUEUE_HUNG) { + if (que->busy == IXL_QUEUE_HUNG) { ++hung; /* Mark the queue as inactive */ vsi->active_queues &= ~((u64)1 << que->me); @@ -1675,10 +1704,10 @@ i40e_local_timer(void *arg) if ((vsi->active_queues & ((u64)1 << que->me)) == 0) vsi->active_queues |= ((u64)1 << que->me); } - if (que->busy >= I40E_MAX_TX_BUSY) { + if (que->busy >= IXL_MAX_TX_BUSY) { device_printf(dev,"Warning queue %d " "appears to be hung!\n", i); - que->busy = I40E_QUEUE_HUNG; + que->busy = IXL_QUEUE_HUNG; ++hung; } } @@ -1686,12 +1715,12 @@ i40e_local_timer(void *arg) if (hung == vsi->num_queues) goto hung; - callout_reset(&pf->timer, hz, i40e_local_timer, pf); + callout_reset(&pf->timer, hz, ixl_local_timer, pf); return; hung: device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n"); - i40e_init_locked(pf); + ixl_init_locked(pf); } /* @@ -1700,9 +1729,9 @@ i40e_local_timer(void *arg) ** a link interrupt. */ static void -i40e_update_link_status(struct i40e_pf *pf) +ixl_update_link_status(struct ixl_pf *pf) { - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = vsi->ifp; device_t dev = pf->dev; @@ -1717,7 +1746,7 @@ i40e_update_link_status(struct i40e_pf *pf) device_printf(dev,"Link is up %d Gbps %s," " Flow Control: %s\n", ((vsi->link_speed == I40E_LINK_SPEED_40GB)? 
40:10), - "Full Duplex", i40e_fc_string[fc]); + "Full Duplex", ixl_fc_string[fc]); } vsi->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); @@ -1742,16 +1771,16 @@ i40e_update_link_status(struct i40e_pf *pf) **********************************************************************/ static void -i40e_stop(struct i40e_pf *pf) +ixl_stop(struct ixl_pf *pf) { - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; mtx_assert(&pf->pf_mtx, MA_OWNED); - INIT_DEBUGOUT("i40e_stop: begin\n"); - i40e_disable_intr(vsi); - i40e_disable_rings(vsi); + INIT_DEBUGOUT("ixl_stop: begin\n"); + ixl_disable_intr(vsi); + ixl_disable_rings(vsi); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); @@ -1769,11 +1798,11 @@ i40e_stop(struct i40e_pf *pf) * **********************************************************************/ static int -i40e_assign_vsi_legacy(struct i40e_pf *pf) +ixl_assign_vsi_legacy(struct ixl_pf *pf) { device_t dev = pf->dev; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; int error, rid = 0; if (pf->msix == 1) @@ -1789,21 +1818,21 @@ i40e_assign_vsi_legacy(struct i40e_pf *pf) /* Set the handler function */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, - i40e_intr, pf, &pf->tag); + ixl_intr, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "Failed to register legacy/msi handler"); return (error); } bus_describe_intr(dev, pf->res, pf->tag, "irq0"); - TASK_INIT(&que->tx_task, 0, i40e_deferred_mq_start, que); - TASK_INIT(&que->task, 0, i40e_handle_que, que); - que->tq = taskqueue_create_fast("i40e_que", M_NOWAIT, + TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); + TASK_INIT(&que->task, 0, ixl_handle_que, que); + que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); 
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(dev)); - TASK_INIT(&pf->adminq, 0, i40e_do_adminq, pf); - pf->tq = taskqueue_create_fast("i40e_adm", M_NOWAIT, + TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); + pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", device_get_nameunit(dev)); @@ -1818,11 +1847,11 @@ i40e_assign_vsi_legacy(struct i40e_pf *pf) * **********************************************************************/ static int -i40e_assign_vsi_msix(struct i40e_pf *pf) +ixl_assign_vsi_msix(struct ixl_pf *pf) { device_t dev = pf->dev; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; struct tx_ring *txr; int error, rid, vector = 0; @@ -1838,7 +1867,7 @@ i40e_assign_vsi_msix(struct i40e_pf *pf) /* Set the adminq vector and handler */ error = bus_setup_intr(dev, pf->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, - i40e_msix_adminq, pf, &pf->tag); + ixl_msix_adminq, pf, &pf->tag); if (error) { pf->res = NULL; device_printf(dev, "Failed to register Admin que handler"); @@ -1847,8 +1876,8 @@ i40e_assign_vsi_msix(struct i40e_pf *pf) bus_describe_intr(dev, pf->res, pf->tag, "aq"); pf->admvec = vector; /* Tasklet for Admin Queue */ - TASK_INIT(&pf->adminq, 0, i40e_do_adminq, pf); - pf->tq = taskqueue_create_fast("i40e_adm", M_NOWAIT, + TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); + pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, taskqueue_thread_enqueue, &pf->tq); taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", device_get_nameunit(pf->dev)); @@ -1868,7 +1897,7 @@ i40e_assign_vsi_msix(struct i40e_pf *pf) /* Set the handler function */ error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, - i40e_msix_que, que, &que->tag); + ixl_msix_que, que, &que->tag); if (error) { que->res = NULL; device_printf(dev, "Failed to register que 
handler"); @@ -1878,9 +1907,9 @@ i40e_assign_vsi_msix(struct i40e_pf *pf) /* Bind the vector to a CPU */ bus_bind_intr(dev, que->res, i); que->msix = vector; - TASK_INIT(&que->tx_task, 0, i40e_deferred_mq_start, que); - TASK_INIT(&que->task, 0, i40e_handle_que, que); - que->tq = taskqueue_create_fast("i40e_que", M_NOWAIT, + TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); + TASK_INIT(&que->task, 0, ixl_handle_que, que); + que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(pf->dev)); @@ -1894,13 +1923,13 @@ i40e_assign_vsi_msix(struct i40e_pf *pf) * Allocate MSI/X vectors */ static int -i40e_init_msix(struct i40e_pf *pf) +ixl_init_msix(struct ixl_pf *pf) { device_t dev = pf->dev; int rid, want, vectors, queues, available; /* Override by tuneable */ - if (i40e_enable_msix == 0) + if (ixl_enable_msix == 0) goto msi; /* @@ -1925,7 +1954,7 @@ i40e_init_msix(struct i40e_pf *pf) } /* First try MSI/X */ - rid = PCIR_BAR(I40E_BAR); + rid = PCIR_BAR(IXL_BAR); pf->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!pf->msix_mem) { @@ -1947,8 +1976,8 @@ i40e_init_msix(struct i40e_pf *pf) queues = (mp_ncpus > (available - 1)) ? 
(available - 1) : mp_ncpus; /* Override with hardcoded value if sane */ - if ((i40e_max_queues != 0) && (i40e_max_queues <= queues)) - queues = i40e_max_queues; + if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) + queues = ixl_max_queues; /* ** Want one vector (RX/TX pair) per queue @@ -1976,8 +2005,8 @@ i40e_init_msix(struct i40e_pf *pf) vectors = pci_msi_count(dev); pf->vsi.num_queues = 1; pf->msix = 1; - i40e_max_queues = 1; - i40e_enable_msix = 0; + ixl_max_queues = 1; + ixl_enable_msix = 0; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) device_printf(pf->dev,"Using an MSI interrupt\n"); else { @@ -1992,10 +2021,10 @@ i40e_init_msix(struct i40e_pf *pf) * Plumb MSI/X vectors */ static void -i40e_configure_msix(struct i40e_pf *pf) +ixl_configure_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; u32 reg; u16 vector = 1; @@ -2013,7 +2042,7 @@ i40e_configure_msix(struct i40e_pf *pf) wr32(hw, I40E_PFINT_ICR0_ENA, reg); wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); - wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), 0x003E); + wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | @@ -2027,19 +2056,19 @@ i40e_configure_msix(struct i40e_pf *pf) wr32(hw, I40E_PFINT_LNKLSTN(i), i); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | - (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | + (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(i), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | - (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | + (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); if (i == (vsi->num_queues - 1)) - reg |= (I40E_QUEUE_EOL 
+ reg |= (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } @@ -2049,7 +2078,7 @@ i40e_configure_msix(struct i40e_pf *pf) * Configure for MSI single vector operation */ static void -i40e_configure_legacy(struct i40e_pf *pf) +ixl_configure_legacy(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; @@ -2085,13 +2114,13 @@ i40e_configure_legacy(struct i40e_pf *pf) /* Associate the queue pair to the vector and enable the q int */ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK - | (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) + | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK - | (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) - | (I40E_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); + | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) + | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), reg); /* Next enable the queue pair */ @@ -2109,37 +2138,37 @@ i40e_configure_legacy(struct i40e_pf *pf) * Set the Initial ITR state */ static void -i40e_configure_itr(struct i40e_pf *pf) +ixl_configure_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; - vsi->rx_itr_setting = i40e_rx_itr; - if (i40e_dynamic_rx_itr) - vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; - vsi->tx_itr_setting = i40e_tx_itr; - if (i40e_dynamic_tx_itr) - vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; + vsi->rx_itr_setting = ixl_rx_itr; + if (ixl_dynamic_rx_itr) + vsi->rx_itr_setting |= IXL_ITR_DYNAMIC; + vsi->tx_itr_setting = ixl_tx_itr; + if (ixl_dynamic_tx_itr) + vsi->tx_itr_setting |= IXL_ITR_DYNAMIC; for (int i = 0; i < vsi->num_queues; i++, que++) { struct tx_ring *txr = &que->txr; struct rx_ring *rxr = &que->rxr; - wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, i), + wr32(hw, 
I40E_PFINT_ITRN(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; - rxr->latency = I40E_AVE_LATENCY; - wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, i), + rxr->latency = IXL_AVE_LATENCY; + wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; - txr->latency = I40E_AVE_LATENCY; + txr->latency = IXL_AVE_LATENCY; } } static int -i40e_allocate_pci_resources(struct i40e_pf *pf) +ixl_allocate_pci_resources(struct ixl_pf *pf) { int rid; device_t dev = pf->dev; @@ -2157,6 +2186,7 @@ i40e_allocate_pci_resources(struct i40e_pf *pf) rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); + pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; @@ -2166,22 +2196,22 @@ i40e_allocate_pci_resources(struct i40e_pf *pf) ** return us the number of supported ** vectors. (Will be 1 for MSI) */ - pf->msix = i40e_init_msix(pf); + pf->msix = ixl_init_msix(pf); return (0); } static void -i40e_free_pci_resources(struct i40e_pf * pf) +ixl_free_pci_resources(struct ixl_pf * pf) { - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; device_t dev = pf->dev; int rid, memrid; - memrid = PCIR_BAR(I40E_BAR); + memrid = PCIR_BAR(IXL_BAR); /* We may get here before stations are setup */ - if ((!i40e_enable_msix) || (que == NULL)) + if ((!ixl_enable_msix) || (que == NULL)) goto early; /* @@ -2232,15 +2262,15 @@ i40e_free_pci_resources(struct i40e_pf * pf) * **********************************************************************/ static int -i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) +ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) { struct ifnet *ifp; struct i40e_hw *hw = vsi->hw; - struct i40e_queue *que = vsi->queues; + struct ixl_queue *que = vsi->queues; struct i40e_aq_get_phy_abilities_resp abilities_resp; 
enum i40e_status_code aq_error = 0; - INIT_DEBUGOUT("i40e_setup_interface: begin"); + INIT_DEBUGOUT("ixl_setup_interface: begin"); ifp = vsi->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { @@ -2250,14 +2280,14 @@ i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 4000000000; // ?? - ifp->if_init = i40e_init; + ifp->if_init = ixl_init; ifp->if_softc = vsi; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_ioctl = i40e_ioctl; + ifp->if_ioctl = ixl_ioctl; - ifp->if_transmit = i40e_mq_start; + ifp->if_transmit = ixl_mq_start; - ifp->if_qflush = i40e_qflush; + ifp->if_qflush = ixl_qflush; ifp->if_snd.ifq_maxlen = que->num_desc - 2; @@ -2290,7 +2320,7 @@ i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If - ** using vlans directly on the i40e driver you can + ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. 
*/ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; @@ -2299,8 +2329,8 @@ i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) * Specify the media types supported by this adapter and register * callbacks to update media and link information */ - ifmedia_init(&vsi->media, IFM_IMASK, i40e_media_change, - i40e_media_status); + ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, + ixl_media_status); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities_resp, NULL); if (aq_error) { @@ -2322,6 +2352,8 @@ i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR)) ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); + if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_10GBASE_T)) + ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) || abilities_resp.phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4)) @@ -2339,13 +2371,13 @@ i40e_setup_interface(device_t dev, struct i40e_vsi *vsi) } static bool -i40e_config_link(struct i40e_hw *hw) +ixl_config_link(struct i40e_hw *hw) { bool check; i40e_aq_get_link_info(hw, TRUE, NULL, NULL); check = i40e_get_link_status(hw); -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG printf("Link is %s\n", check ? 
"up":"down"); #endif return (check); @@ -2357,7 +2389,7 @@ i40e_config_link(struct i40e_hw *hw) * **********************************************************************/ static int -i40e_setup_vsi(struct i40e_vsi *vsi) +ixl_setup_vsi(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; device_t dev = vsi->dev; @@ -2374,7 +2406,7 @@ i40e_setup_vsi(struct i40e_vsi *vsi) device_printf(dev,"aq_get_switch_config failed!!\n"); return (ret); } -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG printf("Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); printf("type=%d seid=%d uplink=%d downlink=%d\n", @@ -2394,7 +2426,7 @@ i40e_setup_vsi(struct i40e_vsi *vsi) device_printf(dev,"get vsi params failed %x!!\n", ret); return (ret); } -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, " "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid, @@ -2425,7 +2457,7 @@ i40e_setup_vsi(struct i40e_vsi *vsi) memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); /* Reset VSI statistics */ - i40e_vsi_reset_stats(vsi); + ixl_vsi_reset_stats(vsi); vsi->hw_filters_add = 0; vsi->hw_filters_del = 0; @@ -2445,9 +2477,9 @@ i40e_setup_vsi(struct i40e_vsi *vsi) * **********************************************************************/ static int -i40e_initialize_vsi(struct i40e_vsi *vsi) +ixl_initialize_vsi(struct ixl_vsi *vsi) { - struct i40e_queue *que = vsi->queues; + struct ixl_queue *que = vsi->queues; device_t dev = vsi->dev; struct i40e_hw *hw = vsi->hw; int err = 0; @@ -2490,10 +2522,10 @@ i40e_initialize_vsi(struct i40e_vsi *vsi) txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(i), txctl); - i40e_flush(hw); + ixl_flush(hw); /* Do ring (re)init */ - i40e_init_tx_ring(que); + ixl_init_tx_ring(que); /* Next setup the HMC RX Context */ if (vsi->max_frame_size <= 2048) 
@@ -2520,6 +2552,12 @@ i40e_initialize_vsi(struct i40e_vsi *vsi) rctx.tphdata_ena = 0; rctx.tphhead_ena = 0; rctx.lrxqthresh = 2; +#ifdef DEV_NETMAP + /* "CRC strip in netmap is conditional" */ + if (vsi->ifp->if_capenable & IFCAP_NETMAP && !ixl_crcstrip) + rctx.crcstrip = 0; + else +#endif /* DEV_NETMAP */ rctx.crcstrip = 1; rctx.l2tsel = 1; rctx.showiv = 1; @@ -2537,12 +2575,27 @@ i40e_initialize_vsi(struct i40e_vsi *vsi) device_printf(dev, "Unable to set RX context %d\n", i); break; } - err = i40e_init_rx_ring(que); + err = ixl_init_rx_ring(que); if (err) { device_printf(dev, "Fail in init_rx_ring %d\n", i); break; } wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0); +#ifdef DEV_NETMAP + /* TODO appropriately comment + * Code based on netmap code in ixgbe_init_locked() + * Messes with what the software sets as queue + * descriptor tail in hardware. + */ + if (vsi->ifp->if_capenable & IFCAP_NETMAP) + { + struct netmap_adapter *na = NA(vsi->ifp); + struct netmap_kring *kring = &na->rx_rings[que->me]; + int t = na->num_rx_desc - 1 - kring->nr_hwavail; + + wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); + } else +#endif /* DEV_NETMAP */ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); } return (err); @@ -2555,11 +2608,11 @@ i40e_initialize_vsi(struct i40e_vsi *vsi) * **********************************************************************/ void -i40e_free_vsi(struct i40e_vsi *vsi) +ixl_free_vsi(struct ixl_vsi *vsi) { - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; - struct i40e_queue *que = vsi->queues; - struct i40e_mac_filter *f; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; + struct ixl_queue *que = vsi->queues; + struct ixl_mac_filter *f; /* Free station queues */ for (int i = 0; i < vsi->num_queues; i++, que++) { @@ -2568,21 +2621,21 @@ i40e_free_vsi(struct i40e_vsi *vsi) if (!mtx_initialized(&txr->mtx)) /* uninitialized */ continue; - I40E_TX_LOCK(txr); - i40e_free_que_tx(que); + IXL_TX_LOCK(txr); + ixl_free_que_tx(que); if (txr->base) i40e_free_dma(&pf->hw, 
&txr->dma); - I40E_TX_UNLOCK(txr); - I40E_TX_LOCK_DESTROY(txr); + IXL_TX_UNLOCK(txr); + IXL_TX_LOCK_DESTROY(txr); if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ continue; - I40E_RX_LOCK(rxr); - i40e_free_que_rx(que); + IXL_RX_LOCK(rxr); + ixl_free_que_rx(que); if (rxr->base) i40e_free_dma(&pf->hw, &rxr->dma); - I40E_RX_UNLOCK(rxr); - I40E_RX_LOCK_DESTROY(rxr); + IXL_RX_UNLOCK(rxr); + IXL_RX_LOCK_DESTROY(rxr); } free(vsi->queues, M_DEVBUF); @@ -2604,11 +2657,11 @@ i40e_free_vsi(struct i40e_vsi *vsi) * **********************************************************************/ static int -i40e_setup_stations(struct i40e_pf *pf) +ixl_setup_stations(struct ixl_pf *pf) { device_t dev = pf->dev; - struct i40e_vsi *vsi; - struct i40e_queue *que; + struct ixl_vsi *vsi; + struct ixl_queue *que; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; @@ -2622,7 +2675,7 @@ i40e_setup_stations(struct i40e_pf *pf) /* Get memory for the station queues */ if (!(vsi->queues = - (struct i40e_queue *) malloc(sizeof(struct i40e_queue) * + (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; @@ -2631,7 +2684,7 @@ i40e_setup_stations(struct i40e_pf *pf) for (int i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; - que->num_desc = i40e_ringsz; + que->num_desc = ixl_ringsz; que->me = i; que->vsi = vsi; /* mark the queue as active */ @@ -2658,7 +2711,7 @@ i40e_setup_stations(struct i40e_pf *pf) txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ - if (i40e_allocate_tx_data(que)) { + if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; @@ -2699,7 +2752,7 @@ i40e_setup_stations(struct i40e_pf *pf) bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring*/ - if (i40e_allocate_rx_data(que)) { 
+ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; @@ -2729,9 +2782,9 @@ i40e_setup_stations(struct i40e_pf *pf) ** interrupt moderation value. */ static void -i40e_set_queue_rx_itr(struct i40e_queue *que) +ixl_set_queue_rx_itr(struct ixl_queue *que) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; @@ -2743,31 +2796,31 @@ i40e_set_queue_rx_itr(struct i40e_queue *que) if (rxr->bytes == 0) return; - if (i40e_dynamic_rx_itr) { + if (ixl_dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { - case I40E_LOW_LATENCY: + case IXL_LOW_LATENCY: if (rx_bytes > 10) { - rx_latency = I40E_AVE_LATENCY; - rx_itr = I40E_ITR_20K; + rx_latency = IXL_AVE_LATENCY; + rx_itr = IXL_ITR_20K; } break; - case I40E_AVE_LATENCY: + case IXL_AVE_LATENCY: if (rx_bytes > 20) { - rx_latency = I40E_BULK_LATENCY; - rx_itr = I40E_ITR_8K; + rx_latency = IXL_BULK_LATENCY; + rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { - rx_latency = I40E_LOW_LATENCY; - rx_itr = I40E_ITR_100K; + rx_latency = IXL_LOW_LATENCY; + rx_itr = IXL_ITR_100K; } break; - case I40E_BULK_LATENCY: + case IXL_BULK_LATENCY: if (rx_bytes <= 20) { - rx_latency = I40E_AVE_LATENCY; - rx_itr = I40E_ITR_20K; + rx_latency = IXL_AVE_LATENCY; + rx_itr = IXL_ITR_20K; } break; } @@ -2778,17 +2831,17 @@ i40e_set_queue_rx_itr(struct i40e_queue *que) /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); - rxr->itr = rx_itr & I40E_MAX_ITR; - wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, + rxr->itr = rx_itr & IXL_MAX_ITR; + wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ - if (vsi->rx_itr_setting & I40E_ITR_DYNAMIC) - vsi->rx_itr_setting = i40e_rx_itr; + if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) + vsi->rx_itr_setting = 
ixl_rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; - wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, + wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, que->me), rxr->itr); } } @@ -2803,9 +2856,9 @@ i40e_set_queue_rx_itr(struct i40e_queue *que) ** interrupt moderation value. */ static void -i40e_set_queue_tx_itr(struct i40e_queue *que) +ixl_set_queue_tx_itr(struct ixl_queue *que) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; @@ -2817,30 +2870,30 @@ i40e_set_queue_tx_itr(struct i40e_queue *que) if (txr->bytes == 0) return; - if (i40e_dynamic_tx_itr) { + if (ixl_dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { - case I40E_LOW_LATENCY: + case IXL_LOW_LATENCY: if (tx_bytes > 10) { - tx_latency = I40E_AVE_LATENCY; - tx_itr = I40E_ITR_20K; + tx_latency = IXL_AVE_LATENCY; + tx_itr = IXL_ITR_20K; } break; - case I40E_AVE_LATENCY: + case IXL_AVE_LATENCY: if (tx_bytes > 20) { - tx_latency = I40E_BULK_LATENCY; - tx_itr = I40E_ITR_8K; + tx_latency = IXL_BULK_LATENCY; + tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { - tx_latency = I40E_LOW_LATENCY; - tx_itr = I40E_ITR_100K; + tx_latency = IXL_LOW_LATENCY; + tx_itr = IXL_ITR_100K; } break; - case I40E_BULK_LATENCY: + case IXL_BULK_LATENCY: if (tx_bytes <= 20) { - tx_latency = I40E_AVE_LATENCY; - tx_itr = I40E_ITR_20K; + tx_latency = IXL_AVE_LATENCY; + tx_itr = IXL_ITR_20K; } break; } @@ -2851,18 +2904,18 @@ i40e_set_queue_tx_itr(struct i40e_queue *que) /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); - txr->itr = tx_itr & I40E_MAX_ITR; - wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, + txr->itr = tx_itr & IXL_MAX_ITR; + wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ - if (vsi->tx_itr_setting & I40E_ITR_DYNAMIC) - vsi->tx_itr_setting = 
i40e_tx_itr; + if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) + vsi->tx_itr_setting = ixl_tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; - wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, + wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, que->me), txr->itr); } } @@ -2873,11 +2926,11 @@ i40e_set_queue_tx_itr(struct i40e_queue *que) static void -i40e_add_hw_stats(struct i40e_pf *pf) +ixl_add_hw_stats(struct ixl_pf *pf) { device_t dev = pf->dev; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *queues = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *queues = vsi->queues; struct i40e_eth_stats *vsi_stats = &vsi->eth_stats; struct i40e_hw_port_stats *pf_stats = &pf->stats; @@ -2909,7 +2962,7 @@ i40e_add_hw_stats(struct i40e_pf *pf) CTLFLAG_RD, NULL, "VSI-specific stats"); vsi_list = SYSCTL_CHILDREN(vsi_node); - i40e_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats); + ixl_add_sysctls_eth_stats(ctx, vsi_list, vsi_stats); /* Queue statistics */ for (int q = 0; q < vsi->num_queues; q++) { @@ -2943,7 +2996,7 @@ i40e_add_hw_stats(struct i40e_pf *pf) CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", - CTLFLAG_RD, &(txr->bytes), + CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), @@ -2954,15 +3007,15 @@ i40e_add_hw_stats(struct i40e_pf *pf) } /* MAC stats */ - i40e_add_sysctls_mac_stats(ctx, child, pf_stats); + ixl_add_sysctls_mac_stats(ctx, child, pf_stats); } static void -i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, +ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_eth_stats *eth_stats) { - struct i40e_sysctl_info ctls[] = + struct ixl_sysctl_info ctls[] = { {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, {ð_stats->rx_unicast, "ucast_pkts_rcvd", @@ -2983,7 +3036,7 @@ 
i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, {0,0,0} }; - struct i40e_sysctl_info *entry = ctls; + struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, @@ -2994,7 +3047,7 @@ i40e_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, } static void -i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, +ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { @@ -3003,9 +3056,9 @@ i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = &stats->eth; - i40e_add_sysctls_eth_stats(ctx, stat_list, eth_stats); + ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); - struct i40e_sysctl_info ctls[] = + struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, @@ -3042,7 +3095,7 @@ i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, {0,0,0} }; - struct i40e_sysctl_info *entry = ctls; + struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, @@ -3053,12 +3106,12 @@ i40e_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, } /* -** i40e_config_rss - setup RSS +** ixl_config_rss - setup RSS ** - note this is done for the single vsi */ -static void i40e_config_rss(struct i40e_vsi *vsi) +static void ixl_config_rss(struct ixl_vsi *vsi) { - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; u32 lut = 0; u64 set_hena, hena; @@ -3104,7 +3157,7 @@ static void i40e_config_rss(struct i40e_vsi *vsi) if ((i & 3) == 3) wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); } - i40e_flush(hw); + ixl_flush(hw); } @@ -3116,11 +3169,11 @@ static void i40e_config_rss(struct i40e_vsi *vsi) ** repopulate the real table. 
*/ static void -i40e_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) +ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct i40e_vsi *vsi = ifp->if_softc; + struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) /* Not our event */ return; @@ -3128,10 +3181,10 @@ i40e_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; - I40E_PF_LOCK(pf); + IXL_PF_LOCK(pf); ++vsi->num_vlans; - i40e_add_filter(vsi, hw->mac.addr, vtag); - I40E_PF_UNLOCK(pf); + ixl_add_filter(vsi, hw->mac.addr, vtag); + IXL_PF_UNLOCK(pf); } /* @@ -3140,11 +3193,11 @@ i40e_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) ** in the soft vfta. */ static void -i40e_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) +ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { - struct i40e_vsi *vsi = ifp->if_softc; + struct ixl_vsi *vsi = ifp->if_softc; struct i40e_hw *hw = vsi->hw; - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; if (ifp->if_softc != arg) return; @@ -3152,10 +3205,10 @@ i40e_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; - I40E_PF_LOCK(pf); + IXL_PF_LOCK(pf); --vsi->num_vlans; - i40e_del_filter(vsi, hw->mac.addr, vtag); - I40E_PF_UNLOCK(pf); + ixl_del_filter(vsi, hw->mac.addr, vtag); + IXL_PF_UNLOCK(pf); } /* @@ -3164,9 +3217,9 @@ i40e_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) ** after a soft reset. */ static void -i40e_setup_vlan_filters(struct i40e_vsi *vsi) +ixl_setup_vlan_filters(struct ixl_vsi *vsi) { - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; int cnt = 0, flags; if (vsi->num_vlans == 0) @@ -3177,10 +3230,10 @@ i40e_setup_vlan_filters(struct i40e_vsi *vsi) ** for the AQ update. 
*/ SLIST_FOREACH(f, &vsi->ftl, next) { - if (f->flags & I40E_FILTER_VLAN) { + if (f->flags & IXL_FILTER_VLAN) { f->flags |= - (I40E_FILTER_ADD | - I40E_FILTER_USED); + (IXL_FILTER_ADD | + IXL_FILTER_USED); cnt++; } } @@ -3188,9 +3241,9 @@ i40e_setup_vlan_filters(struct i40e_vsi *vsi) printf("setup vlan: no filters found!\n"); return; } - flags = I40E_FILTER_VLAN; - flags |= (I40E_FILTER_ADD | I40E_FILTER_USED); - i40e_add_hw_filters(vsi, flags, cnt); + flags = IXL_FILTER_VLAN; + flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); + ixl_add_hw_filters(vsi, flags, cnt); return; } @@ -3199,35 +3252,35 @@ i40e_setup_vlan_filters(struct i40e_vsi *vsi) ** needs to know about. */ static void -i40e_init_filters(struct i40e_vsi *vsi) +ixl_init_filters(struct ixl_vsi *vsi) { /* Add broadcast address */ u8 bc[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - i40e_add_filter(vsi, bc, I40E_VLAN_ANY); + ixl_add_filter(vsi, bc, IXL_VLAN_ANY); } /* ** This routine adds mulicast filters */ static void -i40e_add_mc_filter(struct i40e_vsi *vsi, u8 *macaddr) +ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; /* Does one already exist */ - f = i40e_find_filter(vsi, macaddr, I40E_VLAN_ANY); + f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; - f = i40e_get_filter(vsi); + f = ixl_get_filter(vsi); if (f == NULL) { printf("WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); - f->vlan = I40E_VLAN_ANY; - f->flags |= (I40E_FILTER_ADD | I40E_FILTER_USED - | I40E_FILTER_MC); + f->vlan = IXL_VLAN_ANY; + f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED + | IXL_FILTER_MC); return; } @@ -3236,15 +3289,15 @@ i40e_add_mc_filter(struct i40e_vsi *vsi, u8 *macaddr) ** This routine adds macvlan filters */ static void -i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) +ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { - struct i40e_mac_filter *f, *tmp; + struct ixl_mac_filter *f, 
*tmp; device_t dev = vsi->dev; - DEBUGOUT("i40e_add_filter: begin"); + DEBUGOUT("ixl_add_filter: begin"); /* Does one already exist */ - f = i40e_find_filter(vsi, macaddr, vlan); + f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* @@ -3252,46 +3305,46 @@ i40e_add_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. */ - if ((vlan != I40E_VLAN_ANY) && (vsi->num_vlans == 1)) { - tmp = i40e_find_filter(vsi, macaddr, I40E_VLAN_ANY); + if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { + tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { - i40e_del_filter(vsi, macaddr, I40E_VLAN_ANY); - i40e_add_filter(vsi, macaddr, 0); + ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); + ixl_add_filter(vsi, macaddr, 0); } } - f = i40e_get_filter(vsi); + f = ixl_get_filter(vsi); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->vlan = vlan; - f->flags |= (I40E_FILTER_ADD | I40E_FILTER_USED); - if (f->vlan != I40E_VLAN_ANY) - f->flags |= I40E_FILTER_VLAN; + f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); + if (f->vlan != IXL_VLAN_ANY) + f->flags |= IXL_FILTER_VLAN; - i40e_add_hw_filters(vsi, f->flags, 1); + ixl_add_hw_filters(vsi, f->flags, 1); return; } static void -i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) +ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; - f = i40e_find_filter(vsi, macaddr, vlan); + f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; - f->flags |= I40E_FILTER_DEL; - i40e_del_hw_filters(vsi, 1); + f->flags |= IXL_FILTER_DEL; + ixl_del_hw_filters(vsi, 1); /* Check if this is the last vlan removal */ - if (vlan != I40E_VLAN_ANY && vsi->num_vlans == 0) { + if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ - 
i40e_del_filter(vsi, macaddr, 0); - i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY); + ixl_del_filter(vsi, macaddr, 0); + ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } @@ -3299,10 +3352,10 @@ i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) /* ** Find the filter with both matching mac addr and vlan id */ -static struct i40e_mac_filter * -i40e_find_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) +static struct ixl_mac_filter * +ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) { - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, &vsi->ftl, next) { @@ -3325,10 +3378,10 @@ i40e_find_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan) ** the filters in the hardware. */ static void -i40e_add_hw_filters(struct i40e_vsi *vsi, int flags, int cnt) +ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; struct i40e_hw *hw = vsi->hw; device_t dev = vsi->dev; int err, j = 0; @@ -3350,9 +3403,9 @@ i40e_add_hw_filters(struct i40e_vsi *vsi, int flags, int cnt) b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); b->vlan_tag = - (f->vlan == I40E_VLAN_ANY ? 0 : f->vlan); + (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); b->flags = I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; - f->flags &= ~I40E_FILTER_ADD; + f->flags &= ~IXL_FILTER_ADD; j++; } if (j == cnt) @@ -3376,15 +3429,15 @@ i40e_add_hw_filters(struct i40e_vsi *vsi, int flags, int cnt) ** the filters in the hardware. 
*/ static void -i40e_del_hw_filters(struct i40e_vsi *vsi, int cnt) +ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct i40e_hw *hw = vsi->hw; device_t dev = vsi->dev; - struct i40e_mac_filter *f, *f_temp; + struct ixl_mac_filter *f, *f_temp; int err, j = 0; - DEBUGOUT("i40e_del_hw_filters: begin\n"); + DEBUGOUT("ixl_del_hw_filters: begin\n"); d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); @@ -3394,13 +3447,13 @@ i40e_del_hw_filters(struct i40e_vsi *vsi, int cnt) } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { - if (f->flags & I40E_FILTER_DEL) { + if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); - e->vlan_tag = (f->vlan == I40E_VLAN_ANY ? 0 : f->vlan); + e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; /* delete entry from vsi list */ - SLIST_REMOVE(&vsi->ftl, f, i40e_mac_filter, next); + SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } @@ -3424,13 +3477,13 @@ i40e_del_hw_filters(struct i40e_vsi *vsi, int cnt) } free(d, M_DEVBUF); - DEBUGOUT("i40e_del_hw_filters: end\n"); + DEBUGOUT("ixl_del_hw_filters: end\n"); return; } static void -i40e_enable_rings(struct i40e_vsi *vsi) +ixl_enable_rings(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; u32 reg; @@ -3469,7 +3522,7 @@ i40e_enable_rings(struct i40e_vsi *vsi) } static void -i40e_disable_rings(struct i40e_vsi *vsi) +ixl_disable_rings(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; u32 reg; @@ -3507,12 +3560,12 @@ i40e_disable_rings(struct i40e_vsi *vsi) } /** - * i40e_handle_mdd_event + * ixl_handle_mdd_event * * Called from interrupt handler to identify possibly malicious vfs * (But also detects events from the PF, as well) **/ -static void i40e_handle_mdd_event(struct i40e_pf *pf) +static void ixl_handle_mdd_event(struct ixl_pf *pf) { struct 
i40e_hw *hw = &pf->hw; device_t dev = pf->dev; @@ -3575,215 +3628,215 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); - i40e_flush(hw); + ixl_flush(hw); } static void -i40e_enable_intr(struct i40e_vsi *vsi) +ixl_enable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; - struct i40e_queue *que = vsi->queues; + struct ixl_queue *que = vsi->queues; - if (i40e_enable_msix) { - i40e_enable_adminq(hw); + if (ixl_enable_msix) { + ixl_enable_adminq(hw); for (int i = 0; i < vsi->num_queues; i++, que++) - i40e_enable_queue(hw, que->me); + ixl_enable_queue(hw, que->me); } else - i40e_enable_legacy(hw); + ixl_enable_legacy(hw); } static void -i40e_disable_intr(struct i40e_vsi *vsi) +ixl_disable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; - struct i40e_queue *que = vsi->queues; + struct ixl_queue *que = vsi->queues; - if (i40e_enable_msix) { - i40e_disable_adminq(hw); + if (ixl_enable_msix) { + ixl_disable_adminq(hw); for (int i = 0; i < vsi->num_queues; i++, que++) - i40e_disable_queue(hw, que->me); + ixl_disable_queue(hw, que->me); } else - i40e_disable_legacy(hw); + ixl_disable_legacy(hw); } static void -i40e_enable_adminq(struct i40e_hw *hw) +ixl_enable_adminq(struct i40e_hw *hw) { u32 reg; reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); + (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); - i40e_flush(hw); + ixl_flush(hw); return; } static void -i40e_disable_adminq(struct i40e_hw *hw) +ixl_disable_adminq(struct i40e_hw *hw) { u32 reg; - reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; + reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); return; } static void -i40e_enable_queue(struct i40e_hw *hw, int id) +ixl_enable_queue(struct i40e_hw *hw, int id) { 
u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } static void -i40e_disable_queue(struct i40e_hw *hw, int id) +ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; - reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; + reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); return; } static void -i40e_enable_legacy(struct i40e_hw *hw) +ixl_enable_legacy(struct i40e_hw *hw) { u32 reg; reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | - (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); + (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } static void -i40e_disable_legacy(struct i40e_hw *hw) +ixl_disable_legacy(struct i40e_hw *hw) { u32 reg; - reg = I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; + reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); return; } static void -i40e_update_stats_counters(struct i40e_pf *pf) +ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = vsi->ifp; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ - i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); - i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); - i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, 
&osd->eth.rx_bytes, &nsd->eth.rx_bytes); - i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); - i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); - i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_TDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_discards, &nsd->eth.tx_discards); - i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); - i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); - i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); - i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); - i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); - i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); - i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, 
&nsd->tx_dropped_link_down); - i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); - i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); - i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ - i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); - i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); - i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); - i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* Priority flow control stats */ #if 0 for (int i = 0; i < 8; i++) { - i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), + ixl_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_rx[i], &nsd->priority_xon_rx[i]); - i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), + ixl_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_tx[i], &nsd->priority_xon_tx[i]); - i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), + ixl_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xoff_tx[i], &nsd->priority_xoff_tx[i]); - i40e_stat_update32(hw, + ixl_stat_update32(hw, 
I40E_GLPRT_RXON2OFFCNT(hw->port, i), pf->stat_offsets_loaded, &osd->priority_xon_2_xoff[i], @@ -3792,82 +3845,82 @@ i40e_update_stats_counters(struct i40e_pf *pf) #endif /* Packet size stats rx */ - i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); - i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); - i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); - i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); - i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); - i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); - i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ - i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); - i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), 
pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); - i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); - i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); - i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); - i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); - i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), + ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); - i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); - i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); - i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); - i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), + ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ - i40e_update_eth_stats(vsi); + ixl_update_eth_stats(vsi); /* OS statistics */ // ERJ - these are per-port, update all vsis? 
@@ -3879,18 +3932,18 @@ i40e_update_stats_counters(struct i40e_pf *pf) ** - do outside interrupt since it might sleep */ static void -i40e_do_adminq(void *context, int pending) +ixl_do_adminq(void *context, int pending) { - struct i40e_pf *pf = context; + struct ixl_pf *pf = context; struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; + struct ixl_vsi *vsi = &pf->vsi; struct i40e_arq_event_info event; i40e_status ret; u32 reg, loop = 0; u16 opcode, result; - event.msg_size = I40E_AQ_BUF_SZ; - event.msg_buf = malloc(event.msg_size, + event.msg_len = IXL_AQ_BUF_SZ; + event.msg_buf = malloc(event.msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!event.msg_buf) { printf("Unable to allocate adminq memory\n"); @@ -3905,8 +3958,8 @@ i40e_do_adminq(void *context, int pending) opcode = LE16_TO_CPU(event.desc.opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: - vsi->link_up = i40e_config_link(hw); - i40e_update_link_status(pf); + vsi->link_up = ixl_config_link(hw); + ixl_update_link_status(pf); break; case i40e_aqc_opc_send_msg_to_pf: /* process pf/vf communication here */ @@ -3914,13 +3967,13 @@ i40e_do_adminq(void *context, int pending) case i40e_aqc_opc_event_lan_overflow: break; default: -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG printf("AdminQ unknown event %x\n", opcode); #endif break; } - } while (result && (loop++ < I40E_ADM_LIMIT)); + } while (result && (loop++ < IXL_ADM_LIMIT)); reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; @@ -3928,15 +3981,15 @@ i40e_do_adminq(void *context, int pending) free(event.msg_buf, M_DEVBUF); if (pf->msix > 1) - i40e_enable_adminq(&pf->hw); + ixl_enable_adminq(&pf->hw); else - i40e_enable_intr(vsi); + ixl_enable_intr(vsi); } static int -i40e_debug_info(SYSCTL_HANDLER_ARGS) +ixl_debug_info(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf; + struct ixl_pf *pf; int error, input = 0; error = sysctl_handle_int(oidp, &input, 0, req); @@ -3945,19 +3998,19 @@ i40e_debug_info(SYSCTL_HANDLER_ARGS) return 
(error); if (input == 1) { - pf = (struct i40e_pf *)arg1; - i40e_print_debug_info(pf); + pf = (struct ixl_pf *)arg1; + ixl_print_debug_info(pf); } return (error); } static void -i40e_print_debug_info(struct i40e_pf *pf) +ixl_print_debug_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_queue *que = vsi->queues; struct rx_ring *rxr = &que->rxr; struct tx_ring *txr = &que->txr; u32 reg; @@ -4001,9 +4054,9 @@ i40e_print_debug_info(struct i40e_pf *pf) /** * Update VSI-specific ethernet statistics counters. **/ -void i40e_update_eth_stats(struct i40e_vsi *vsi) +void ixl_update_eth_stats(struct ixl_vsi *vsi) { - struct i40e_pf *pf = (struct i40e_pf *)vsi->back; + struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = vsi->ifp; struct i40e_eth_stats *es; @@ -4014,43 +4067,43 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) oes = &vsi->eth_stats_offsets; /* Gather up the stats that the hw collects */ - i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx), + ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); - i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), + ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); - i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); - i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); - i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); - i40e_stat_update48(hw, 
I40E_GLV_BPRCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); - i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); - i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); - i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); - i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), + ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); @@ -4077,7 +4130,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi) /** * Reset all of the stats for the given pf **/ -void i40e_pf_reset_stats(struct i40e_pf *pf) +void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); @@ -4087,7 +4140,7 @@ void i40e_pf_reset_stats(struct i40e_pf *pf) /** * Resets all stats of the given vsi **/ -void i40e_vsi_reset_stats(struct i40e_vsi *vsi) +void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); @@ -4103,7 +4156,7 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) * to report stats that count from zero. 
**/ static void -i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, +ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; @@ -4132,7 +4185,7 @@ i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, * Read and update a 32 bit stat from the hw **/ static void -i40e_stat_update32(struct i40e_hw *hw, u32 reg, +ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; @@ -4154,7 +4207,7 @@ i40e_stat_update32(struct i40e_hw *hw, u32 reg, ** 3 - full */ static int -i40e_set_flowcntl(SYSCTL_HANDLER_ARGS) +ixl_set_flowcntl(SYSCTL_HANDLER_ARGS) { /* * TODO: ensure flow control is disabled if @@ -4163,7 +4216,7 @@ i40e_set_flowcntl(SYSCTL_HANDLER_ARGS) * TODO: ensure tx CRC by hardware should be enabled * if tx flow control is enabled. */ - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_fc = 0, error = 0; @@ -4214,30 +4267,76 @@ i40e_set_flowcntl(SYSCTL_HANDLER_ARGS) device_printf(dev, "%s: FC set failure:\n", __func__); device_printf(dev, "%s: Current: %s / Requested: %s\n", __func__, - i40e_fc_string[hw->fc.current_mode], - i40e_fc_string[hw->fc.requested_mode]); + ixl_fc_string[hw->fc.current_mode], + ixl_fc_string[hw->fc.requested_mode]); } return (0); } +static int +ixl_current_speed(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct i40e_hw *hw = &pf->hw; + int error = 0, index = 0; + + char *speeds[] = { + "Unknown", + "100M", + "1G", + "10G", + "40G", + "20G" + }; + + ixl_update_link_status(pf); + + switch (hw->phy.link_info.link_speed) { + case I40E_LINK_SPEED_100MB: + index = 1; + break; + case I40E_LINK_SPEED_1GB: + index = 2; + break; + case I40E_LINK_SPEED_10GB: + index = 3; + break; + case I40E_LINK_SPEED_40GB: + index = 4; + break; + case I40E_LINK_SPEED_20GB: + index = 5; + break; + case 
I40E_LINK_SPEED_UNKNOWN: + default: + index = 0; + break; + } + + error = sysctl_handle_string(oidp, speeds[index], + strlen(speeds[index]), req); + return (error); +} + /* ** Control link advertise speed: -** 1 - advertise 1G only -** 2 - advertise 10G only -** 3 - advertise 1 and 10G +** Flags: +** 0x1 - advertise 100 Mb +** 0x2 - advertise 1G +** 0x4 - advertise 10G ** ** Does not work on 40G devices. */ static int -i40e_set_advertise(SYSCTL_HANDLER_ARGS) +ixl_set_advertise(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; - int current_ls = 0, requested_ls = 0; + int requested_ls = 0; enum i40e_status_code aq_error = 0; int error = 0; @@ -4248,6 +4347,21 @@ i40e_set_advertise(SYSCTL_HANDLER_ARGS) if (i40e_is_40G_device(hw->device_id)) return (ENODEV); + /* Read in new mode */ + requested_ls = pf->advertised_speed; + error = sysctl_handle_int(oidp, &requested_ls, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (requested_ls < 1 || requested_ls > 7) { + device_printf(dev, + "Invalid advertised speed; valid modes are 0x1 through 0x7\n"); + return (EINVAL); + } + + /* Exit if no change */ + if (pf->advertised_speed == requested_ls) + return (0); + /* Get current capability information */ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { @@ -4257,32 +4371,6 @@ i40e_set_advertise(SYSCTL_HANDLER_ARGS) return (EAGAIN); } - /* Figure out current mode */ - else if (abilities.link_speed & I40E_LINK_SPEED_10GB - && abilities.link_speed & I40E_LINK_SPEED_1GB) - current_ls = 3; - else if (abilities.link_speed & I40E_LINK_SPEED_10GB) - current_ls = 2; - else if (abilities.link_speed & I40E_LINK_SPEED_1GB) - current_ls = 1; - else - current_ls = 0; - - /* Read in new mode */ - requested_ls = current_ls; - 
error = sysctl_handle_int(oidp, &requested_ls, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); - if (requested_ls < 1 || requested_ls > 3) { - device_printf(dev, - "Invalid advertised speed; valid modes are 1 through 3\n"); - return (EINVAL); - } - - /* Exit if no change */ - if (current_ls == requested_ls) - return (0); - /* Prepare new config */ bzero(&config, sizeof(config)); config.phy_type = abilities.phy_type; @@ -4292,18 +4380,12 @@ i40e_set_advertise(SYSCTL_HANDLER_ARGS) config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; /* Translate into aq cmd link_speed */ - switch (requested_ls) { - case 3: - config.link_speed = I40E_LINK_SPEED_10GB - | I40E_LINK_SPEED_1GB; - case 2: - config.link_speed = I40E_LINK_SPEED_10GB; - case 1: - config.link_speed = I40E_LINK_SPEED_1GB; - default: - // nothing should get here - break; - } + if (requested_ls & 0x4) + config.link_speed |= I40E_LINK_SPEED_10GB; + if (requested_ls & 0x2) + config.link_speed |= I40E_LINK_SPEED_1GB; + if (requested_ls & 0x1) + config.link_speed |= I40E_LINK_SPEED_100MB; /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); @@ -4314,7 +4396,8 @@ i40e_set_advertise(SYSCTL_HANDLER_ARGS) return (EAGAIN); } - i40e_update_link_status(pf); + pf->advertised_speed = requested_ls; + ixl_update_link_status(pf); return (0); } @@ -4323,7 +4406,7 @@ i40e_set_advertise(SYSCTL_HANDLER_ARGS) ** the bus this adapter is plugged into. 
*/ static u16 -i40e_get_bus_info(struct i40e_hw *hw, device_t dev) +ixl_get_bus_info(struct i40e_hw *hw, device_t dev) { u16 link; u32 offset; @@ -4390,11 +4473,11 @@ i40e_get_bus_info(struct i40e_hw *hw, device_t dev) return (link); } -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG static int -i40e_sysctl_link_status(SYSCTL_HANDLER_ARGS) +ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct i40e_link_status link_status; char buf[512]; @@ -4421,9 +4504,9 @@ i40e_sysctl_link_status(SYSCTL_HANDLER_ARGS) } static int -i40e_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) +ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct i40e_aq_get_phy_abilities_resp abilities_resp; char buf[512]; @@ -4452,11 +4535,11 @@ i40e_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) } static int -i40e_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) +ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; - struct i40e_vsi *vsi = &pf->vsi; - struct i40e_mac_filter *f; + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_mac_filter *f; char *buf, *buf_i; int error = 0; @@ -4497,18 +4580,18 @@ i40e_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) return error; } -#define I40E_SW_RES_SIZE 0x14 +#define IXL_SW_RES_SIZE 0x14 static int -i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS) +ixl_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; u8 num_entries; - struct i40e_aqc_switch_resource_alloc_element_resp resp[I40E_SW_RES_SIZE]; + struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 0, 
req); if (!buf) { @@ -4518,7 +4601,7 @@ i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS) error = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, - I40E_SW_RES_SIZE, + IXL_SW_RES_SIZE, NULL); if (error) { device_printf(dev, "%s: get_switch_resource_alloc() error %d, aq error %d\n", @@ -4565,9 +4648,9 @@ i40e_sysctl_hw_res_info(SYSCTL_HANDLER_ARGS) ** TODO: Also needs a queue index input! **/ static int -i40e_sysctl_dump_txd(SYSCTL_HANDLER_ARGS) +ixl_sysctl_dump_txd(SYSCTL_HANDLER_ARGS) { - struct i40e_pf *pf = (struct i40e_pf *)arg1; + struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *buf; int error = 0; @@ -4599,7 +4682,7 @@ i40e_sysctl_dump_txd(SYSCTL_HANDLER_ARGS) sbuf_cat(buf, "\n"); // set to queue 1? - struct i40e_queue *que = pf->vsi.queues; + struct ixl_queue *que = pf->vsi.queues; struct tx_ring *txr = &(que[1].txr); struct i40e_tx_desc *txd = &txr->base[desc_idx]; diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c new file mode 100644 index 000000000000..0e6e572761ad --- /dev/null +++ b/sys/dev/ixl/if_ixlv.c @@ -0,0 +1,2742 @@ +/****************************************************************************** + + Copyright (c) 2013-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "opt_inet.h" +#include "opt_inet6.h" +#include "ixl.h" +#include "ixlv.h" + +/********************************************************************* + * Driver version + *********************************************************************/ +char ixlv_driver_version[] = "1.1.4"; + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * Last field stores an index into ixlv_strings + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + *********************************************************************/ + +static ixl_vendor_info_t ixlv_vendor_info_array[] = +{ + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, + {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0}, + /* required last entry */ + {0, 0, 0, 0, 0} +}; + +/********************************************************************* + * Table of branding strings + *********************************************************************/ + +static char *ixlv_strings[] = { + "Intel(R) Ethernet 
Connection XL710 VF Driver" +}; + + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static int ixlv_probe(device_t); +static int ixlv_attach(device_t); +static int ixlv_detach(device_t); +static int ixlv_shutdown(device_t); +static void ixlv_init_locked(struct ixlv_sc *); +static int ixlv_allocate_pci_resources(struct ixlv_sc *); +static void ixlv_free_pci_resources(struct ixlv_sc *); +static int ixlv_assign_msix(struct ixlv_sc *); +static int ixlv_init_msix(struct ixlv_sc *); +static int ixlv_init_taskqueue(struct ixlv_sc *); +static int ixlv_setup_queues(struct ixlv_sc *); +static void ixlv_config_rss(struct ixlv_sc *); +static void ixlv_stop(struct ixlv_sc *); +static void ixlv_add_multi(struct ixl_vsi *); +static void ixlv_del_multi(struct ixl_vsi *); +static void ixlv_update_link_status(struct ixlv_sc *); +static void ixlv_free_queues(struct ixl_vsi *); +static int ixlv_setup_interface(device_t, struct ixlv_sc *); + +static int ixlv_media_change(struct ifnet *); +static void ixlv_media_status(struct ifnet *, struct ifmediareq *); + +static void ixlv_local_timer(void *); + +static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16); +static void ixlv_init_filters(struct ixlv_sc *); +static void ixlv_free_filters(struct ixlv_sc *); + +static void ixlv_msix_que(void *); +static void ixlv_msix_adminq(void *); +static void ixlv_do_adminq(void *, int); +static void ixlv_sched_aq(void *); +static void ixlv_handle_que(void *, int); +static int ixlv_reset(struct ixlv_sc *); +static int ixlv_reset_complete(struct i40e_hw *); +static void ixlv_set_queue_rx_itr(struct ixl_queue *); +static void ixlv_set_queue_tx_itr(struct ixl_queue *); + +static void ixlv_enable_adminq_irq(struct i40e_hw *); +static void ixlv_disable_adminq_irq(struct i40e_hw *); +static void ixlv_enable_queue_irq(struct i40e_hw *, int); +static void ixlv_disable_queue_irq(struct 
i40e_hw *, int); + +static void ixlv_setup_vlan_filters(struct ixlv_sc *); +static void ixlv_register_vlan(void *, struct ifnet *, u16); +static void ixlv_unregister_vlan(void *, struct ifnet *, u16); + +static void ixlv_cap_txcsum_tso(struct ixl_vsi *, + struct ifnet *, int); + +static void ixlv_add_stats_sysctls(struct ixlv_sc *); + +/********************************************************************* + * FreeBSD Device Interface Entry Points + *********************************************************************/ + +static device_method_t ixlv_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ixlv_probe), + DEVMETHOD(device_attach, ixlv_attach), + DEVMETHOD(device_detach, ixlv_detach), + DEVMETHOD(device_shutdown, ixlv_shutdown), + {0, 0} +}; + +static driver_t ixlv_driver = { + "ixlv", ixlv_methods, sizeof(struct ixlv_sc), +}; + +devclass_t ixlv_devclass; +DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0); + +MODULE_DEPEND(ixlv, pci, 1, 1, 1); +MODULE_DEPEND(ixlv, ether, 1, 1, 1); + +/* +** TUNEABLE PARAMETERS: +*/ + +static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0, + "IXLV driver parameters"); + +/* +** Number of descriptors per ring: +** - TX and RX are the same size +*/ +static int ixlv_ringsz = DEFAULT_RING; +TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz); +SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN, + &ixlv_ringsz, 0, "Descriptor Ring Size"); + +/* Set to zero to auto calculate */ +int ixlv_max_queues = 0; +TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues); +SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN, + &ixlv_max_queues, 0, "Number of Queues"); + +/* +** Number of entries in Tx queue buf_ring. +** Increasing this will reduce the number of +** errors when transmitting fragmented UDP +** packets. 
+*/ +static int ixlv_txbrsz = DEFAULT_TXBRSZ; +TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz); +SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN, + &ixlv_txbrsz, 0, "TX Buf Ring Size"); + +/* +** Controls for Interrupt Throttling +** - true/false for dynamic adjustment +** - default values for static ITR +*/ +int ixlv_dynamic_rx_itr = 0; +TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); +SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, + &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); + +int ixlv_dynamic_tx_itr = 0; +TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr); +SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, + &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); + +int ixlv_rx_itr = IXL_ITR_8K; +TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr); +SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN, + &ixlv_rx_itr, 0, "RX Interrupt Rate"); + +int ixlv_tx_itr = IXL_ITR_4K; +TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr); +SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN, + &ixlv_tx_itr, 0, "TX Interrupt Rate"); + + +/********************************************************************* + * Device identification routine + * + * ixlv_probe determines if the driver should be loaded on + * the hardware based on PCI vendor/device id of the device. 
+ * + * return BUS_PROBE_DEFAULT on success, positive on failure + *********************************************************************/ + +static int +ixlv_probe(device_t dev) +{ + ixl_vendor_info_t *ent; + + u16 pci_vendor_id, pci_device_id; + u16 pci_subvendor_id, pci_subdevice_id; + char device_name[256]; + + INIT_DEBUGOUT("ixlv_probe: begin"); + + pci_vendor_id = pci_get_vendor(dev); + if (pci_vendor_id != I40E_INTEL_VENDOR_ID) + return (ENXIO); + + pci_device_id = pci_get_device(dev); + pci_subvendor_id = pci_get_subvendor(dev); + pci_subdevice_id = pci_get_subdevice(dev); + + ent = ixlv_vendor_info_array; + while (ent->vendor_id != 0) { + if ((pci_vendor_id == ent->vendor_id) && + (pci_device_id == ent->device_id) && + + ((pci_subvendor_id == ent->subvendor_id) || + (ent->subvendor_id == 0)) && + + ((pci_subdevice_id == ent->subdevice_id) || + (ent->subdevice_id == 0))) { + sprintf(device_name, "%s, Version - %s", + ixlv_strings[ent->index], + ixlv_driver_version); + device_set_desc_copy(dev, device_name); + return (BUS_PROBE_DEFAULT); + } + ent++; + } + return (ENXIO); +} + +/********************************************************************* + * Device initialization routine + * + * The attach entry point is called when the driver is being loaded. + * This routine identifies the type of hardware, allocates all resources + * and initializes the hardware. 
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixlv_attach(device_t dev) +{ + struct ixlv_sc *sc; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + int bufsz, error = 0, retries = 0; + + INIT_DBG_DEV(dev, "begin"); + + /* Allocate, clear, and link in our primary soft structure */ + sc = device_get_softc(dev); + sc->dev = sc->osdep.dev = dev; + hw = &sc->hw; + vsi = &sc->vsi; + vsi->dev = dev; + + /* Allocate filter lists */ + ixlv_init_filters(sc); + + /* Core Lock Init*/ + mtx_init(&sc->mtx, device_get_nameunit(dev), + "IXL SC Lock", MTX_DEF); + mtx_init(&sc->aq_task_mtx, device_get_nameunit(dev), + "IXL AQ Task Lock", MTX_DEF); + + /* Set up the timer & aq watchdog callouts */ + callout_init_mtx(&sc->timer, &sc->mtx, 0); + callout_init_mtx(&sc->aq_task, &sc->aq_task_mtx, 0); + + /* Save off the information about this board */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + hw->bus.device = pci_get_slot(dev); + hw->bus.func = pci_get_function(dev); + + /* Do PCI setup - map BAR0, etc */ + if (ixlv_allocate_pci_resources(sc)) { + device_printf(dev, "%s: Allocation of PCI resources failed\n", + __func__); + error = ENXIO; + goto err_early; + } + + INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors"); + + error = i40e_set_mac_type(hw); + if (error) { + device_printf(dev, "%s: set_mac_type failed: %d\n", + __func__, error); + goto err_pci_res; + } + + error = ixlv_reset_complete(hw); + if (error) { + device_printf(dev, "%s: Device is still being reset\n", + __func__); + goto err_pci_res; + } + + INIT_DBG_DEV(dev, "VF Device is ready for configuration"); + + hw->aq.num_arq_entries = IXL_AQ_LEN; + hw->aq.num_asq_entries = IXL_AQ_LEN; + 
hw->aq.arq_buf_size = IXL_AQ_BUFSZ; + hw->aq.asq_buf_size = IXL_AQ_BUFSZ; + + error = i40e_init_adminq(hw); + if (error) { + device_printf(dev, "%s: init_adminq failed: %d\n", + __func__, error); + goto err_pci_res; + } + + INIT_DBG_DEV(dev, "Initialized Admin Queue"); + + error = ixlv_send_api_ver(sc); + if (error) { + device_printf(dev, "%s: unable to send to PF (%d)\n", + __func__, error); + goto err_aq; + } + + while (!i40e_asq_done(hw)) { + if (++retries > IXLV_AQ_MAX_ERR) { + device_printf(dev, "%s: Admin Queue timeout " + "(waiting for send_api_ver)\n", __func__); + error = ENXIO; + goto err_aq; + } + i40e_msec_delay(10); + } + + INIT_DBG_DEV(dev, "Sent API version message to PF"); + + /* Wait for API version msg to arrive */ + error = ixlv_verify_api_ver(sc); + if (error) { + device_printf(dev, + "%s: Unable to verify API version, error %d\n", + __func__, error); + goto err_aq; + } + + INIT_DBG_DEV(dev, "PF API version verified"); + + /* Need API version before sending reset message */ + error = ixlv_reset(sc); + if (error) { + device_printf(dev, "VF reset failed; reload the driver\n"); + goto err_aq; + } + + INIT_DBG_DEV(dev, "VF reset complete"); + + /* Ask for VF config from PF */ + error = ixlv_send_vf_config_msg(sc); + if (error) { + device_printf(dev, + "%s: Unable to send VF config request, error %d\n", + __func__, error); + goto err_aq; + } + + retries = 0; + while (!i40e_asq_done(hw)) { + if (++retries > IXLV_AQ_MAX_ERR) { + device_printf(dev, "%s: Admin Queue timeout " + "(waiting for send_vf_config_msg)\n", __func__); + error = ENXIO; + goto err_aq; + } + i40e_msec_delay(10); + } + + INIT_DBG_DEV(dev, "Sent VF config message to PF"); + + bufsz = sizeof(struct i40e_virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); + sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); + if (!sc->vf_res) { + device_printf(dev, + "%s: Unable to allocate memory for VF configuration" + " message from PF\n", __func__); + error = 
ENOMEM; + goto err_aq; + } + + /* Check for VF config response */ + error = ixlv_get_vf_config(sc); + if (error) { + device_printf(dev, + "%s: Unable to get VF configuration from PF\n", + __func__); + error = EBUSY; + goto err_res_buf; + } + + INIT_DBG_DEV(dev, "Received valid VF config from PF"); + INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d", + sc->vf_res->num_vsis, + sc->vf_res->num_queue_pairs, + sc->vf_res->max_vectors, + sc->vf_res->max_mtu); + INIT_DBG_DEV(dev, "Offload flags: %#010x", + sc->vf_res->vf_offload_flags); + + /* got VF config message back from PF, now we can parse it */ + for (int i = 0; i < sc->vf_res->num_vsis; i++) { + if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + sc->vsi_res = &sc->vf_res->vsi_res[i]; + } + if (!sc->vsi_res) { + device_printf(dev, "%s: no LAN VSI found\n", __func__); + goto err_res_buf; + } + + INIT_DBG_DEV(dev, "Resource Acquisition complete"); + + /* If no mac address was assigned just make a random one */ + if (!ixlv_check_ether_addr(hw->mac.addr)) { + u8 addr[ETHER_ADDR_LEN]; + arc4rand(&addr, sizeof(addr), 0); + addr[0] &= 0xFE; + addr[0] |= 0x02; + bcopy(addr, hw->mac.addr, sizeof(addr)); + } + + vsi->id = sc->vsi_res->vsi_id; + vsi->back = (void *)sc; + + /* Link in this virtual environment is always 'up' */ + vsi->link_up = TRUE; + + /* This allocates the memory and early settings */ + if (ixlv_setup_queues(sc) != 0) { + device_printf(dev, "%s: setup queues failed!\n", + __func__); + goto out; + } + + /* Setup the stack interface */ + if (ixlv_setup_interface(dev, sc) != 0) { + device_printf(dev, "%s: setup interface failed!\n", + __func__); + goto out; + } + + INIT_DBG_DEV(dev, "Queue memory and interface setup"); + + /* Do queue interrupt setup */ + ixlv_assign_msix(sc); + + /* Start AdminQ taskqueue */ + ixlv_init_taskqueue(sc); + + /* Start the admin queue scheduler timer */ + callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc); + + /* Initialize stats */ + 
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); + ixlv_add_stats_sysctls(sc); + + /* Register for VLAN events */ + vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); + vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); + + /* We want AQ enabled early */ + ixlv_enable_adminq_irq(hw); + + /* Set things up to run init */ + sc->aq_pending = 0; + sc->aq_required = 0; + sc->init_state = IXLV_INIT_READY; + + INIT_DBG_DEV(dev, "end"); + return (error); + +out: + ixlv_free_queues(vsi); +err_res_buf: + free(sc->vf_res, M_DEVBUF); +err_aq: + i40e_shutdown_adminq(hw); +err_pci_res: + ixlv_free_pci_resources(sc); +err_early: + mtx_destroy(&sc->mtx); + mtx_destroy(&sc->aq_task_mtx); + ixlv_free_filters(sc); + INIT_DBG_DEV(dev, "end: error %d", error); + return (error); +} + +/********************************************************************* + * Device removal routine + * + * The detach entry point is called when the driver is being removed. + * This routine stops the adapter and deallocates all the resources + * that were allocated for driver operation. + * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixlv_detach(device_t dev) +{ + struct ixlv_sc *sc = device_get_softc(dev); + struct ixl_vsi *vsi = &sc->vsi; + int retries = 0; + + INIT_DBG_DEV(dev, "begin"); + + /* Make sure VLANS are not using driver */ + if (vsi->ifp->if_vlantrunk != NULL) { + device_printf(dev, "Vlan in use, detach first\n"); + INIT_DBG_DEV(dev, "end"); + return (EBUSY); + } + + /* Stop driver */ + if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { + mtx_lock(&sc->mtx); + ixlv_stop(sc); + mtx_unlock(&sc->mtx); + + /* + ** Ensure queues are disabled before examining + ** admin queue state later in detach. 
+ */ + while (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING + && ++retries < IXLV_AQ_MAX_ERR) { + i40e_msec_delay(10); + } +#ifdef IXL_DEBUG + if (retries >= IXLV_AQ_MAX_ERR) + device_printf(dev, "Issue disabling queues for detach\n"); +#endif + } + + /* Unregister VLAN events */ + if (vsi->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); + if (vsi->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); + + /* Stop AQ callout */ + callout_drain(&sc->aq_task); + callout_stop(&sc->aq_task); + +#ifdef IXL_DEBUG + /* Report on possible AQ failures */ + if (sc->aq_required || sc->aq_pending) { + device_printf(dev, "AQ status on detach:\n"); + device_printf(dev, "required : 0x%4b\n", sc->aq_required, + IXLV_FLAGS); + device_printf(dev, "pending : 0x%4b\n", sc->aq_pending, + IXLV_FLAGS); + device_printf(dev, "current_op: %d\n", sc->current_op); + } +#endif + + i40e_shutdown_adminq(&sc->hw); + while (taskqueue_cancel(sc->tq, &sc->aq_irq, NULL) != 0) + taskqueue_drain(sc->tq, &sc->aq_irq); + taskqueue_free(sc->tq); + + /* force the state down */ + vsi->ifp->if_flags &= ~IFF_UP; + ether_ifdetach(vsi->ifp); + if_free(vsi->ifp); + + free(sc->vf_res, M_DEVBUF); + ixlv_free_pci_resources(sc); + ixlv_free_queues(vsi); + mtx_destroy(&sc->mtx); + mtx_destroy(&sc->aq_task_mtx); + ixlv_free_filters(sc); + + bus_generic_detach(dev); + INIT_DBG_DEV(dev, "end"); + return (0); +} + +/********************************************************************* + * + * Shutdown entry point + * + **********************************************************************/ + +static int +ixlv_shutdown(device_t dev) +{ + struct ixlv_sc *sc = device_get_softc(dev); + + INIT_DBG_DEV(dev, "begin"); + + mtx_lock(&sc->mtx); + ixlv_stop(sc); + mtx_unlock(&sc->mtx); + + INIT_DBG_DEV(dev, "end"); + return (0); +} + +/* + * Configure TXCSUM(IPV6) and TSO(4/6) + * - the hardware handles these together so we + * need to tweak them + */ +static void 
+ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) +{ + /* Enable/disable TXCSUM/TSO4 */ + if (!(ifp->if_capenable & IFCAP_TXCSUM) + && !(ifp->if_capenable & IFCAP_TSO4)) { + if (mask & IFCAP_TXCSUM) { + ifp->if_capenable |= IFCAP_TXCSUM; + /* enable TXCSUM, restore TSO if previously enabled */ + if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { + vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + ifp->if_capenable |= IFCAP_TSO4; + } + } + else if (mask & IFCAP_TSO4) { + ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); + vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; + if_printf(ifp, + "TSO4 requires txcsum, enabling both...\n"); + } + } else if((ifp->if_capenable & IFCAP_TXCSUM) + && !(ifp->if_capenable & IFCAP_TSO4)) { + if (mask & IFCAP_TXCSUM) + ifp->if_capenable &= ~IFCAP_TXCSUM; + else if (mask & IFCAP_TSO4) + ifp->if_capenable |= IFCAP_TSO4; + } else if((ifp->if_capenable & IFCAP_TXCSUM) + && (ifp->if_capenable & IFCAP_TSO4)) { + if (mask & IFCAP_TXCSUM) { + vsi->flags |= IXL_FLAGS_KEEP_TSO4; + ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); + if_printf(ifp, + "TSO4 requires txcsum, disabling both...\n"); + } else if (mask & IFCAP_TSO4) + ifp->if_capenable &= ~IFCAP_TSO4; + } + + /* Enable/disable TXCSUM_IPV6/TSO6 */ + if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) + && !(ifp->if_capenable & IFCAP_TSO6)) { + if (mask & IFCAP_TXCSUM_IPV6) { + ifp->if_capenable |= IFCAP_TXCSUM_IPV6; + if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { + vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + ifp->if_capenable |= IFCAP_TSO6; + } + } else if (mask & IFCAP_TSO6) { + ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); + vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; + if_printf(ifp, + "TSO6 requires txcsum6, enabling both...\n"); + } + } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) + && !(ifp->if_capenable & IFCAP_TSO6)) { + if (mask & IFCAP_TXCSUM_IPV6) + ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; + else if (mask & IFCAP_TSO6) + ifp->if_capenable |= IFCAP_TSO6; + } else if ((ifp->if_capenable & 
IFCAP_TXCSUM_IPV6) + && (ifp->if_capenable & IFCAP_TSO6)) { + if (mask & IFCAP_TXCSUM_IPV6) { + vsi->flags |= IXL_FLAGS_KEEP_TSO6; + ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); + if_printf(ifp, + "TSO6 requires txcsum6, disabling both...\n"); + } else if (mask & IFCAP_TSO6) + ifp->if_capenable &= ~IFCAP_TSO6; + } +} + +/********************************************************************* + * Ioctl entry point + * + * ixlv_ioctl is called when the user wants to configure the + * interface. + * + * return 0 on success, positive on failure + **********************************************************************/ + +static int +ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) +{ + struct ixl_vsi *vsi = ifp->if_softc; + struct ixlv_sc *sc = vsi->back; + struct ifreq *ifr = (struct ifreq *)data; +#if defined(INET) || defined(INET6) + struct ifaddr *ifa = (struct ifaddr *)data; + bool avoid_reset = FALSE; +#endif + int error = 0; + + + switch (command) { + + case SIOCSIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) + avoid_reset = TRUE; +#endif +#ifdef INET6 + if (ifa->ifa_addr->sa_family == AF_INET6) + avoid_reset = TRUE; +#endif +#if defined(INET) || defined(INET6) + /* + ** Calling init results in link renegotiation, + ** so we avoid doing it when possible. 
+ */ + if (avoid_reset) { + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + ixlv_init(sc); + if (!(ifp->if_flags & IFF_NOARP)) + arp_ifinit(ifp, ifa); + } else + error = ether_ioctl(ifp, command, data); + break; +#endif + case SIOCSIFMTU: + IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)"); + mtx_lock(&sc->mtx); + if (ifr->ifr_mtu > IXL_MAX_FRAME - + ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { + error = EINVAL; + IOCTL_DBG_IF(ifp, "mtu too large"); + } else { + IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu); + // ERJ: Interestingly enough, these types don't match + ifp->if_mtu = ifr->ifr_mtu; + vsi->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; + + ixlv_init_locked(sc); + } + mtx_unlock(&sc->mtx); + break; + case SIOCSIFFLAGS: + IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)"); + mtx_lock(&sc->mtx); + if (ifp->if_flags & IFF_UP) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + ixlv_init_locked(sc); + } else + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixlv_stop(sc); + sc->if_flags = ifp->if_flags; + mtx_unlock(&sc->mtx); + break; + case SIOCADDMULTI: + IOCTL_DBG_IF2(ifp, "SIOCADDMULTI"); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + mtx_lock(&sc->mtx); + ixlv_disable_intr(vsi); + ixlv_add_multi(vsi); + ixlv_enable_intr(vsi); + mtx_unlock(&sc->mtx); + } + break; + case SIOCDELMULTI: + IOCTL_DBG_IF2(ifp, "SIOCDELMULTI"); + if (sc->init_state == IXLV_RUNNING) { + mtx_lock(&sc->mtx); + ixlv_disable_intr(vsi); + ixlv_del_multi(vsi); + ixlv_enable_intr(vsi); + mtx_unlock(&sc->mtx); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)"); + error = ifmedia_ioctl(ifp, ifr, &sc->media, command); + break; + case SIOCSIFCAP: + { + int mask = ifr->ifr_reqcap ^ ifp->if_capenable; + IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)"); + + ixlv_cap_txcsum_tso(vsi, ifp, mask); + + if (mask & IFCAP_RXCSUM) + 
ifp->if_capenable ^= IFCAP_RXCSUM; + if (mask & IFCAP_RXCSUM_IPV6) + ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; + if (mask & IFCAP_LRO) + ifp->if_capenable ^= IFCAP_LRO; + if (mask & IFCAP_VLAN_HWTAGGING) + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if (mask & IFCAP_VLAN_HWFILTER) + ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; + if (mask & IFCAP_VLAN_HWTSO) + ifp->if_capenable ^= IFCAP_VLAN_HWTSO; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + ixlv_init(sc); + } + VLAN_CAPABILITIES(ifp); + + break; + } + + default: + IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command); + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + +/* +** To do a reinit on the VF is unfortunately more complicated +** than a physical device, we must have the PF more or less +** completely recreate our memory, so many things that were +** done only once at attach in traditional drivers now must be +** redone at each reinitialization. This function does that +** 'prelude' so we can then call the normal locked init code. 
+*/ +int +ixlv_reinit_locked(struct ixlv_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + struct ifnet *ifp = vsi->ifp; + struct ixlv_vlan_filter *vf; + int error = 0; + + INIT_DBG_IF(ifp, "begin"); + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixlv_stop(sc); + + if ((sc->init_state == IXLV_RESET_REQUIRED) || + (sc->init_state == IXLV_RESET_PENDING)) + error = ixlv_reset(sc); + + /* set the state in case we went thru RESET */ + sc->init_state = IXLV_RUNNING; + + if (vsi->num_vlans != 0) + SLIST_FOREACH(vf, sc->vlan_filters, next) + vf->flags = IXL_FILTER_ADD; + else { /* clean any stale filters */ + while (!SLIST_EMPTY(sc->vlan_filters)) { + vf = SLIST_FIRST(sc->vlan_filters); + SLIST_REMOVE_HEAD(sc->vlan_filters, next); + free(vf, M_DEVBUF); + } + } + + ixlv_enable_adminq_irq(hw); + sc->aq_pending = 0; + sc->aq_required = 0; + + INIT_DBG_IF(ifp, "end"); + return (error); +} + + +static void +ixlv_init_locked(struct ixlv_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + struct ifnet *ifp = vsi->ifp; + int error = 0; + + INIT_DBG_IF(ifp, "begin"); + + /* Verify we have the core lock */ + if (!mtx_owned(&sc->mtx)) { + if_printf(ifp, "%s: sc mutex not owned; acquire" + "before calling this function!\n", __func__); + goto init_done; + } + + /* Do a reinit first if an init has already been done */ + if ((sc->init_state == IXLV_RUNNING) || + (sc->init_state == IXLV_RESET_REQUIRED) || + (sc->init_state == IXLV_RESET_PENDING)) + error = ixlv_reinit_locked(sc); + /* Don't bother with init if we failed reinit */ + if (error) + goto init_done; + + /* Check for an LAA mac address... 
*/ + bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN); + + ifp->if_hwassist = 0; + if (ifp->if_capenable & IFCAP_TSO) + ifp->if_hwassist |= CSUM_TSO; + if (ifp->if_capenable & IFCAP_TXCSUM) + ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP); + if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) + ifp->if_hwassist |= CSUM_OFFLOAD_IPV6; + + /* Add mac filter for this VF to PF */ + error = ixlv_add_mac_filter(sc, hw->mac.addr, 0); + + // send message, then enqueue another task + if (!error || error == EEXIST) { + sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER; + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + } + + /* Setup vlan's if needed */ + ixlv_setup_vlan_filters(sc); + + /* + ** Prepare the queues for operation + */ + for (int i = 0; i < vsi->num_queues; i++, que++) { + struct rx_ring *rxr = &que->rxr; + + ixl_init_tx_ring(que); + + /* Need to set mbuf size now */ + if (vsi->max_frame_size <= 2048) + rxr->mbuf_sz = MCLBYTES; + else + rxr->mbuf_sz = MJUMPAGESIZE; + ixl_init_rx_ring(que); + } + + /* Configure queues */ + sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_QUEUES; + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + + /* Set up RSS */ + ixlv_config_rss(sc); + + /* Map vectors */ + sc->aq_required |= IXLV_FLAG_AQ_MAP_VECTORS; + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + + /* Enable queues */ + sc->aq_required |= IXLV_FLAG_AQ_ENABLE_QUEUES; + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + + /* Start the local timer */ + callout_reset(&sc->timer, hz, ixlv_local_timer, sc); + + sc->init_state = IXLV_RUNNING; + +init_done: + INIT_DBG_IF(ifp, "end"); + return; +} + +/* +** Init entry point for the stack +*/ +void +ixlv_init(void *arg) +{ + struct ixlv_sc *sc = arg; + + mtx_lock(&sc->mtx); + ixlv_init_locked(sc); + mtx_unlock(&sc->mtx); + return; +} + +/* + * Allocate MSI/X vectors, setup the AQ vector early + */ +static int +ixlv_init_msix(struct ixlv_sc *sc) +{ + device_t 
dev = sc->dev; + int rid, want, vectors, queues, available; + + rid = PCIR_BAR(IXL_BAR); + sc->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (!sc->msix_mem) { + /* May not be enabled */ + device_printf(sc->dev, + "Unable to map MSIX table \n"); + goto fail; + } + + available = pci_msix_count(dev); + if (available == 0) { /* system has msix disabled */ + bus_release_resource(dev, SYS_RES_MEMORY, + rid, sc->msix_mem); + sc->msix_mem = NULL; + goto fail; + } + + /* Figure out a reasonable auto config value */ + queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus; + + /* Override with hardcoded value if sane */ + if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues)) + queues = ixlv_max_queues; + + /* Enforce the VF max value */ + if (queues > IXLV_MAX_QUEUES) + queues = IXLV_MAX_QUEUES; + + /* + ** Want one vector (RX/TX pair) per queue + ** plus an additional for the admin queue. + */ + want = queues + 1; + if (want <= available) /* Have enough */ + vectors = want; + else { + device_printf(sc->dev, + "MSIX Configuration Problem, " + "%d vectors available but %d wanted!\n", + available, want); + goto fail; + } + + if (pci_alloc_msix(dev, &vectors) == 0) { + device_printf(sc->dev, + "Using MSIX interrupts with %d vectors\n", vectors); + sc->msix = vectors; + sc->vsi.num_queues = queues; + } + + /* + ** Explicitly set the guest PCI BUSMASTER capability + ** and we must rewrite the ENABLE in the MSIX control + ** register again at this point to cause the host to + ** successfully initialize us. 
+ */ + { + u16 pci_cmd_word; + int msix_ctrl; + pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); + pci_cmd_word |= PCIM_CMD_BUSMASTEREN; + pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); + pci_find_cap(dev, PCIY_MSIX, &rid); + rid += PCIR_MSIX_CTRL; + msix_ctrl = pci_read_config(dev, rid, 2); + msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; + pci_write_config(dev, rid, msix_ctrl, 2); + } + + /* Next we need to setup the vector for the Admin Queue */ + rid = 1; // zero vector + 1 + sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, + &rid, RF_SHAREABLE | RF_ACTIVE); + if (sc->res == NULL) { + device_printf(dev,"Unable to allocate" + " bus resource: AQ interrupt \n"); + goto fail; + } + if (bus_setup_intr(dev, sc->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixlv_msix_adminq, sc, &sc->tag)) { + sc->res = NULL; + device_printf(dev, "Failed to register AQ handler"); + goto fail; + } + bus_describe_intr(dev, sc->res, sc->tag, "adminq"); + + return (vectors); + +fail: + /* The VF driver MUST use MSIX */ + return (0); +} + +static int +ixlv_allocate_pci_resources(struct ixlv_sc *sc) +{ + int rid; + device_t dev = sc->dev; + + rid = PCIR_BAR(0); + sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(sc->pci_mem)) { + device_printf(dev,"Unable to allocate bus resource: memory\n"); + return (ENXIO); + } + + sc->osdep.mem_bus_space_tag = + rman_get_bustag(sc->pci_mem); + sc->osdep.mem_bus_space_handle = + rman_get_bushandle(sc->pci_mem); + sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); + sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; + + sc->hw.back = &sc->osdep; + + /* May need to pre-emptively disable adminq interrupts */ + ixlv_disable_adminq_irq(&sc->hw); + + /* + ** Now setup MSI/X, it will return + ** us the number of supported vectors + */ + sc->msix = ixlv_init_msix(sc); + + /* We fail without MSIX support */ + if (sc->msix == 0) + return (ENXIO); + + return (0); +} + +static void 
+ixlv_free_pci_resources(struct ixlv_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + device_t dev = sc->dev; + + /* We may get here before stations are setup */ + if (que == NULL) + goto early; + + /* + ** Release all msix queue resources: + */ + for (int i = 0; i < vsi->num_queues; i++, que++) { + int rid = que->msix + 1; + if (que->tag != NULL) { + bus_teardown_intr(dev, que->res, que->tag); + que->tag = NULL; + } + if (que->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); + } + +early: + /* Clean the AdminQ interrupt */ + if (sc->tag != NULL) { + bus_teardown_intr(dev, sc->res, sc->tag); + sc->tag = NULL; + } + if (sc->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res); + + pci_release_msi(dev); + + if (sc->msix_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(IXL_BAR), sc->msix_mem); + + if (sc->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), sc->pci_mem); + + return; +} + +static int +ixlv_init_taskqueue(struct ixlv_sc *sc) +{ + int error = 0; + + /* Tasklet for AQ Interrupts */ + TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc); + + sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, + taskqueue_thread_enqueue, &sc->tq); + taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq", + device_get_nameunit(sc->dev)); + + return (error); +} + +/********************************************************************* + * + * Setup MSIX Interrupt resources and handlers for the VSI queues + * + **********************************************************************/ +static int +ixlv_assign_msix(struct ixlv_sc *sc) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + struct tx_ring *txr; + int error, rid, vector = 1; + + for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { + rid = vector + 1; + txr = &que->txr; + que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); 
+ if (que->res == NULL) { + device_printf(dev,"Unable to allocate" + " bus resource: que interrupt [%d]\n", vector); + return (ENXIO); + } + /* Set the handler function */ + error = bus_setup_intr(dev, que->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixlv_msix_que, que, &que->tag); + if (error) { + que->res = NULL; + device_printf(dev, "Failed to register que handler"); + return (error); + } + bus_describe_intr(dev, que->res, que->tag, "que %d", i); + /* Bind the vector to a CPU */ + bus_bind_intr(dev, que->res, i); + que->msix = vector; + vsi->que_mask |= (u64)(1 << que->msix); + TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); + TASK_INIT(&que->task, 0, ixlv_handle_que, que); + que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", + device_get_nameunit(sc->dev)); + } + + return (0); +} + +/* +** XXX: Assumes the vf's admin queue has been initialized. +*/ +static int +ixlv_reset(struct ixlv_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + int error = 0; + + /* Ask the PF to reset us if we are initiating */ + if (sc->init_state != IXLV_RESET_PENDING) + ixlv_request_reset(sc); + + i40e_msec_delay(100); + error = ixlv_reset_complete(hw); + if (error) { + device_printf(dev, "%s: VF reset failed\n", + __func__); + return (error); + } + + error = i40e_shutdown_adminq(hw); + if (error) { + device_printf(dev, "%s: shutdown_adminq failed: %d\n", + __func__, error); + return (error); + } + + error = i40e_init_adminq(hw); + if (error) { + device_printf(dev, "%s: init_adminq failed: %d\n", + __func__, error); + return(error); + } + + return (0); +} + +static int +ixlv_reset_complete(struct i40e_hw *hw) +{ + u32 reg; + + for (int i = 0; i < 100; i++) { + reg = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + + if ((reg == I40E_VFR_VFACTIVE) || + (reg == I40E_VFR_COMPLETED)) + return (0); + i40e_usec_delay(20); + } + + return (EBUSY); +} 
+ + +/********************************************************************* + * + * Setup networking device structure and register an interface. + * + **********************************************************************/ +static int +ixlv_setup_interface(device_t dev, struct ixlv_sc *sc) +{ + struct ifnet *ifp; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + + INIT_DBG_DEV(dev, "begin"); + + ifp = vsi->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(dev, "can not allocate ifnet structure\n"); + return (-1); + } + + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + + ifp->if_mtu = ETHERMTU; + ifp->if_baudrate = 4000000000; // ?? + ifp->if_init = ixlv_init; + ifp->if_softc = vsi; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = ixlv_ioctl; + + ifp->if_transmit = ixl_mq_start; + + ifp->if_qflush = ixl_qflush; + ifp->if_snd.ifq_maxlen = que->num_desc - 2; + + ether_ifattach(ifp, sc->hw.mac.addr); + + vsi->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; + + /* + * Tell the upper layer(s) we support long frames. + */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + ifp->if_capabilities |= IFCAP_HWCSUM; + ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; + ifp->if_capabilities |= IFCAP_TSO; + ifp->if_capabilities |= IFCAP_JUMBO_MTU; + + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING + | IFCAP_VLAN_HWTSO + | IFCAP_VLAN_MTU + | IFCAP_VLAN_HWCSUM + | IFCAP_LRO; + ifp->if_capenable = ifp->if_capabilities; + + /* + ** Don't turn this on by default, if vlans are + ** created on another pseudo device (eg. lagg) + ** then vlan events are not passed thru, breaking + ** operation, but with HW FILTER off it works. If + ** using vlans directly on the ixl driver you can + ** enable this and get full hardware tag filtering. 
+ */ + ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; + + /* + * Specify the media types supported by this adapter and register + * callbacks to update media and link information + */ + ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change, + ixlv_media_status); + + // JFV Add media types later? + + ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); + + INIT_DBG_DEV(dev, "end"); + return (0); +} + +/* +** Allocate and setup the interface queues +*/ +static int +ixlv_setup_queues(struct ixlv_sc *sc) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi; + struct ixl_queue *que; + struct tx_ring *txr; + struct rx_ring *rxr; + int rsize, tsize; + int error = I40E_SUCCESS; + + vsi = &sc->vsi; + vsi->back = (void *)sc; + vsi->hw = &sc->hw; + vsi->num_vlans = 0; + + /* Get memory for the station queues */ + if (!(vsi->queues = + (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * + vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate queue memory\n"); + error = ENOMEM; + goto early; + } + + for (int i = 0; i < vsi->num_queues; i++) { + que = &vsi->queues[i]; + que->num_desc = ixlv_ringsz; + que->me = i; + que->vsi = vsi; + /* mark the queue as active */ + vsi->active_queues |= (u64)1 << que->me; + + txr = &que->txr; + txr->que = que; + txr->tail = I40E_QTX_TAIL1(que->me); + /* Initialize the TX lock */ + snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", + device_get_nameunit(dev), que->me); + mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); + /* + ** Create the TX descriptor ring, the extra int is + ** added as the location for HEAD WB. 
+ */ + tsize = roundup2((que->num_desc * + sizeof(struct i40e_tx_desc)) + + sizeof(u32), DBA_ALIGN); + if (i40e_allocate_dma(&sc->hw, + &txr->dma, tsize, DBA_ALIGN)) { + device_printf(dev, + "Unable to allocate TX Descriptor memory\n"); + error = ENOMEM; + goto fail; + } + txr->base = (struct i40e_tx_desc *)txr->dma.va; + bzero((void *)txr->base, tsize); + /* Now allocate transmit soft structs for the ring */ + if (ixl_allocate_tx_data(que)) { + device_printf(dev, + "Critical Failure setting up TX structures\n"); + error = ENOMEM; + goto fail; + } + /* Allocate a buf ring */ + txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, + M_WAITOK, &txr->mtx); + if (txr->br == NULL) { + device_printf(dev, + "Critical Failure setting up TX buf ring\n"); + error = ENOMEM; + goto fail; + } + + /* + * Next the RX queues... + */ + rsize = roundup2(que->num_desc * + sizeof(union i40e_rx_desc), DBA_ALIGN); + rxr = &que->rxr; + rxr->que = que; + rxr->tail = I40E_QRX_TAIL1(que->me); + + /* Initialize the RX side lock */ + snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", + device_get_nameunit(dev), que->me); + mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); + + if (i40e_allocate_dma(&sc->hw, + &rxr->dma, rsize, 4096)) { //JFV - should this be DBA? 
+ device_printf(dev, + "Unable to allocate RX Descriptor memory\n"); + error = ENOMEM; + goto fail; + } + rxr->base = (union i40e_rx_desc *)rxr->dma.va; + bzero((void *)rxr->base, rsize); + + /* Allocate receive soft structs for the ring*/ + if (ixl_allocate_rx_data(que)) { + device_printf(dev, + "Critical Failure setting up receive structs\n"); + error = ENOMEM; + goto fail; + } + } + + return (0); + +fail: + free(vsi->queues, M_DEVBUF); + for (int i = 0; i < vsi->num_queues; i++) { + que = &vsi->queues[i]; + rxr = &que->rxr; + txr = &que->txr; + if (rxr->base) + i40e_free_dma(&sc->hw, &rxr->dma); + if (txr->base) + i40e_free_dma(&sc->hw, &txr->dma); + } + +early: + return (error); +} + +/* +** This routine is run via an vlan config EVENT, +** it enables us to use the HW Filter table since +** we can get the vlan id. This just creates the +** entry in the soft version of the VFTA, init will +** repopulate the real table. +*/ +static void +ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct ixl_vsi *vsi = ifp->if_softc; + struct ixlv_sc *sc = vsi->back; + struct ixlv_vlan_filter *v; + + + if (ifp->if_softc != arg) /* Not our event */ + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + /* Sanity check - make sure it doesn't already exist */ + SLIST_FOREACH(v, sc->vlan_filters, next) { + if (v->vlan == vtag) + return; + } + + mtx_lock(&sc->mtx); + ++vsi->num_vlans; + v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); + SLIST_INSERT_HEAD(sc->vlan_filters, v, next); + v->vlan = vtag; + v->flags = IXL_FILTER_ADD; + sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; + mtx_unlock(&sc->mtx); + return; +} + +/* +** This routine is run via an vlan +** unconfig EVENT, remove our entry +** in the soft vfta. 
+*/ +static void +ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct ixl_vsi *vsi = ifp->if_softc; + struct ixlv_sc *sc = vsi->back; + struct ixlv_vlan_filter *v; + int i = 0; + + if (ifp->if_softc != arg) + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + mtx_lock(&sc->mtx); + SLIST_FOREACH(v, sc->vlan_filters, next) { + if (v->vlan == vtag) { + v->flags = IXL_FILTER_DEL; + ++i; + --vsi->num_vlans; + } + } + if (i) + sc->aq_required |= IXLV_FLAG_AQ_DEL_VLAN_FILTER; + mtx_unlock(&sc->mtx); + return; +} + +/* +** Get a new filter and add it to the mac filter list. +*/ +static struct ixlv_mac_filter * +ixlv_get_mac_filter(struct ixlv_sc *sc) +{ + struct ixlv_mac_filter *f; + + f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); + SLIST_INSERT_HEAD(sc->mac_filters, f, next); + + return (f); +} + +/* +** Find the filter with matching MAC address +*/ +static struct ixlv_mac_filter * +ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr) +{ + struct ixlv_mac_filter *f; + bool match = FALSE; + + SLIST_FOREACH(f, sc->mac_filters, next) { + if (cmp_etheraddr(f->macaddr, macaddr)) { + match = TRUE; + break; + } + } + + if (!match) + f = NULL; + return (f); +} + +/* +** Admin Queue interrupt handler +*/ +static void +ixlv_msix_adminq(void *arg) +{ + struct ixlv_sc *sc = arg; + struct i40e_hw *hw = &sc->hw; + u32 reg, mask; + + reg = rd32(hw, I40E_VFINT_ICR01); + mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + + reg = rd32(hw, I40E_VFINT_DYN_CTL01); + reg |= I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; + wr32(hw, I40E_VFINT_DYN_CTL01, reg); + + /* re-enable interrupt causes */ + wr32(hw, I40E_VFINT_ICR0_ENA1, mask); + wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK); + + /* schedule task */ + taskqueue_enqueue(sc->tq, &sc->aq_irq); + return; +} + +void +ixlv_enable_intr(struct ixl_vsi *vsi) +{ + struct i40e_hw *hw = vsi->hw; + struct ixl_queue *que = vsi->queues; + + ixlv_enable_adminq_irq(hw); + for (int i = 
0; i < vsi->num_queues; i++, que++)
		ixlv_enable_queue_irq(hw, que->me);
}

/* Mask the admin queue interrupt and every queue interrupt. */
void
ixlv_disable_intr(struct ixl_vsi *vsi)
{
	struct i40e_hw *hw = vsi->hw;
	struct ixl_queue *que = vsi->queues;

	ixlv_disable_adminq_irq(hw);
	for (int i = 0; i < vsi->num_queues; i++, que++)
		ixlv_disable_queue_irq(hw, que->me);
}


/* Mask the AQ interrupt and clear all its enabled causes. */
static void
ixlv_disable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

/* Unmask the AQ interrupt and enable only the admin-queue cause. */
static void
ixlv_enable_adminq_irq(struct i40e_hw *hw)
{
	wr32(hw, I40E_VFINT_DYN_CTL01,
	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
	/* flush */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}

/* Unmask the given queue's interrupt vector. */
static void
ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
{
	u32		reg;

	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}

/* Mask the given queue's interrupt vector. */
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
	/* flush the write */
	rd32(hw, I40E_VFGEN_RSTAT);
	return;
}


/*
** Provide an update to the queue RX
** interrupt moderation value.
*/
static void
ixlv_set_queue_rx_itr(struct ixl_queue *que)
{
	struct ixl_vsi	*vsi = que->vsi;
	struct i40e_hw	*hw = vsi->hw;
	struct rx_ring	*rxr = &que->rxr;
	u16		rx_itr;
	u16		rx_latency = 0;
	int		rx_bytes;


	/* Idle, do nothing */
	if (rxr->bytes == 0)
		return;

	if (ixlv_dynamic_rx_itr) {
		/* bytes per ITR interval is the load metric */
		rx_bytes = rxr->bytes/rxr->itr;
		rx_itr = rxr->itr;

		/* Adjust latency range */
		switch (rxr->latency) {
		case IXL_LOW_LATENCY:
			if (rx_bytes > 10) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		case IXL_AVE_LATENCY:
			if (rx_bytes > 20) {
				rx_latency = IXL_BULK_LATENCY;
				rx_itr = IXL_ITR_8K;
			} else if (rx_bytes <= 10) {
				rx_latency = IXL_LOW_LATENCY;
				rx_itr = IXL_ITR_100K;
			}
			break;
		case IXL_BULK_LATENCY:
			if (rx_bytes <= 20) {
				rx_latency = IXL_AVE_LATENCY;
				rx_itr = IXL_ITR_20K;
			}
			break;
		}

		rxr->latency = rx_latency;

		if (rx_itr != rxr->itr) {
			/* do an exponential smoothing */
			rx_itr = (10 * rx_itr * rxr->itr) /
			    ((9 * rx_itr) + rxr->itr);
			rxr->itr = rx_itr & IXL_MAX_ITR;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	} else { /* We may have toggled to non-dynamic */
		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
			vsi->rx_itr_setting = ixlv_rx_itr;
		/* Update the hardware if needed */
		if (rxr->itr != vsi->rx_itr_setting) {
			rxr->itr = vsi->rx_itr_setting;
			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
			    que->me), rxr->itr);
		}
	}
	/* reset the accounting window */
	rxr->bytes = 0;
	rxr->packets = 0;
	return;
}


/*
** Provide an update to the queue TX
** interrupt moderation value.
+*/ +static void +ixlv_set_queue_tx_itr(struct ixl_queue *que) +{ + struct ixl_vsi *vsi = que->vsi; + struct i40e_hw *hw = vsi->hw; + struct tx_ring *txr = &que->txr; + u16 tx_itr; + u16 tx_latency = 0; + int tx_bytes; + + + /* Idle, do nothing */ + if (txr->bytes == 0) + return; + + if (ixlv_dynamic_tx_itr) { + tx_bytes = txr->bytes/txr->itr; + tx_itr = txr->itr; + + switch (txr->latency) { + case IXL_LOW_LATENCY: + if (tx_bytes > 10) { + tx_latency = IXL_AVE_LATENCY; + tx_itr = IXL_ITR_20K; + } + break; + case IXL_AVE_LATENCY: + if (tx_bytes > 20) { + tx_latency = IXL_BULK_LATENCY; + tx_itr = IXL_ITR_8K; + } else if (tx_bytes <= 10) { + tx_latency = IXL_LOW_LATENCY; + tx_itr = IXL_ITR_100K; + } + break; + case IXL_BULK_LATENCY: + if (tx_bytes <= 20) { + tx_latency = IXL_AVE_LATENCY; + tx_itr = IXL_ITR_20K; + } + break; + } + + txr->latency = tx_latency; + + if (tx_itr != txr->itr) { + /* do an exponential smoothing */ + tx_itr = (10 * tx_itr * txr->itr) / + ((9 * tx_itr) + txr->itr); + txr->itr = tx_itr & IXL_MAX_ITR; + wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, + que->me), txr->itr); + } + + } else { /* We may have have toggled to non-dynamic */ + if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) + vsi->tx_itr_setting = ixlv_tx_itr; + /* Update the hardware if needed */ + if (txr->itr != vsi->tx_itr_setting) { + txr->itr = vsi->tx_itr_setting; + wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, + que->me), txr->itr); + } + } + txr->bytes = 0; + txr->packets = 0; + return; +} + + +/* +** +** MSIX Interrupt Handlers and Tasklets +** +*/ +static void +ixlv_handle_que(void *context, int pending) +{ + struct ixl_queue *que = context; + struct ixl_vsi *vsi = que->vsi; + struct i40e_hw *hw = vsi->hw; + struct tx_ring *txr = &que->txr; + struct ifnet *ifp = vsi->ifp; + bool more; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + more = ixl_rxeof(que, IXL_RX_LIMIT); + mtx_lock(&txr->mtx); + ixl_txeof(que); + if (!drbr_empty(ifp, txr->br)) + ixl_mq_start_locked(ifp, txr); + mtx_unlock(&txr->mtx); 
+ if (more) { + taskqueue_enqueue(que->tq, &que->task); + return; + } + } + + /* Reenable this interrupt - hmmm */ + ixlv_enable_queue_irq(hw, que->me); + return; +} + + +/********************************************************************* + * + * MSIX Queue Interrupt Service routine + * + **********************************************************************/ +static void +ixlv_msix_que(void *arg) +{ + struct ixl_queue *que = arg; + struct ixl_vsi *vsi = que->vsi; + struct i40e_hw *hw = vsi->hw; + struct tx_ring *txr = &que->txr; + bool more_tx, more_rx; + + /* Spurious interrupts are ignored */ + if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) + return; + + ++que->irqs; + + more_rx = ixl_rxeof(que, IXL_RX_LIMIT); + + mtx_lock(&txr->mtx); + more_tx = ixl_txeof(que); + /* + ** Make certain that if the stack + ** has anything queued the task gets + ** scheduled to handle it. + */ + if (!drbr_empty(vsi->ifp, txr->br)) + more_tx = 1; + mtx_unlock(&txr->mtx); + + ixlv_set_queue_rx_itr(que); + ixlv_set_queue_tx_itr(que); + + if (more_tx || more_rx) + taskqueue_enqueue(que->tq, &que->task); + else + ixlv_enable_queue_irq(hw, que->me); + + return; +} + + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called whenever the user queries the status of + * the interface using ifconfig. 
+ * + **********************************************************************/ +static void +ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) +{ + struct ixl_vsi *vsi = ifp->if_softc; + struct ixlv_sc *sc = vsi->back; + + INIT_DBG_IF(ifp, "begin"); + + mtx_lock(&sc->mtx); + + ixlv_update_link_status(sc); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!vsi->link_up) { + mtx_unlock(&sc->mtx); + INIT_DBG_IF(ifp, "end: link not up"); + return; + } + + ifmr->ifm_status |= IFM_ACTIVE; + /* Hardware is always full-duplex */ + ifmr->ifm_active |= IFM_FDX; + mtx_unlock(&sc->mtx); + INIT_DBG_IF(ifp, "end"); + return; +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called when the user changes speed/duplex using + * media/mediopt option with ifconfig. + * + **********************************************************************/ +static int +ixlv_media_change(struct ifnet * ifp) +{ + struct ixl_vsi *vsi = ifp->if_softc; + struct ifmedia *ifm = &vsi->media; + + INIT_DBG_IF(ifp, "begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + INIT_DBG_IF(ifp, "end"); + return (0); +} + + +/********************************************************************* + * Multicast Initialization + * + * This routine is called by init to reset a fresh state. 
+ * + **********************************************************************/ + +static void +ixlv_init_multi(struct ixl_vsi *vsi) +{ + struct ixlv_mac_filter *f; + struct ixlv_sc *sc = vsi->back; + int mcnt = 0; + + IOCTL_DBG_IF(vsi->ifp, "begin"); + + /* First clear any multicast filters */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if ((f->flags & IXL_FILTER_USED) + && (f->flags & IXL_FILTER_MC)) { + f->flags |= IXL_FILTER_DEL; + mcnt++; + } + } + if (mcnt > 0) + sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER; + + IOCTL_DBG_IF(vsi->ifp, "end"); +} + +static void +ixlv_add_multi(struct ixl_vsi *vsi) +{ + struct ifmultiaddr *ifma; + struct ifnet *ifp = vsi->ifp; + struct ixlv_sc *sc = vsi->back; + int mcnt = 0; + + IOCTL_DBG_IF(ifp, "begin"); + + if_maddr_rlock(ifp); + /* + ** Get a count, to decide if we + ** simply use multicast promiscuous. + */ + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + mcnt++; + } + if_maddr_runlock(ifp); + + if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { + /* delete all multicast filters */ + ixlv_init_multi(vsi); + sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC; + sc->aq_required |= IXLV_FLAG_AQ_CONFIGURE_PROMISC; + IOCTL_DEBUGOUT("%s: end: too many filters", __func__); + return; + } + + mcnt = 0; + if_maddr_rlock(ifp); + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + if (!ixlv_add_mac_filter(sc, + (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), + IXL_FILTER_MC)) + mcnt++; + } + if_maddr_runlock(ifp); + /* + ** Notify AQ task that sw filters need to be + ** added to hw list + */ + if (mcnt > 0) + sc->aq_required |= IXLV_FLAG_AQ_ADD_MAC_FILTER; + + IOCTL_DBG_IF(ifp, "end"); +} + +static void +ixlv_del_multi(struct ixl_vsi *vsi) +{ + struct ixlv_mac_filter *f; + struct ifmultiaddr *ifma; + struct ifnet *ifp = vsi->ifp; + struct ixlv_sc *sc = vsi->back; + int mcnt = 0; + bool match = 
	    FALSE;

	IOCTL_DBG_IF(ifp, "begin");

	/* Search for removed multicast addresses */
	if_maddr_rlock(ifp);
	SLIST_FOREACH(f, sc->mac_filters, next) {
		if ((f->flags & IXL_FILTER_USED)
		    && (f->flags & IXL_FILTER_MC)) {
			/* check if mac address in filter is in sc's list */
			match = FALSE;
			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
				if (ifma->ifma_addr->sa_family != AF_LINK)
					continue;
				u8 *mc_addr =
				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
				if (cmp_etheraddr(f->macaddr, mc_addr)) {
					match = TRUE;
					break;
				}
			}
			/* if this filter is not in the sc's list, remove it */
			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
				/* Mark for deletion; the periodic AQ task flushes it */
				f->flags |= IXL_FILTER_DEL;
				mcnt++;
				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
			}
			else if (match == FALSE)
				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
				    MAC_FORMAT_ARGS(f->macaddr));
		}
	}
	if_maddr_runlock(ifp);

	/* Ask the AQ scheduler to push the deletions to the PF */
	if (mcnt > 0)
		sc->aq_required |= IXLV_FLAG_AQ_DEL_MAC_FILTER;

	IOCTL_DBG_IF(ifp, "end");
}

/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status, updates statistics,
 *  and runs the watchdog check.
 *
 *  Runs once per second via callout; re-arms itself unless a TX
 *  hang is detected on every queue, in which case it requests a
 *  reset and re-initializes the device.
 *
 **********************************************************************/

static void
ixlv_local_timer(void *arg)
{
	struct ixlv_sc	*sc = arg;
	struct i40e_hw	*hw = &sc->hw;
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ixl_queue *que = vsi->queues;
	device_t	dev = sc->dev;
	int		hung = 0;
	u32		mask, val, oldval;

	mtx_assert(&sc->mtx, MA_OWNED);

	/* If Reset is in progress just bail */
	if (sc->init_state == IXLV_RESET_PENDING)
		return;

	/* Check for when PF triggers a VF reset */
	val = rd32(hw, I40E_VFGEN_RSTAT) &
	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;

	if (val != I40E_VFR_VFACTIVE
	    && val != I40E_VFR_COMPLETED) {
#ifdef IXL_DEBUG
		device_printf(dev, "%s: reset in progress! (%d)\n",
		    __func__, val);
#endif
		return;
	}

	/* check for Admin queue errors */
	val = rd32(hw, hw->aq.arq.len);
	oldval = val;
	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
		device_printf(dev, "ARQ VF Error detected\n");
		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
		device_printf(dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
	}
	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
		device_printf(dev, "ARQ Critical Error detected\n");
		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
	}
	/* Write back only if an error bit was cleared */
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
		device_printf(dev, "ASQ VF Error detected\n");
		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
		device_printf(dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
		device_printf(dev, "ASQ Critical Error detected\n");
		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

	/* clean and process any events */
	taskqueue_enqueue(sc->tq, &sc->aq_irq);

	/*
	** Check status on the queues for a hang
	*/
	mask = (I40E_VFINT_DYN_CTLN_INTENA_MASK |
	    I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK);

	for (int i = 0; i < vsi->num_queues; i++, que++) {
		/* Any queues with outstanding work get a sw irq */
		if (que->busy)
			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
		/*
		** Each time txeof runs without cleaning, but there
		** are uncleaned descriptors it increments busy. If
		** we get to 5 we declare it hung.
		*/
		if (que->busy == IXL_QUEUE_HUNG) {
			++hung;
			/* Mark the queue as inactive */
			vsi->active_queues &= ~((u64)1 << que->me);
			continue;
		} else {
			/* Check if we've come back from hung */
			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
				vsi->active_queues |= ((u64)1 << que->me);
		}
		if (que->busy >= IXL_MAX_TX_BUSY) {
			device_printf(dev,"Warning queue %d "
			    "appears to be hung!\n", i);
			que->busy = IXL_QUEUE_HUNG;
			++hung;
		}
	}
	/* Only reset when all queues show hung */
	if (hung == vsi->num_queues)
		goto hung;
	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
	return;

hung:
	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
	sc->init_state = IXLV_RESET_REQUIRED;
	ixlv_init_locked(sc);
}

/*
** Note: this routine updates the OS on the link state
** the real check of the hardware only happens with
** a link interrupt.
*/
static void
ixlv_update_link_status(struct ixlv_sc *sc)
{
	struct ixl_vsi	*vsi = &sc->vsi;
	struct ifnet	*ifp = vsi->ifp;
	device_t	 dev = sc->dev;

	if (vsi->link_up){
		if (vsi->link_active == FALSE) {
			/* NOTE(review): only 40G/10G speeds reported here —
			   assumes VF never sees other link rates; confirm */
			if (bootverbose)
				device_printf(dev,"Link is Up, %d Gbps\n",
				    (vsi->link_speed == I40E_LINK_SPEED_40GB) ? 40:10);
			vsi->link_active = TRUE;
			if_link_state_change(ifp, LINK_STATE_UP);
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			if (bootverbose)
				device_printf(dev,"Link is Down\n");
			if_link_state_change(ifp, LINK_STATE_DOWN);
			vsi->link_active = FALSE;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
+ * + **********************************************************************/ + +static void +ixlv_stop(struct ixlv_sc *sc) +{ + mtx_assert(&sc->sc_mtx, MA_OWNED); + + INIT_DBG_IF(&sc->vsi->ifp, "begin"); + + sc->aq_required |= IXLV_FLAG_AQ_DISABLE_QUEUES; + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + + /* Stop the local timer */ + callout_stop(&sc->timer); + + INIT_DBG_IF(&sc->vsi->ifp, "end"); +} + + +/********************************************************************* + * + * Free all station queue structs. + * + **********************************************************************/ +static void +ixlv_free_queues(struct ixl_vsi *vsi) +{ + struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; + struct ixl_queue *que = vsi->queues; + + for (int i = 0; i < vsi->num_queues; i++, que++) { + struct tx_ring *txr = &que->txr; + struct rx_ring *rxr = &que->rxr; + + if (!mtx_initialized(&txr->mtx)) /* uninitialized */ + continue; + IXL_TX_LOCK(txr); + ixl_free_que_tx(que); + if (txr->base) + i40e_free_dma(&sc->hw, &txr->dma); + IXL_TX_UNLOCK(txr); + IXL_TX_LOCK_DESTROY(txr); + + if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ + continue; + IXL_RX_LOCK(rxr); + ixl_free_que_rx(que); + if (rxr->base) + i40e_free_dma(&sc->hw, &rxr->dma); + IXL_RX_UNLOCK(rxr); + IXL_RX_LOCK_DESTROY(rxr); + + } + free(vsi->queues, M_DEVBUF); +} + + +/* +** ixlv_config_rss - setup RSS +*/ +static void +ixlv_config_rss(struct ixlv_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + u32 lut = 0; + u64 set_hena, hena; + int i, j; + + /* set up random bits */ + static const u32 seed[I40E_VFQF_HKEY_MAX_INDEX + 1] = { + 0x794221b4, 0xbca0c5ab, 0x6cd5ebd9, 0x1ada6127, + 0x983b3aa1, 0x1c4e71eb, 0x7f6328b2, 0xfcdc0da0, + 0xc135cafa, 0x7a6f7e2d, 0xe7102d28, 0x163cd12e, + 0x4954b126 }; + + /* Fill out hash function seed */ + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + wr32(hw, I40E_VFQF_HKEY(i), seed[i]); + + /* Enable PCTYPES for RSS: */ + 
set_hena = + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | + ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | + ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | + ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD); + + hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | + ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); + hena |= set_hena; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == vsi->num_queues) + j = 0; + /* lut = 4-byte sliding window of 4 lut entries */ + lut = (lut << 8) | (j & + ((0x1 << hw->func_caps.rss_table_entry_width) - 1)); + /* On i = 3, we have 4 entries in lut; write to the register */ + if ((i & 3) == 3) + wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); + } + ixl_flush(hw); +} + + +/* +** This routine refreshes vlan filters, called by init +** it scans the filter table and then updates the AQ +*/ +static void +ixlv_setup_vlan_filters(struct ixlv_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + struct ixlv_vlan_filter *f; + int cnt = 0; + + if (vsi->num_vlans == 0) + return; + /* + ** Scan the filter table for vlan entries, + ** and if found call for the AQ update. + */ + SLIST_FOREACH(f, sc->vlan_filters, next) + if (f->flags & IXL_FILTER_ADD) + cnt++; + if (cnt == 0) + return; + + sc->aq_required |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; + return; +} + + +/* +** This routine adds new MAC filters to the sc's list; +** these are later added in hardware by the periodic +** aq task. 
+*/ +static int +ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) +{ + struct ixlv_mac_filter *f; + device_t dev = sc->dev; + + /* Does one already exist? */ + f = ixlv_find_mac_filter(sc, macaddr); + if (f != NULL) { + IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT, + MAC_FORMAT_ARGS(macaddr)); + return (EEXIST); + } + + /* If not, get a new empty filter */ + f = ixlv_get_mac_filter(sc); + if (f == NULL) { + device_printf(dev, "%s: no filters available!!\n", + __func__); + return (ENOMEM); + } + + IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT, + MAC_FORMAT_ARGS(macaddr)); + + bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); + f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); + f->flags |= flags; + return (0); +} + +/* +** Tasklet handler for MSIX Adminq interrupts +** - done outside interrupt context since it might sleep +*/ +static void +ixlv_do_adminq(void *context, int pending) +{ + struct ixlv_sc *sc = context; + struct i40e_hw *hw = &sc->hw; + struct i40e_arq_event_info event; + struct i40e_virtchnl_msg *v_msg; + i40e_status ret; + u16 result = 0; + + + event.buf_len = IXL_AQ_BUF_SZ; + event.msg_buf = malloc(event.buf_len, + M_DEVBUF, M_NOWAIT | M_ZERO); + if (!event.msg_buf) { + printf("Unable to allocate adminq memory\n"); + return; + } + v_msg = (struct i40e_virtchnl_msg *)&event.desc; + + mtx_lock(&sc->mtx); + /* clean and process any events */ + do { + ret = i40e_clean_arq_element(hw, &event, &result); + if (ret) + break; + ixlv_vc_completion(sc, v_msg->v_opcode, + v_msg->v_retval, event.msg_buf, event.msg_len); + if (result != 0) + bzero(event.msg_buf, IXL_AQ_BUF_SZ); + } while (result); + + ixlv_enable_adminq_irq(hw); + free(event.msg_buf, M_DEVBUF); + mtx_unlock(&sc->mtx); + return; +} + +/* +** ixlv_sched_aq - Periodic scheduling tasklet +** +*/ +static void +ixlv_sched_aq(void *context) +{ + struct ixlv_sc *sc = context; + struct ixl_vsi *vsi = &sc->vsi; + + /* This is driven by a callout, don't spin */ + if (!mtx_trylock(&sc->mtx)) + goto 
done_nolock; + + if (sc->init_state == IXLV_RESET_PENDING) + goto done; + + /* Process requested admin queue tasks */ + if (sc->aq_pending) + goto done; + + if (sc->aq_required & IXLV_FLAG_AQ_MAP_VECTORS) { + ixlv_map_queues(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_ADD_MAC_FILTER) { + ixlv_add_ether_filters(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_ADD_VLAN_FILTER) { + ixlv_add_vlans(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_DEL_MAC_FILTER) { + ixlv_del_ether_filters(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_DEL_VLAN_FILTER) { + ixlv_del_vlans(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_CONFIGURE_QUEUES) { + ixlv_configure_queues(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_DISABLE_QUEUES) { + ixlv_disable_queues(sc); + goto done; + } + + if (sc->aq_required & IXLV_FLAG_AQ_ENABLE_QUEUES) { + ixlv_enable_queues(sc); + goto done; + } + + /* Do stats request only if no other AQ operations requested */ + if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) + ixlv_request_stats(sc); + +done: + mtx_unlock(&sc->mtx); +done_nolock: + if (sc->aq_required) /* Reschedule */ + callout_reset(&sc->aq_task, IXLV_CALLOUT_TIMO, + ixlv_sched_aq, sc); + else + callout_reset(&sc->aq_task, 2 * hz, ixlv_sched_aq, sc); +} + +static void +ixlv_add_stats_sysctls(struct ixlv_sc *sc) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi = &sc->vsi; + struct i40e_eth_stats *es = &vsi->eth_stats; + + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + + struct sysctl_oid *vsi_node, *queue_node; + struct sysctl_oid_list *vsi_list, *queue_list; + +#define QUEUE_NAME_LEN 32 + char queue_namebuf[QUEUE_NAME_LEN]; + + struct ixl_queue *queues = vsi->queues; + struct tx_ring *txr; + struct rx_ring *rxr; + + /* Driver statistics */ + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, 
"watchdog_events", + CTLFLAG_RD, &sc->watchdog_events, + "Watchdog timeouts"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq", + CTLFLAG_RD, &sc->admin_irq, + "Admin Queue IRQ Handled"); + + /* VSI statistics */ + vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", + CTLFLAG_RD, NULL, "VSI-specific statistics"); + vsi_list = SYSCTL_CHILDREN(vsi_node); + + struct ixl_sysctl_info ctls[] = + { + {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, + {&es->rx_unicast, "ucast_pkts_rcvd", + "Unicast Packets Received"}, + {&es->rx_multicast, "mcast_pkts_rcvd", + "Multicast Packets Received"}, + {&es->rx_broadcast, "bcast_pkts_rcvd", + "Broadcast Packets Received"}, + {&es->rx_discards, "rx_discards", "Discarded RX packets"}, + {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, + {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, + {&es->tx_multicast, "mcast_pkts_txd", + "Multicast Packets Transmitted"}, + {&es->tx_broadcast, "bcast_pkts_txd", + "Broadcast Packets Transmitted"}, + {&es->tx_discards, "tx_discards", "Discarded TX packets"}, + // end + {0,0,0} + }; + struct ixl_sysctl_info *entry = ctls; + while (entry->stat != 0) + { + SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, + CTLFLAG_RD, entry->stat, + entry->description); + entry++; + } + + /* Queue statistics */ + for (int q = 0; q < vsi->num_queues; q++) { + snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); + queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + txr = &(queues[q].txr); + rxr = &(queues[q].rxr); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", + CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), + "m_defrag() failed"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped", + CTLFLAG_RD, &(queues[q].dropped_pkts), + "Driver dropped packets"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", + CTLFLAG_RD, &(queues[q].irqs), + 
"irqs on this queue"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", + CTLFLAG_RD, &(queues[q].tso), + "TSO"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup", + CTLFLAG_RD, &(queues[q].tx_dma_setup), + "Driver tx dma failure in xmit"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + CTLFLAG_RD, &(txr->no_desc), + "Queue No Descriptor Available"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + CTLFLAG_RD, &(txr->total_packets), + "Queue Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", + CTLFLAG_RD, &(txr->tx_bytes), + "Queue Bytes Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &(rxr->rx_packets), + "Queue Packets Received"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &(rxr->rx_bytes), + "Queue Bytes Received"); + } +} + +static void +ixlv_init_filters(struct ixlv_sc *sc) +{ + sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter), + M_DEVBUF, M_NOWAIT | M_ZERO); + SLIST_INIT(sc->mac_filters); + sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter), + M_DEVBUF, M_NOWAIT | M_ZERO); + SLIST_INIT(sc->vlan_filters); + return; +} + +static void +ixlv_free_filters(struct ixlv_sc *sc) +{ + struct ixlv_mac_filter *f; + struct ixlv_vlan_filter *v; + + while (!SLIST_EMPTY(sc->mac_filters)) { + f = SLIST_FIRST(sc->mac_filters); + SLIST_REMOVE_HEAD(sc->mac_filters, next); + free(f, M_DEVBUF); + } + while (!SLIST_EMPTY(sc->vlan_filters)) { + v = SLIST_FIRST(sc->vlan_filters); + SLIST_REMOVE_HEAD(sc->vlan_filters, next); + free(v, M_DEVBUF); + } + return; +} + diff --git a/sys/dev/i40e/i40e.h b/sys/dev/ixl/ixl.h old mode 100755 new mode 100644 similarity index 64% rename from sys/dev/i40e/i40e.h rename to sys/dev/ixl/ixl.h index e377c2cd6970..25e6d27bc6e2 --- a/sys/dev/i40e/i40e.h +++ b/sys/dev/ixl/ixl.h @@ -33,8 +33,8 @@ /*$FreeBSD$*/ -#ifndef _I40E_H_ -#define _I40E_H_ +#ifndef _IXL_H_ +#define _IXL_H_ #include @@ 
-91,7 +91,7 @@ #include "i40e_type.h" #include "i40e_prototype.h" -#ifdef I40E_DEBUG +#ifdef IXL_DEBUG #include #define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" @@ -100,15 +100,48 @@ (mac_addr)[4], (mac_addr)[5] #define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") -#define DPRINTF(...) printf(__VA_ARGS__) -#define DDPRINTF(dev, ...) device_printf(dev, __VA_ARGS__) -#define IDPRINTF(ifp, ...) if_printf(ifp, __VA_ARGS__) -// static void i40e_dump_desc(void *, u8, u16); +#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) +#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) +#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) + +/* Defines for printing generic debug information */ +#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) +#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) +#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__) + +/* Defines for printing specific debug information */ +#define DEBUG_INIT 1 +#define DEBUG_IOCTL 1 +#define DEBUG_HW 1 + +#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) +#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) +#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) + +#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) +#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ + if_printf(ifp, S "\n", ##__VA_ARGS__) +#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) + +#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) + #else +#define DEBUG_INIT 0 +#define DEBUG_IOCTL 0 +#define DEBUG_HW 0 + #define DPRINTF(...) #define DDPRINTF(...) #define IDPRINTF(...) + +#define INIT_DEBUGOUT(...) +#define INIT_DBG_DEV(...) +#define INIT_DBG_IF(...) +#define IOCTL_DEBUGOUT(...) +#define IOCTL_DBG_IF2(...) +#define IOCTL_DBG_IF(...) +#define HW_DEBUGOUT(...) 
#endif /* Tunables */ @@ -124,6 +157,11 @@ #define MAX_RING 4096 #define MIN_RING 32 +/* +** Default number of entries in Tx queue buf_ring. +*/ +#define DEFAULT_TXBRSZ (4096 * 4096) + /* Alignment for rings */ #define DBA_ALIGN 128 @@ -138,106 +176,91 @@ * pass between any two TX clean operations, such only happening * when the TX hardware is functioning. */ -#define I40E_WATCHDOG (10 * hz) +#define IXL_WATCHDOG (10 * hz) /* * This parameters control when the driver calls the routine to reclaim * transmit descriptors. */ -#define I40E_TX_CLEANUP_THRESHOLD (que->num_desc / 8) -#define I40E_TX_OP_THRESHOLD (que->num_desc / 32) +#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8) +#define IXL_TX_OP_THRESHOLD (que->num_desc / 32) /* Flow control constants */ -#define I40E_FC_PAUSE 0xFFFF -#define I40E_FC_HI 0x20000 -#define I40E_FC_LO 0x10000 - -/* Defines for printing debug information */ -#define DEBUG_INIT 0 -#define DEBUG_IOCTL 0 -#define DEBUG_HW 0 - -#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") -#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) -#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) -#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") -#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) -#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) -#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") -#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) -#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) +#define IXL_FC_PAUSE 0xFFFF +#define IXL_FC_HI 0x20000 +#define IXL_FC_LO 0x10000 #define MAX_MULTICAST_ADDR 128 -#define I40E_BAR 3 -#define I40E_ADM_LIMIT 2 -#define I40E_TSO_SIZE 65535 -#define I40E_TX_BUF_SZ ((u32) 1514) -#define I40E_AQ_BUF_SZ ((u32) 4096) -#define I40E_RX_HDR 128 -#define I40E_AQ_LEN 32 -#define I40E_AQ_BUFSZ 4096 -#define I40E_RX_LIMIT 512 -#define I40E_RX_ITR 0 -#define I40E_TX_ITR 1 -#define I40E_ITR_NONE 3 -#define I40E_QUEUE_EOL 
0x7FF -#define I40E_MAX_FRAME 0x2600 -#define I40E_MAX_TX_SEGS 8 -#define I40E_MAX_TSO_SEGS 66 -#define I40E_SPARSE_CHAIN 6 -#define I40E_QUEUE_HUNG 0x80000000 +#define IXL_BAR 3 +#define IXL_ADM_LIMIT 2 +#define IXL_TSO_SIZE 65535 +#define IXL_TX_BUF_SZ ((u32) 1514) +#define IXL_AQ_BUF_SZ ((u32) 4096) +#define IXL_RX_HDR 128 +#define IXL_AQ_LEN 256 +#define IXL_AQ_BUFSZ 4096 +#define IXL_RX_LIMIT 512 +#define IXL_RX_ITR 0 +#define IXL_TX_ITR 1 +#define IXL_ITR_NONE 3 +#define IXL_QUEUE_EOL 0x7FF +#define IXL_MAX_FRAME 0x2600 +#define IXL_MAX_TX_SEGS 8 +#define IXL_MAX_TSO_SEGS 66 +#define IXL_SPARSE_CHAIN 6 +#define IXL_QUEUE_HUNG 0x80000000 /* ERJ: hardware can support ~1.5k filters between all functions */ -#define I40E_MAX_FILTERS 256 -#define I40E_MAX_TX_BUSY 10 +#define IXL_MAX_FILTERS 256 +#define IXL_MAX_TX_BUSY 10 -#define I40E_NVM_VERSION_LO_SHIFT 0 -#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) -#define I40E_NVM_VERSION_HI_SHIFT 12 -#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT) +#define IXL_NVM_VERSION_LO_SHIFT 0 +#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT) +#define IXL_NVM_VERSION_HI_SHIFT 12 +#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT) /* * Interrupt Moderation parameters */ -#define I40E_MAX_ITR 0x07FF -#define I40E_ITR_100K 0x0005 -#define I40E_ITR_20K 0x0019 -#define I40E_ITR_8K 0x003E -#define I40E_ITR_4K 0x007A -#define I40E_ITR_DYNAMIC 0x8000 -#define I40E_LOW_LATENCY 0 -#define I40E_AVE_LATENCY 1 -#define I40E_BULK_LATENCY 2 +#define IXL_MAX_ITR 0x07FF +#define IXL_ITR_100K 0x0005 +#define IXL_ITR_20K 0x0019 +#define IXL_ITR_8K 0x003E +#define IXL_ITR_4K 0x007A +#define IXL_ITR_DYNAMIC 0x8000 +#define IXL_LOW_LATENCY 0 +#define IXL_AVE_LATENCY 1 +#define IXL_BULK_LATENCY 2 /* MacVlan Flags */ -#define I40E_FILTER_USED (u16)(1 << 0) -#define I40E_FILTER_VLAN (u16)(1 << 1) -#define I40E_FILTER_ADD (u16)(1 << 2) -#define I40E_FILTER_DEL (u16)(1 << 3) -#define 
I40E_FILTER_MC (u16)(1 << 4) +#define IXL_FILTER_USED (u16)(1 << 0) +#define IXL_FILTER_VLAN (u16)(1 << 1) +#define IXL_FILTER_ADD (u16)(1 << 2) +#define IXL_FILTER_DEL (u16)(1 << 3) +#define IXL_FILTER_MC (u16)(1 << 4) /* used in the vlan field of the filter when not a vlan */ -#define I40E_VLAN_ANY -1 +#define IXL_VLAN_ANY -1 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO) -/* Misc flags for i40e_vsi.flags */ -#define I40E_FLAGS_KEEP_TSO4 (1 << 0) -#define I40E_FLAGS_KEEP_TSO6 (1 << 1) +/* Misc flags for ixl_vsi.flags */ +#define IXL_FLAGS_KEEP_TSO4 (1 << 0) +#define IXL_FLAGS_KEEP_TSO6 (1 << 1) -#define I40E_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) -#define I40E_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) -#define I40E_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) -#define I40E_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx) -#define I40E_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) +#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) +#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) +#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) +#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx) +#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) -#define I40E_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx) -#define I40E_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) -#define I40E_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) +#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx) +#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) +#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) /* ***************************************************************************** @@ -248,36 +271,39 @@ * ***************************************************************************** */ -typedef struct _i40e_vendor_info_t { +typedef struct _ixl_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int 
subdevice_id; unsigned int index; -} i40e_vendor_info_t; +} ixl_vendor_info_t; -struct i40e_tx_buf { +struct ixl_tx_buf { u32 eop_index; struct mbuf *m_head; bus_dmamap_t map; bus_dma_tag_t tag; }; -struct i40e_rx_buf { +struct ixl_rx_buf { struct mbuf *m_head; struct mbuf *m_pack; struct mbuf *fmp; bus_dmamap_t hmap; bus_dmamap_t pmap; +#ifdef DEV_NETMAP + u64 addr; +#endif }; /* ** This struct has multiple uses, multicast ** addresses, vlans, and mac filters all use it. */ -struct i40e_mac_filter { - SLIST_ENTRY(i40e_mac_filter) next; +struct ixl_mac_filter { + SLIST_ENTRY(ixl_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; s16 vlan; u16 flags; @@ -288,7 +314,7 @@ struct i40e_mac_filter { * The Transmit ring control struct */ struct tx_ring { - struct i40e_queue *que; + struct ixl_queue *que; struct mtx mtx; u32 tail; struct i40e_tx_desc *base; @@ -299,7 +325,7 @@ struct tx_ring { u16 atr_count; u16 itr; u16 latency; - struct i40e_tx_buf *buffers; + struct ixl_tx_buf *buffers; volatile u16 avail; u32 cmd; bus_dma_tag_t tx_tag; @@ -307,9 +333,12 @@ struct tx_ring { char mtx_name[16]; struct buf_ring *br; - /* Soft Stats */ + /* Used for Dynamic ITR calculation */ u32 packets; u32 bytes; + + /* Soft Stats */ + u64 tx_bytes; u64 no_desc; u64 total_packets; }; @@ -319,7 +348,7 @@ struct tx_ring { * The Receive ring control struct */ struct rx_ring { - struct i40e_queue *que; + struct ixl_queue *que; struct mtx mtx; union i40e_rx_desc *base; struct i40e_dma_mem dma; @@ -332,16 +361,17 @@ struct rx_ring { u16 itr; u16 latency; char mtx_name[16]; - struct i40e_rx_buf *buffers; + struct ixl_rx_buf *buffers; u32 mbuf_sz; u32 tail; bus_dma_tag_t htag; bus_dma_tag_t ptag; - /* Soft stats */ + /* Used for Dynamic ITR calculation */ u32 packets; u32 bytes; + /* Soft stats */ u64 split; u64 rx_packets; u64 rx_bytes; @@ -353,8 +383,8 @@ struct rx_ring { ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring pair. 
*/ -struct i40e_queue { - struct i40e_vsi *vsi; +struct ixl_queue { + struct ixl_vsi *vsi; u32 me; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ @@ -384,8 +414,8 @@ struct i40e_queue { ** there would be one of these per traffic class/type ** for now just one, and its embedded in the pf */ -SLIST_HEAD(i40e_ftl_head, i40e_mac_filter); -struct i40e_vsi { +SLIST_HEAD(ixl_ftl_head, ixl_mac_filter); +struct ixl_vsi { void *back; struct ifnet *ifp; struct device *dev; @@ -397,7 +427,7 @@ struct i40e_vsi { u16 num_queues; u16 rx_itr_setting; u16 tx_itr_setting; - struct i40e_queue *queues; /* head of queues */ + struct ixl_queue *queues; /* head of queues */ bool link_active; u16 seid; u16 max_frame_size; @@ -406,7 +436,7 @@ struct i40e_vsi { u32 fc; /* local flow ctrl setting */ /* MAC/VLAN Filter list */ - struct i40e_ftl_head ftl; + struct ixl_ftl_head ftl; struct i40e_aqc_vsi_properties_data info; @@ -432,7 +462,7 @@ struct i40e_vsi { ** Find the number of unrefreshed RX descriptors */ static inline u16 -i40e_rx_unrefreshed(struct i40e_queue *que) +ixl_rx_unrefreshed(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; @@ -446,13 +476,13 @@ i40e_rx_unrefreshed(struct i40e_queue *que) /* ** Find the next available unused filter */ -static inline struct i40e_mac_filter * -i40e_get_filter(struct i40e_vsi *vsi) +static inline struct ixl_mac_filter * +ixl_get_filter(struct ixl_vsi *vsi) { - struct i40e_mac_filter *f; + struct ixl_mac_filter *f; /* create a new empty filter */ - f = malloc(sizeof(struct i40e_mac_filter), + f = malloc(sizeof(struct ixl_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INSERT_HEAD(&vsi->ftl, f, next); @@ -478,19 +508,19 @@ cmp_etheraddr(u8 *ea1, u8 *ea2) /* * Info for stats sysctls */ -struct i40e_sysctl_info { +struct ixl_sysctl_info { u64 *stat; char *name; char *description; }; -extern int i40e_atr_rate; +extern int ixl_atr_rate; /* -** i40e_fw_version_str - format the FW and NVM version strings +** 
ixl_fw_version_str - format the FW and NVM version strings */ static inline char * -i40e_fw_version_str(struct i40e_hw *hw) +ixl_fw_version_str(struct i40e_hw *hw) { static char buf[32]; @@ -498,10 +528,10 @@ i40e_fw_version_str(struct i40e_hw *hw) "f%d.%d a%d.%d n%02x.%02x e%08x", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.api_maj_ver, hw->aq.api_min_ver, - (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >> - I40E_NVM_VERSION_HI_SHIFT, - (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >> - I40E_NVM_VERSION_LO_SHIFT, + (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> + IXL_NVM_VERSION_HI_SHIFT, + (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> + IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack); return buf; } @@ -509,21 +539,21 @@ i40e_fw_version_str(struct i40e_hw *hw) /********************************************************************* * TXRX Function prototypes *********************************************************************/ -int i40e_allocate_tx_data(struct i40e_queue *); -int i40e_allocate_rx_data(struct i40e_queue *); -void i40e_init_tx_ring(struct i40e_queue *); -int i40e_init_rx_ring(struct i40e_queue *); -bool i40e_rxeof(struct i40e_queue *, int); -bool i40e_txeof(struct i40e_queue *); -int i40e_mq_start(struct ifnet *, struct mbuf *); -int i40e_mq_start_locked(struct ifnet *, struct tx_ring *); -void i40e_deferred_mq_start(void *, int); -void i40e_qflush(struct ifnet *); -void i40e_free_vsi(struct i40e_vsi *); -void i40e_free_que_tx(struct i40e_queue *); -void i40e_free_que_rx(struct i40e_queue *); -#ifdef I40E_FDIR -void i40e_atr(struct i40e_queue *, struct tcphdr *, int); +int ixl_allocate_tx_data(struct ixl_queue *); +int ixl_allocate_rx_data(struct ixl_queue *); +void ixl_init_tx_ring(struct ixl_queue *); +int ixl_init_rx_ring(struct ixl_queue *); +bool ixl_rxeof(struct ixl_queue *, int); +bool ixl_txeof(struct ixl_queue *); +int ixl_mq_start(struct ifnet *, struct mbuf *); +int ixl_mq_start_locked(struct ifnet *, struct tx_ring *); +void 
ixl_deferred_mq_start(void *, int); +void ixl_qflush(struct ifnet *); +void ixl_free_vsi(struct ixl_vsi *); +void ixl_free_que_tx(struct ixl_queue *); +void ixl_free_que_rx(struct ixl_queue *); +#ifdef IXL_FDIR +void ixl_atr(struct ixl_queue *, struct tcphdr *, int); #endif -#endif /* _I40E_H_ */ +#endif /* _IXL_H_ */ diff --git a/sys/dev/i40e/i40e_pf.h b/sys/dev/ixl/ixl_pf.h old mode 100755 new mode 100644 similarity index 84% rename from sys/dev/i40e/i40e_pf.h rename to sys/dev/ixl/ixl_pf.h index a19ebec52540..055c54f21a3f --- a/sys/dev/i40e/i40e_pf.h +++ b/sys/dev/ixl/ixl_pf.h @@ -33,11 +33,11 @@ /*$FreeBSD$*/ -#ifndef _I40E_PF_H_ -#define _I40E_PF_H_ +#ifndef _IXL_PF_H_ +#define _IXL_PF_H_ /* Physical controller structure */ -struct i40e_pf { +struct ixl_pf { struct i40e_hw hw; struct i40e_osdep osdep; struct device *dev; @@ -64,6 +64,8 @@ struct i40e_pf { struct task adminq; struct taskqueue *tq; + int advertised_speed; + /* ** VSI - Stations: ** These are the traffic class holders, and @@ -71,7 +73,7 @@ struct i40e_pf { ** associated with them. ** NOTE: for now using just one, so embed it. 
*/ - struct i40e_vsi vsi; + struct ixl_vsi vsi; /* Misc stats maintained by the driver */ u64 watchdog_events; @@ -84,11 +86,11 @@ struct i40e_pf { }; -#define I40E_PF_LOCK_INIT(_sc, _name) \ - mtx_init(&(_sc)->pf_mtx, _name, "I40E PF Lock", MTX_DEF) -#define I40E_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) -#define I40E_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) -#define I40E_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) -#define I40E_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) +#define IXL_PF_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) +#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) +#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) +#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) +#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) -#endif /* _I40E_PF_H_ */ +#endif /* _IXL_PF_H_ */ diff --git a/sys/dev/i40e/i40e_txrx.c b/sys/dev/ixl/ixl_txrx.c similarity index 84% rename from sys/dev/i40e/i40e_txrx.c rename to sys/dev/ixl/ixl_txrx.c index e6fdc46697fb..80678cab827e 100755 --- a/sys/dev/i40e/i40e_txrx.c +++ b/sys/dev/ixl/ixl_txrx.c @@ -33,28 +33,25 @@ /*$FreeBSD$*/ /* -** I40E driver TX/RX Routines: +** IXL driver TX/RX Routines: ** This was seperated to allow usage by ** both the BASE and the VF drivers. 
*/ -#ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_inet.h" #include "opt_inet6.h" -#endif - -#include "i40e.h" +#include "ixl.h" /* Local Prototypes */ -static void i40e_rx_checksum(struct mbuf *, u32, u32, u8); -static void i40e_refresh_mbufs(struct i40e_queue *, int); -static int i40e_xmit(struct i40e_queue *, struct mbuf **); -static int i40e_tx_setup_offload(struct i40e_queue *, +static void ixl_rx_checksum(struct mbuf *, u32, u32, u8); +static void ixl_refresh_mbufs(struct ixl_queue *, int); +static int ixl_xmit(struct ixl_queue *, struct mbuf **); +static int ixl_tx_setup_offload(struct ixl_queue *, struct mbuf *, u32 *, u32 *); -static bool i40e_tso_setup(struct i40e_queue *, struct mbuf *); +static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *); -static __inline void i40e_rx_discard(struct rx_ring *, int); -static __inline void i40e_rx_input(struct rx_ring *, struct ifnet *, +static __inline void ixl_rx_discard(struct rx_ring *, int); +static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u8); /* @@ -62,10 +59,10 @@ static __inline void i40e_rx_input(struct rx_ring *, struct ifnet *, ** */ int -i40e_mq_start(struct ifnet *ifp, struct mbuf *m) +ixl_mq_start(struct ifnet *ifp, struct mbuf *m) { - struct i40e_vsi *vsi = ifp->if_softc; - struct i40e_queue *que; + struct ixl_vsi *vsi = ifp->if_softc; + struct ixl_queue *que; struct tx_ring *txr; int err, i; @@ -85,9 +82,9 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m) err = drbr_enqueue(ifp, txr->br, m); if (err) return(err); - if (I40E_TX_TRYLOCK(txr)) { - i40e_mq_start_locked(ifp, txr); - I40E_TX_UNLOCK(txr); + if (IXL_TX_TRYLOCK(txr)) { + ixl_mq_start_locked(ifp, txr); + IXL_TX_UNLOCK(txr); } else taskqueue_enqueue(que->tq, &que->tx_task); @@ -95,10 +92,10 @@ i40e_mq_start(struct ifnet *ifp, struct mbuf *m) } int -i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) +ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { - struct i40e_queue *que = 
txr->que; - struct i40e_vsi *vsi = que->vsi; + struct ixl_queue *que = txr->que; + struct ixl_vsi *vsi = que->vsi; struct mbuf *next; int err = 0; @@ -109,7 +106,7 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) /* Process the transmit queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { - if ((err = i40e_xmit(que, &next)) != 0) { + if ((err = ixl_xmit(que, &next)) != 0) { if (next == NULL) drbr_advance(ifp, txr->br); else @@ -123,8 +120,8 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) break; } - if (txr->avail < I40E_TX_CLEANUP_THRESHOLD) - i40e_txeof(que); + if (txr->avail < IXL_TX_CLEANUP_THRESHOLD) + ixl_txeof(que); return (err); } @@ -133,35 +130,35 @@ i40e_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) * Called from a taskqueue to drain queued transmit packets. */ void -i40e_deferred_mq_start(void *arg, int pending) +ixl_deferred_mq_start(void *arg, int pending) { - struct i40e_queue *que = arg; + struct ixl_queue *que = arg; struct tx_ring *txr = &que->txr; - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; - I40E_TX_LOCK(txr); + IXL_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) - i40e_mq_start_locked(ifp, txr); - I40E_TX_UNLOCK(txr); + ixl_mq_start_locked(ifp, txr); + IXL_TX_UNLOCK(txr); } /* ** Flush all queue ring buffers */ void -i40e_qflush(struct ifnet *ifp) +ixl_qflush(struct ifnet *ifp) { - struct i40e_vsi *vsi = ifp->if_softc; + struct ixl_vsi *vsi = ifp->if_softc; for (int i = 0; i < vsi->num_queues; i++) { - struct i40e_queue *que = &vsi->queues[i]; + struct ixl_queue *que = &vsi->queues[i]; struct tx_ring *txr = &que->txr; struct mbuf *m; - I40E_TX_LOCK(txr); + IXL_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); - I40E_TX_UNLOCK(txr); + IXL_TX_UNLOCK(txr); } if_qflush(ifp); } @@ -172,7 +169,7 @@ i40e_qflush(struct ifnet *ifp) ** mbufs to deliver an mss-size chunk of data */ static inline bool -i40e_tso_detect_sparse(struct 
mbuf *mp) +ixl_tso_detect_sparse(struct mbuf *mp) { struct mbuf *m; int num = 0, mss; @@ -187,7 +184,7 @@ i40e_tso_detect_sparse(struct mbuf *mp) if (m->m_next == NULL) break; } - if (num > I40E_SPARSE_CHAIN) + if (num > IXL_SPARSE_CHAIN) ret = TRUE; return (ret); @@ -201,15 +198,15 @@ i40e_tso_detect_sparse(struct mbuf *mp) * - return 0 on success, positive on failure * **********************************************************************/ -#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) +#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) static int -i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) +ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; struct i40e_tx_desc *txd = NULL; struct mbuf *m_head, *m; int i, j, error, nsegs, maxsegs; @@ -218,7 +215,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) u32 cmd, off; bus_dmamap_t map; bus_dma_tag_t tag; - bus_dma_segment_t segs[I40E_MAX_TSO_SEGS]; + bus_dma_segment_t segs[IXL_MAX_TSO_SEGS]; cmd = off = 0; @@ -233,13 +230,13 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) buf = &txr->buffers[first]; map = buf->map; tag = txr->tx_tag; - maxsegs = I40E_MAX_TX_SEGS; + maxsegs = IXL_MAX_TX_SEGS; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { /* Use larger mapping for TSO */ tag = txr->tso_tag; - maxsegs = I40E_MAX_TSO_SEGS; - if (i40e_tso_detect_sparse(m_head)) { + maxsegs = IXL_MAX_TSO_SEGS; + if (ixl_tso_detect_sparse(m_head)) { m = m_defrag(m_head, M_NOWAIT); *m_headp = m; } @@ -296,7 +293,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) /* Set up the TSO/CSUM offload */ if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { - error = i40e_tx_setup_offload(que, m_head, &cmd, &off); + error = ixl_tx_setup_offload(que, m_head, &cmd, &off); if (error) goto 
xmit_fail; } @@ -335,7 +332,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) } /* Set the last descriptor for report */ txd->cmd_type_offset_bsz |= - htole64(((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); + htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); txr->avail -= nsegs; txr->next_avail = i; @@ -358,7 +355,7 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) ++txr->total_packets; wr32(hw, txr->tail, i); - i40e_flush(hw); + ixl_flush(hw); /* Mark outstanding work */ if (que->busy == 0) que->busy = 1; @@ -378,12 +375,12 @@ i40e_xmit(struct i40e_queue *que, struct mbuf **m_headp) * **********************************************************************/ int -i40e_allocate_tx_data(struct i40e_queue *que) +ixl_allocate_tx_data(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; int error = 0; /* @@ -394,8 +391,8 @@ i40e_allocate_tx_data(struct i40e_queue *que) BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - I40E_TSO_SIZE, /* maxsize */ - I40E_MAX_TX_SEGS, /* nsegments */ + IXL_TSO_SIZE, /* maxsize */ + IXL_MAX_TX_SEGS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ @@ -411,8 +408,8 @@ i40e_allocate_tx_data(struct i40e_queue *que) BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ - I40E_TSO_SIZE, /* maxsize */ - I40E_MAX_TSO_SEGS, /* nsegments */ + IXL_TSO_SIZE, /* maxsize */ + IXL_MAX_TSO_SEGS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ @@ -423,7 +420,7 @@ i40e_allocate_tx_data(struct i40e_queue *que) } if (!(txr->buffers = - (struct i40e_tx_buf *) malloc(sizeof(struct i40e_tx_buf) * + (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, 
"Unable to allocate tx_buffer memory\n"); error = ENOMEM; @@ -453,13 +450,21 @@ i40e_allocate_tx_data(struct i40e_queue *que) * **********************************************************************/ void -i40e_init_tx_ring(struct i40e_queue *que) +ixl_init_tx_ring(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; +#ifdef DEV_NETMAP + struct ixl_vsi *vsi = que->vsi; + struct netmap_adapter *na = NA(vsi->ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ /* Clear the old ring contents */ - I40E_TX_LOCK(txr); + IXL_TX_LOCK(txr); +#ifdef DEV_NETMAP + slot = netmap_reset(na, NR_TX, que->me, 0); +#endif bzero((void *)txr->base, (sizeof(struct i40e_tx_desc)) * que->num_desc); @@ -467,9 +472,9 @@ i40e_init_tx_ring(struct i40e_queue *que) txr->next_avail = 0; txr->next_to_clean = 0; -#ifdef I40E_FDIR +#ifdef IXL_FDIR /* Initialize flow director */ - txr->atr_rate = i40e_atr_rate; + txr->atr_rate = ixl_atr_rate; txr->atr_count = 0; #endif @@ -483,6 +488,13 @@ i40e_init_tx_ring(struct i40e_queue *que) m_freem(buf->m_head); buf->m_head = NULL; } +#ifdef DEV_NETMAP + if (slot) + { + int si = netmap_idx_n2k(&na->tx_rings[que->me], i); + netmap_load_map(txr->tag, buf->map, NMB(slot + si)); + } +#endif /* Clear the EOP index */ buf->eop_index = -1; } @@ -492,7 +504,7 @@ i40e_init_tx_ring(struct i40e_queue *que) bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - I40E_TX_UNLOCK(txr); + IXL_TX_UNLOCK(txr); } @@ -502,12 +514,12 @@ i40e_init_tx_ring(struct i40e_queue *que) * **********************************************************************/ void -i40e_free_que_tx(struct i40e_queue *que) +ixl_free_que_tx(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; - INIT_DEBUGOUT("i40e_free_que_tx: begin"); + INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); for (int i = 0; i < que->num_desc; i++) { buf = 
&txr->buffers[i]; @@ -545,6 +557,8 @@ i40e_free_que_tx(struct i40e_queue *que) bus_dma_tag_destroy(txr->tso_tag); txr->tso_tag = NULL; } + + INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); return; } @@ -555,7 +569,7 @@ i40e_free_que_tx(struct i40e_queue *que) **********************************************************************/ static int -i40e_tx_setup_offload(struct i40e_queue *que, +ixl_tx_setup_offload(struct ixl_queue *que, struct mbuf *mp, u32 *cmd, u32 *off) { struct ether_vlan_header *eh; @@ -570,7 +584,7 @@ i40e_tx_setup_offload(struct i40e_queue *que, /* Set up the TSO context descriptor if required */ if (mp->m_pkthdr.csum_flags & CSUM_TSO) { - tso = i40e_tso_setup(que, mp); + tso = ixl_tso_setup(que, mp); if (tso) ++que->tso; else @@ -625,8 +639,8 @@ i40e_tx_setup_offload(struct i40e_queue *que, *off |= (tcp_hlen >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } -#ifdef I40E_FDIR - i40e_atr(que, th, etype); +#ifdef IXL_FDIR + ixl_atr(que, th, etype); #endif break; case IPPROTO_UDP: @@ -658,11 +672,11 @@ i40e_tx_setup_offload(struct i40e_queue *que, * **********************************************************************/ static bool -i40e_tso_setup(struct i40e_queue *que, struct mbuf *mp) +ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp) { struct tx_ring *txr = &que->txr; struct i40e_tx_context_desc *TXD; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; u32 cmd, mss, type, tsolen; u16 etype; int idx, elen, ip_hlen, tcp_hlen; @@ -749,11 +763,11 @@ i40e_tso_setup(struct i40e_queue *que, struct mbuf *mp) } /* -** i40e_get_tx_head - Retrieve the value from the +** ixl_get_tx_head - Retrieve the value from the ** location the HW records its HEAD index */ static inline u32 -i40e_get_tx_head(struct i40e_queue *que) +ixl_get_tx_head(struct ixl_queue *que) { struct tx_ring *txr = &que->txr; void *head = &txr->base[que->num_desc]; @@ -768,18 +782,47 @@ i40e_get_tx_head(struct i40e_queue *que) * 
**********************************************************************/ bool -i40e_txeof(struct i40e_queue *que) +ixl_txeof(struct ixl_queue *que) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; struct tx_ring *txr = &que->txr; u32 first, last, head, done, processed; - struct i40e_tx_buf *buf; + struct ixl_tx_buf *buf; struct i40e_tx_desc *tx_desc, *eop_desc; mtx_assert(&txr->mtx, MA_OWNED); +#ifdef DEV_NETMAP + if (ifp->if_capenable & IFCAP_NETMAP) { + struct netmap_adapter *na = NA(ifp); + struct netmap_kring *kring = &na->tx_rings[que->me]; + tx_desc = txr->base; + bus_dmamap_sync(txr->dma.tag, txr->dma.map, + BUS_DMASYNC_POSTREAD); + if (!netmap_mitigate || + (kring->nr_kflags < kring->nkr_num_slots && + tx_desc[kring->nr_kflags].cmd_type_offset_bsz & + htole32(I40E_TX_DESC_DTYPE_DESC_DONE))) + { +#if NETMAP_API < 4 + struct ixl_pf *pf = vsi->pf; + kring->nr_kflags = kring->nkr_num_slots; + selwakeuppri(&na->tx_rings[que->me].si, PI_NET); + IXL_TX_UNLOCK(txr); + IXL_PF_LOCK(pf); + selwakeuppri(&na->tx_si, PI_NET); + IXL_PF_UNLOCK(pf); + IXL_TX_LOCK(txr); +#else /* NETMAP_API >= 4 */ + netmap_tx_irq(ifp, txr->que->me); +#endif /* NETMAP_API */ + } + // XXX guessing there is no more work to be done + return FALSE; + } +#endif /* DEV_NETMAP */ /* These are not the descriptors you seek, move along :) */ if (txr->avail == que->num_desc) { @@ -797,7 +840,7 @@ i40e_txeof(struct i40e_queue *que) eop_desc = (struct i40e_tx_desc *)&txr->base[last]; /* Get the Head WB value */ - head = i40e_get_tx_head(que); + head = ixl_get_tx_head(que); /* ** Get the index of the first descriptor @@ -823,7 +866,9 @@ i40e_txeof(struct i40e_queue *que) ++processed; if (buf->m_head) { - txr->bytes += + txr->bytes += /* for ITR adjustment */ + buf->m_head->m_pkthdr.len; + txr->tx_bytes += /* for TX stats */ buf->m_head->m_pkthdr.len; bus_dmamap_sync(buf->tag, buf->map, @@ -869,7 +914,7 @@ i40e_txeof(struct i40e_queue *que) ** be 
considered hung. If anything has been ** cleaned then reset the state. */ - if ((processed == 0) && (que->busy != I40E_QUEUE_HUNG)) + if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG)) ++que->busy; if (processed) @@ -896,13 +941,13 @@ i40e_txeof(struct i40e_queue *que) * **********************************************************************/ static void -i40e_refresh_mbufs(struct i40e_queue *que, int limit) +ixl_refresh_mbufs(struct ixl_queue *que, int limit) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; - struct i40e_rx_buf *buf; + struct ixl_rx_buf *buf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; @@ -966,8 +1011,12 @@ i40e_refresh_mbufs(struct i40e_queue *que, int limit) buf->m_pack = mp; bus_dmamap_sync(rxr->ptag, buf->pmap, BUS_DMASYNC_PREREAD); +#ifdef DEV_NETMAP + rxr->base[i].read.pkt_addr = buf->addr; +#else /* !DEV_NETMAP */ rxr->base[i].read.pkt_addr = htole64(pseg[0].ds_addr); +#endif /* DEV_NETMAP */ /* Used only when doing header split */ rxr->base[i].read.hdr_addr = 0; @@ -994,17 +1043,17 @@ i40e_refresh_mbufs(struct i40e_queue *que, int limit) * **********************************************************************/ int -i40e_allocate_rx_data(struct i40e_queue *que) +ixl_allocate_rx_data(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; device_t dev = vsi->dev; - struct i40e_rx_buf *buf; + struct ixl_rx_buf *buf; int i, bsize, error; - bsize = sizeof(struct i40e_rx_buf) * que->num_desc; + bsize = sizeof(struct ixl_rx_buf) * que->num_desc; if (!(rxr->buffers = - (struct i40e_rx_buf *) malloc(bsize, + (struct ixl_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; @@ -1069,17 +1118,24 @@ i40e_allocate_rx_data(struct i40e_queue *que) * 
**********************************************************************/ int -i40e_init_rx_ring(struct i40e_queue *que) +ixl_init_rx_ring(struct ixl_queue *que) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct ifnet *ifp = vsi->ifp; struct rx_ring *rxr = &que->rxr; struct lro_ctrl *lro = &rxr->lro; - struct i40e_rx_buf *buf; + struct ixl_rx_buf *buf; bus_dma_segment_t pseg[1], hseg[1]; int rsize, nsegs, error = 0; +#ifdef DEV_NETMAP + struct netmap_adapter *na = NA(ifp); + struct netmap_slot *slot; +#endif /* DEV_NETMAP */ - I40E_RX_LOCK(rxr); + IXL_RX_LOCK(rxr); +#ifdef DEV_NETMAP + slot = netmap_reset(na, NR_RX, que->me, 0); +#endif /* Clear the ring contents */ rsize = roundup2(que->num_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); @@ -1113,6 +1169,21 @@ i40e_init_rx_ring(struct i40e_queue *que) struct mbuf *mh, *mp; buf = &rxr->buffers[j]; +#ifdef DEV_NETMAP + if (slot) + { + int sj = netmap_idx_n2k(&na->rx_rings[que->me], j); + u64 paddr; + void *addr; + + addr = PNMB(slot + sj, &paddr); + netmap_load_map(rxr->ptag, buf->pmap, addr); + /* Update descriptor and cached value */ + rxr->base[j].read.pkt_addr = htole64(paddr); + buf->addr = htole64(paddr); + continue; + } +#endif /* DEV_NETMAP */ /* ** Don't allocate mbufs if not ** doing header split, its wasteful @@ -1179,10 +1250,10 @@ i40e_init_rx_ring(struct i40e_queue *que) if (ifp->if_capenable & IFCAP_LRO) { int err = tcp_lro_init(lro); if (err) { - printf("LRO Initialization failed!\n"); + if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me); goto fail; } - INIT_DEBUGOUT("RX Soft LRO Initialized\n"); + INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me); rxr->lro_enabled = TRUE; lro->ifp = vsi->ifp; } @@ -1191,7 +1262,7 @@ i40e_init_rx_ring(struct i40e_queue *que) BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: - I40E_RX_UNLOCK(rxr); + IXL_RX_UNLOCK(rxr); return (error); } @@ -1202,12 +1273,12 @@ i40e_init_rx_ring(struct i40e_queue *que) * 
**********************************************************************/ void -i40e_free_que_rx(struct i40e_queue *que) +ixl_free_que_rx(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; - struct i40e_rx_buf *buf; + struct ixl_rx_buf *buf; - INIT_DEBUGOUT("free_que_rx: begin"); + INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); /* Cleanup any existing buffers */ if (rxr->buffers != NULL) { @@ -1252,11 +1323,13 @@ i40e_free_que_rx(struct i40e_queue *que) bus_dma_tag_destroy(rxr->ptag); rxr->ptag = NULL; } + + INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); return; } static __inline void -i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) +ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) { /* * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet @@ -1277,16 +1350,16 @@ i40e_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) if (tcp_lro_rx(&rxr->lro, m, 0) == 0) return; } - I40E_RX_UNLOCK(rxr); + IXL_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, m); - I40E_RX_LOCK(rxr); + IXL_RX_LOCK(rxr); } static __inline void -i40e_rx_discard(struct rx_ring *rxr, int i) +ixl_rx_discard(struct rx_ring *rxr, int i) { - struct i40e_rx_buf *rbuf; + struct ixl_rx_buf *rbuf; rbuf = &rxr->buffers[i]; @@ -1329,20 +1402,42 @@ i40e_rx_discard(struct rx_ring *rxr, int i) * Return TRUE for more work, FALSE for all clean. 
*********************************************************************/ bool -i40e_rxeof(struct i40e_queue *que, int count) +ixl_rxeof(struct ixl_queue *que, int count) { - struct i40e_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = que->vsi; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = vsi->ifp; struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; int i, nextp, processed = 0; union i40e_rx_desc *cur; - struct i40e_rx_buf *rbuf, *nbuf; + struct ixl_rx_buf *rbuf, *nbuf; - I40E_RX_LOCK(rxr); + IXL_RX_LOCK(rxr); +#ifdef DEV_NETMAP +#if NETMAP_API < 4 + if (ifp->if_capenable & IFCAP_NETMAP) + { + struct netmap_adapter *na = NA(ifp); + + na->rx_rings[que->me].nr_kflags |= NKR_PENDINTR; + selwakeuppri(&na->rx_rings[que->me].si, PI_NET); + IXL_RX_UNLOCK(rxr); + IXL_PF_LOCK(vsi->pf); + selwakeuppri(&na->rx_si, PI_NET); + IXL_PF_UNLOCK(vsi->pf); + return (FALSE); + } +#else /* NETMAP_API >= 4 */ + if (netmap_rx_irq(ifp, que->me, &processed)) + { + IXL_RX_UNLOCK(rxr); + return (FALSE); + } +#endif /* NETMAP_API */ +#endif /* DEV_NETMAP */ for (i = rxr->next_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; @@ -1398,7 +1493,7 @@ i40e_rxeof(struct i40e_queue *que, int count) if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { ifp->if_ierrors++; rxr->discarded++; - i40e_rx_discard(rxr, i); + ixl_rx_discard(rxr, i); goto next_desc; } @@ -1423,8 +1518,8 @@ i40e_rxeof(struct i40e_queue *que, int count) ** descriptor to the next, until we get EOP. 
*/ if (rxr->hdr_split && (rbuf->fmp == NULL)) { - if (hlen > I40E_RX_HDR) - hlen = I40E_RX_HDR; + if (hlen > IXL_RX_HDR) + hlen = IXL_RX_HDR; mh->m_len = hlen; mh->m_flags |= M_PKTHDR; mh->m_next = NULL; @@ -1512,7 +1607,7 @@ i40e_rxeof(struct i40e_queue *que, int count) rxr->packets++; rxr->bytes += sendmp->m_pkthdr.len; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) - i40e_rx_checksum(sendmp, status, error, ptype); + ixl_rx_checksum(sendmp, status, error, ptype); sendmp->m_pkthdr.flowid = que->msix; sendmp->m_flags |= M_FLOWID; } @@ -1527,20 +1622,20 @@ i40e_rxeof(struct i40e_queue *que, int count) /* Now send to the stack or do LRO */ if (sendmp != NULL) { rxr->next_check = i; - i40e_rx_input(rxr, ifp, sendmp, ptype); + ixl_rx_input(rxr, ifp, sendmp, ptype); i = rxr->next_check; } /* Every 8 descriptors we go to refresh mbufs */ if (processed == 8) { - i40e_refresh_mbufs(que, i); + ixl_refresh_mbufs(que, i); processed = 0; } } /* Refresh any remaining buf structs */ - if (i40e_rx_unrefreshed(que)) - i40e_refresh_mbufs(que, i); + if (ixl_rx_unrefreshed(que)) + ixl_refresh_mbufs(que, i); rxr->next_check = i; @@ -1552,7 +1647,7 @@ i40e_rxeof(struct i40e_queue *que, int count) tcp_lro_flush(lro, queued); } - I40E_RX_UNLOCK(rxr); + IXL_RX_UNLOCK(rxr); return (FALSE); } @@ -1565,7 +1660,7 @@ i40e_rxeof(struct i40e_queue *que, int count) * *********************************************************************/ static void -i40e_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype) +ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype) { struct i40e_rx_ptype_decoded decoded; diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h new file mode 100644 index 000000000000..a5bfe13fbe66 --- /dev/null +++ b/sys/dev/ixl/ixlv.h @@ -0,0 +1,205 @@ +/****************************************************************************** + + Copyright (c) 2013-2014, Intel Corporation + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef _IXLV_H_ +#define _IXLV_H_ + +#define IXLV_AQ_MAX_ERR 100 +#define IXLV_MAX_FILTERS 128 +#define IXLV_MAX_QUEUES 16 +#define IXLV_AQ_TIMEOUT (1 * hz) +#define IXLV_CALLOUT_TIMO (hz / 50) // 20 msec + +#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1) +#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) +#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) +#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) +#define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) +#define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) +#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) +#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) +#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) +#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) +#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10) + +/* printf %b arg */ +#define IXLV_FLAGS \ + "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ + "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ + "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ + "\12CONFIGURE_PROMISC\13GET_STATS" + +/* Driver state */ +enum ixlv_state_t { + IXLV_START, + IXLV_FAILED, + IXLV_RESET_REQUIRED, + IXLV_RESET_PENDING, + IXLV_VERSION_CHECK, + IXLV_GET_RESOURCES, + IXLV_INIT_READY, + IXLV_INIT_START, + IXLV_INIT_CONFIG, + IXLV_INIT_MAPPING, + IXLV_INIT_ENABLE, + IXLV_INIT_COMPLETE, + IXLV_RUNNING, +}; + +struct ixlv_mac_filter { + SLIST_ENTRY(ixlv_mac_filter) next; + u8 macaddr[ETHER_ADDR_LEN]; + u16 flags; +}; +SLIST_HEAD(mac_list, ixlv_mac_filter); + +struct ixlv_vlan_filter { + SLIST_ENTRY(ixlv_vlan_filter) next; + u16 vlan; + u16 flags; +}; +SLIST_HEAD(vlan_list, ixlv_vlan_filter); + +/* Software controller structure */ +struct ixlv_sc { + struct i40e_hw hw; + struct i40e_osdep osdep; + struct device *dev; + + struct resource *pci_mem; + struct resource *msix_mem; + + enum ixlv_state_t init_state; + + /* + * Interrupt resources + */ + void *tag; + struct resource *res; 
/* For the AQ */ + + struct ifmedia media; + struct callout timer; + struct callout aq_task; + int msix; + int if_flags; + + struct mtx mtx; + struct mtx aq_task_mtx; + + u32 qbase; + u32 admvec; + struct timeout_task timeout; + struct task aq_irq; + struct task aq_sched; + struct taskqueue *tq; + + struct ixl_vsi vsi; + + /* Mac Filter List */ + struct mac_list *mac_filters; + + /* Vlan Filter List */ + struct vlan_list *vlan_filters; + + /* Promiscuous mode */ + u32 promiscuous_flags; + + /* Admin queue task flags */ + u32 aq_wait_count; + u32 aq_required; + u32 aq_pending; + + /* Virtual comm channel */ + enum i40e_virtchnl_ops current_op; + struct i40e_virtchnl_vf_resource *vf_res; + struct i40e_virtchnl_vsi_resource *vsi_res; + + /* Misc stats maintained by the driver */ + u64 watchdog_events; + u64 admin_irq; + + /* Signaling channels */ + u8 init_done; + u8 config_queues_done; + u8 map_vectors_done; + u8 enable_queues_done; + u8 disable_queues_done; + u8 add_ether_done; + u8 del_ether_done; +}; + +/* +** This checks for a zero mac addr, something that will be likely +** unless the Admin on the Host has created one. 
+*/ +static inline bool +ixlv_check_ether_addr(u8 *addr) +{ + bool status = TRUE; + + if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && + addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) + status = FALSE; + return (status); +} + +/* +** VF Common function prototypes +*/ +int ixlv_send_api_ver(struct ixlv_sc *); +int ixlv_verify_api_ver(struct ixlv_sc *); +int ixlv_send_vf_config_msg(struct ixlv_sc *); +int ixlv_get_vf_config(struct ixlv_sc *); +void ixlv_init(void *); +int ixlv_reinit_locked(struct ixlv_sc *); +void ixlv_configure_queues(struct ixlv_sc *); +void ixlv_enable_queues(struct ixlv_sc *); +void ixlv_disable_queues(struct ixlv_sc *); +void ixlv_map_queues(struct ixlv_sc *); +void ixlv_enable_intr(struct ixl_vsi *); +void ixlv_disable_intr(struct ixl_vsi *); +void ixlv_add_ether_filters(struct ixlv_sc *); +void ixlv_del_ether_filters(struct ixlv_sc *); +void ixlv_request_stats(struct ixlv_sc *); +void ixlv_request_reset(struct ixlv_sc *); +void ixlv_vc_completion(struct ixlv_sc *, + enum i40e_virtchnl_ops, i40e_status, u8 *, u16); +void ixlv_add_ether_filter(struct ixlv_sc *); +void ixlv_add_vlans(struct ixlv_sc *); +void ixlv_del_vlans(struct ixlv_sc *); +void ixlv_update_stats_counters(struct ixlv_sc *, + struct i40e_eth_stats *); + +#endif /* _IXLV_H_ */ diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c new file mode 100644 index 000000000000..1f912b5db46c --- /dev/null +++ b/sys/dev/ixl/ixlvc.c @@ -0,0 +1,976 @@ +/****************************************************************************** + + Copyright (c) 2013-2014, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +/* +** Virtual Channel support +** These are support functions to communication +** between the VF and PF drivers. +*/ + +#include "ixl.h" +#include "ixlv.h" +#include "i40e_prototype.h" + + +/* busy wait delay in msec */ +#define IXLV_BUSY_WAIT_DELAY 10 +#define IXLV_BUSY_WAIT_COUNT 50 + +/* +** Validate VF messages +*/ +static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len; + + /* Validate message length. 
*/ + switch (v_opcode) { + case I40E_VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct i40e_virtchnl_version_info); + break; + case I40E_VIRTCHNL_OP_RESET_VF: + case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + valid_len = 0; + break; + case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_txq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct i40e_virtchnl_rxq_info); + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_vsi_queue_config_info *vqc = + (struct i40e_virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + i40e_virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct i40e_virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct i40e_virtchnl_irq_map_info *vimi = + (struct i40e_virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct i40e_virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_ether_addr_list *veal = + (struct i40e_virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct i40e_virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + case I40E_VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct i40e_virtchnl_vlan_filter_list *vfl = + (struct 
i40e_virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct i40e_virtchnl_promisc_info); + break; + case I40E_VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct i40e_virtchnl_queue_select); + break; + /* These are always errors coming from the VF. */ + case I40E_VIRTCHNL_OP_EVENT: + case I40E_VIRTCHNL_OP_UNKNOWN: + default: + return EPERM; + break; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) + return EINVAL; + else + return 0; +} + +/* +** ixlv_send_pf_msg +** +** Send message to PF and print status if failure. +*/ +static int +ixlv_send_pf_msg(struct ixlv_sc *sc, + enum i40e_virtchnl_ops op, u8 *msg, u16 len) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + i40e_status err; + int val_err; + + /* + ** Pre-validating messages to the PF, this might be + ** removed for performance later? + */ + val_err = ixl_vc_validate_vf_msg(sc, op, msg, len); + if (val_err) + device_printf(dev, "Error validating msg to PF for op %d," + " msglen %d: error %d\n", op, len, val_err); + + err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); + if (err) + device_printf(dev, "Unable to send opcode %d to PF, " + "error %d, aq status %d\n", op, err, hw->aq.asq_last_status); + return err; +} + + +/* +** ixlv_send_api_ver +** +** Send API version admin queue message to the PF. The reply is not checked +** in this function. Returns 0 if the message was successfully +** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. +*/ +int +ixlv_send_api_ver(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_version_info vvi; + + vvi.major = I40E_VIRTCHNL_VERSION_MAJOR; + vvi.minor = I40E_VIRTCHNL_VERSION_MINOR; + + return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION, + (u8 *)&vvi, sizeof(vvi)); +} + +/* +** ixlv_verify_api_ver +** +** Compare API versions with the PF. 
Must be called after admin queue is +** initialized. Returns 0 if API versions match, EIO if +** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. +*/ +int ixlv_verify_api_ver(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_version_info *pf_vvi; + struct i40e_hw *hw = &sc->hw; + struct i40e_arq_event_info event; + i40e_status err; + int retries = 0; + + event.buf_len = IXL_AQ_BUFSZ; + event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); + if (!event.msg_buf) { + err = ENOMEM; + goto out; + } + + do { + if (++retries > IXLV_AQ_MAX_ERR) + goto out_alloc; + + /* NOTE: initial delay is necessary */ + i40e_msec_delay(100); + err = i40e_clean_arq_element(hw, &event, NULL); + } while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK); + if (err) + goto out_alloc; + + err = (i40e_status)le32toh(event.desc.cookie_low); + if (err) { + err = EIO; + goto out_alloc; + } + + if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) != + I40E_VIRTCHNL_OP_VERSION) { + err = EIO; + goto out_alloc; + } + + pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf; + if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) || + (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR)) + err = EIO; + +out_alloc: + free(event.msg_buf, M_DEVBUF); +out: + return err; +} + +/* +** ixlv_send_vf_config_msg +** +** Send VF configuration request admin queue message to the PF. The reply +** is not checked in this function. Returns 0 if the message was +** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. +*/ +int +ixlv_send_vf_config_msg(struct ixlv_sc *sc) +{ + return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); +} + +/* +** ixlv_get_vf_config +** +** Get VF configuration from PF and populate hw structure. Must be called after +** admin queue is initialized. Busy waits until response is received from PF, +** with maximum timeout. Response from PF is returned in the buffer for further +** processing by the caller. 
+*/ +int +ixlv_get_vf_config(struct ixlv_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + struct i40e_arq_event_info event; + u16 len; + i40e_status err = 0; + u32 retries = 0; + + /* Note this assumes a single VSI */ + len = sizeof(struct i40e_virtchnl_vf_resource) + + sizeof(struct i40e_virtchnl_vsi_resource); + event.buf_len = len; + event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); + if (!event.msg_buf) { + err = ENOMEM; + goto out; + } + + do { + err = i40e_clean_arq_element(hw, &event, NULL); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + if (++retries <= IXLV_AQ_MAX_ERR) + i40e_msec_delay(100); + } else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) != + I40E_VIRTCHNL_OP_GET_VF_RESOURCES) { + device_printf(dev, "%s: Received a response from PF," + " opcode %d, error %d\n", __func__, + le32toh(event.desc.cookie_high), + le32toh(event.desc.cookie_low)); + retries++; + continue; + } else { + err = (i40e_status)le32toh(event.desc.cookie_low); + if (err) { + device_printf(dev, "%s: Error returned from PF," + " opcode %d, error %d\n", __func__, + le32toh(event.desc.cookie_high), + le32toh(event.desc.cookie_low)); + err = EIO; + goto out_alloc; + } + break; + } + + if (retries > IXLV_AQ_MAX_ERR) { + INIT_DBG_DEV(dev, "Did not receive response after %d tries.", + retries); + goto out_alloc; + } + + } while (err); + + memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); + i40e_vf_parse_hw_config(hw, sc->vf_res); + +out_alloc: + free(event.msg_buf, M_DEVBUF); +out: + return err; +} + +/* +** ixlv_configure_queues +** +** Request that the PF set up our queues. 
+*/ +void +ixlv_configure_queues(struct ixlv_sc *sc) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + struct tx_ring *txr; + struct rx_ring *rxr; + int len, pairs; + + struct i40e_virtchnl_vsi_queue_config_info *vqci; + struct i40e_virtchnl_queue_pair_info *vqpi; + + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ +#ifdef IXL_DEBUG + device_printf(dev, "%s: command %d pending\n", + __func__, sc->current_op); +#endif + return; + } + + pairs = vsi->num_queues; + sc->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; + len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) + + (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs); + vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + if (!vqci) { + device_printf(dev, "%s: unable to allocate memory\n", __func__); + return; + } + vqci->vsi_id = sc->vsi_res->vsi_id; + vqci->num_queue_pairs = pairs; + vqpi = vqci->qpair; + /* Size check is not needed here - HW max is 16 queue pairs, and we + * can fit info for 31 of them into the AQ buffer before it overflows. 
+ */ + for (int i = 0; i < pairs; i++, que++) { + txr = &que->txr; + rxr = &que->rxr; + vqpi->txq.vsi_id = vqci->vsi_id; + vqpi->txq.queue_id = i; + vqpi->txq.ring_len = que->num_desc; + vqpi->txq.dma_ring_addr = txr->dma.pa; + /* Enable Head writeback */ + vqpi->txq.headwb_enabled = 1; + vqpi->txq.dma_headwb_addr = txr->dma.pa + + (que->num_desc * sizeof(struct i40e_tx_desc)); + + vqpi->rxq.vsi_id = vqci->vsi_id; + vqpi->rxq.queue_id = i; + vqpi->rxq.ring_len = que->num_desc; + vqpi->rxq.dma_ring_addr = rxr->dma.pa; + vqpi->rxq.max_pkt_size = vsi->max_frame_size; + vqpi->rxq.databuffer_size = rxr->mbuf_sz; + vqpi++; + } + + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); + free(vqci, M_DEVBUF); + sc->aq_pending |= IXLV_FLAG_AQ_CONFIGURE_QUEUES; + sc->aq_required &= ~IXLV_FLAG_AQ_CONFIGURE_QUEUES; +} + +/* +** ixlv_enable_queues +** +** Request that the PF enable all of our queues. +*/ +void +ixlv_enable_queues(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_queue_select vqs; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* we already have a command pending */ +#ifdef IXL_DEBUG + device_printf(sc->dev, "%s: command %d pending\n", + __func__, sc->current_op); +#endif + return; + } + sc->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + vqs.vsi_id = sc->vsi_res->vsi_id; + vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; + vqs.rx_queues = vqs.tx_queues; + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + sc->aq_pending |= IXLV_FLAG_AQ_ENABLE_QUEUES; + sc->aq_required &= ~IXLV_FLAG_AQ_ENABLE_QUEUES; +} + +/* +** ixlv_disable_queues +** +** Request that the PF disable all of our queues. 
+*/ +void +ixlv_disable_queues(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_queue_select vqs; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* we already have a command pending */ +#ifdef IXL_DEBUG + device_printf(sc->dev, "%s: command %d pending\n", + __func__, sc->current_op); +#endif + return; + } + sc->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + vqs.vsi_id = sc->vsi_res->vsi_id; + vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; + vqs.rx_queues = vqs.tx_queues; + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + sc->aq_pending |= IXLV_FLAG_AQ_DISABLE_QUEUES; + sc->aq_required &= ~IXLV_FLAG_AQ_DISABLE_QUEUES; +} + +/* +** ixlv_map_queues +** +** Request that the PF map queues to interrupt vectors. Misc causes, including +** admin queue, are always mapped to vector 0. +*/ +void +ixlv_map_queues(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_irq_map_info *vm; + int i, q, len; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_queue *que = vsi->queues; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* we already have a command pending */ +#ifdef IXL_DEBUG + device_printf(sc->dev, "%s: command %d pending\n", + __func__, sc->current_op); +#endif + return; + } + sc->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + + /* How many queue vectors, adminq uses one */ + q = sc->msix - 1; + + len = sizeof(struct i40e_virtchnl_irq_map_info) + + (sc->msix * sizeof(struct i40e_virtchnl_vector_map)); + vm = malloc(len, M_DEVBUF, M_NOWAIT); + if (!vm) { + printf("%s: unable to allocate memory\n", __func__); + return; + } + + vm->num_vectors = sc->msix; + /* Queue vectors first */ + for (i = 0; i < q; i++, que++) { + vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; + vm->vecmap[i].vector_id = i + 1; /* first is adminq */ + vm->vecmap[i].txq_map = (1 << que->me); + vm->vecmap[i].rxq_map = (1 << que->me); + } + + /* Misc vector last - this is only for AdminQ messages */ + vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; + 
vm->vecmap[i].vector_id = 0; + vm->vecmap[i].txq_map = 0; + vm->vecmap[i].rxq_map = 0; + + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vm, len); + free(vm, M_DEVBUF); + sc->aq_pending |= IXLV_FLAG_AQ_MAP_VECTORS; + sc->aq_required &= ~IXLV_FLAG_AQ_MAP_VECTORS; +} + +/* +** Scan the Filter List looking for vlans that need +** to be added, then create the data to hand to the AQ +** for handling. +*/ +void +ixlv_add_vlans(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_vlan_filter_list *v; + struct ixlv_vlan_filter *f, *ftmp; + device_t dev = sc->dev; + int len, i = 0, cnt = 0; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) + return; + + sc->current_op = I40E_VIRTCHNL_OP_ADD_VLAN; + + /* Get count of VLAN filters to add */ + SLIST_FOREACH(f, sc->vlan_filters, next) { + if (f->flags & IXL_FILTER_ADD) + cnt++; + } + + if (!cnt) { /* no work... */ + sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER; + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + return; + } + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (cnt * sizeof(u16)); + + if (len > IXL_AQ_BUF_SZ) { + device_printf(dev, "%s: Exceeded Max AQ Buf size\n", + __func__); + return; + } + + v = malloc(len, M_DEVBUF, M_NOWAIT); + if (!v) { + device_printf(dev, "%s: unable to allocate memory\n", + __func__); + return; + } + + v->vsi_id = sc->vsi_res->vsi_id; + v->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { + if (f->flags & IXL_FILTER_ADD) { + bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); + f->flags = IXL_FILTER_USED; + i++; + } + if (i == cnt) + break; + } + if (i == 0) { /* Should not happen... */ + device_printf(dev, "%s: i == 0?\n", __func__); + return; + } + + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); + free(v, M_DEVBUF); + /* add stats? 
*/ + sc->aq_pending |= IXLV_FLAG_AQ_ADD_VLAN_FILTER; + sc->aq_required &= ~IXLV_FLAG_AQ_ADD_VLAN_FILTER; +} + +/* +** Scan the Filter Table looking for vlans that need +** to be removed, then create the data to hand to the AQ +** for handling. +*/ +void +ixlv_del_vlans(struct ixlv_sc *sc) +{ + device_t dev = sc->dev; + struct i40e_virtchnl_vlan_filter_list *v; + struct ixlv_vlan_filter *f, *ftmp; + int len, i = 0, cnt = 0; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) + return; + + sc->current_op = I40E_VIRTCHNL_OP_DEL_VLAN; + + /* Get count of VLAN filters to delete */ + SLIST_FOREACH(f, sc->vlan_filters, next) { + if (f->flags & IXL_FILTER_DEL) + cnt++; + } + + if (!cnt) { /* no work... */ + sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER; + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + return; + } + + len = sizeof(struct i40e_virtchnl_vlan_filter_list) + + (cnt * sizeof(u16)); + + if (len > IXL_AQ_BUF_SZ) { + device_printf(dev, "%s: Exceeded Max AQ Buf size\n", + __func__); + return; + } + + v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + if (!v) { + device_printf(dev, "%s: unable to allocate memory\n", + __func__); + return; + } + + v->vsi_id = sc->vsi_res->vsi_id; + v->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { + if (f->flags & IXL_FILTER_DEL) { + bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); + i++; + SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); + free(f, M_DEVBUF); + } + if (i == cnt) + break; + } + if (i == 0) { /* Should not happen... */ + device_printf(dev, "%s: i == 0?\n", __func__); + return; + } + + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); + free(v, M_DEVBUF); + /* add stats? */ + sc->aq_pending |= IXLV_FLAG_AQ_DEL_VLAN_FILTER; + sc->aq_required &= ~IXLV_FLAG_AQ_DEL_VLAN_FILTER; +} + + +/* +** This routine takes additions to the vsi filter +** table and creates an Admin Queue call to create +** the filters in the hardware. 
+*/ +void +ixlv_add_ether_filters(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_ether_addr_list *a; + struct ixlv_mac_filter *f; + device_t dev = sc->dev; + int len, j = 0, cnt = 0; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) + return; + + sc->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + + /* Get count of MAC addresses to add */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_ADD) + cnt++; + } + if (cnt == 0) { /* Should not happen... */ + DDPRINTF(dev, "cnt == 0, exiting..."); + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER; + wakeup(&sc->add_ether_done); + return; + } + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (cnt * sizeof(struct i40e_virtchnl_ether_addr)); + + a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + if (a == NULL) { + device_printf(dev, "%s: Failed to get memory for " + "virtchnl_ether_addr_list\n", __func__); + return; + } + a->vsi_id = sc->vsi.id; + a->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_ADD) { + bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN); + f->flags &= ~IXL_FILTER_ADD; + j++; + + DDPRINTF(dev, "ADD: " MAC_FORMAT, + MAC_FORMAT_ARGS(f->macaddr)); + } + if (j == cnt) + break; + } + DDPRINTF(dev, "len %d, j %d, cnt %d", + len, j, cnt); + ixlv_send_pf_msg(sc, + I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len); + /* add stats? */ + free(a, M_DEVBUF); + sc->aq_pending |= IXLV_FLAG_AQ_ADD_MAC_FILTER; + sc->aq_required &= ~IXLV_FLAG_AQ_ADD_MAC_FILTER; + return; +} + +/* +** This routine takes filters flagged for deletion in the +** sc MAC filter list and creates an Admin Queue call +** to delete those filters in the hardware. 
+*/ +void +ixlv_del_ether_filters(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_ether_addr_list *d; + device_t dev = sc->dev; + struct ixlv_mac_filter *f, *f_temp; + int len, j = 0, cnt = 0; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) + return; + + sc->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + + /* Get count of MAC addresses to delete */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_DEL) + cnt++; + } + if (cnt == 0) { + DDPRINTF(dev, "cnt == 0, exiting..."); + sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER; + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + wakeup(&sc->del_ether_done); + return; + } + + len = sizeof(struct i40e_virtchnl_ether_addr_list) + + (cnt * sizeof(struct i40e_virtchnl_ether_addr)); + + d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + if (d == NULL) { + device_printf(dev, "%s: Failed to get memory for " + "virtchnl_ether_addr_list\n", __func__); + return; + } + d->vsi_id = sc->vsi.id; + d->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { + if (f->flags & IXL_FILTER_DEL) { + bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); + DDPRINTF(dev, "DEL: " MAC_FORMAT, + MAC_FORMAT_ARGS(f->macaddr)); + j++; + SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); + free(f, M_DEVBUF); + } + if (j == cnt) + break; + } + ixlv_send_pf_msg(sc, + I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len); + /* add stats? */ + free(d, M_DEVBUF); + sc->aq_pending |= IXLV_FLAG_AQ_DEL_MAC_FILTER; + sc->aq_required &= ~IXLV_FLAG_AQ_DEL_MAC_FILTER; + return; +} + +/* +** ixlv_request_reset +** Request that the PF reset this VF. No response is expected. +*/ +void +ixlv_request_reset(struct ixlv_sc *sc) +{ + /* + ** Set the reset status to "in progress" before + ** the request, this avoids any possibility of + ** a mistaken early detection of completion. 
+ */ + wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS); + ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0); + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} + +/* +** ixlv_request_stats +** Request the statistics for this VF's VSI from PF. +*/ +void +ixlv_request_stats(struct ixlv_sc *sc) +{ + struct i40e_virtchnl_queue_select vqs; + int error = 0; + + if (sc->current_op != I40E_VIRTCHNL_OP_UNKNOWN) + return; + + sc->current_op = I40E_VIRTCHNL_OP_GET_STATS; + vqs.vsi_id = sc->vsi_res->vsi_id; + error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs)); + /* Low priority, ok if it fails */ + if (error) + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; +} + +/* +** Updates driver's stats counters with VSI stats returned from PF. +*/ +void +ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) +{ + struct ifnet *ifp = sc->vsi.ifp; + + ifp->if_ipackets = es->rx_unicast + + es->rx_multicast + + es->rx_broadcast; + ifp->if_opackets = es->tx_unicast + + es->tx_multicast + + es->tx_broadcast; + ifp->if_ibytes = es->rx_bytes; + ifp->if_obytes = es->tx_bytes; + ifp->if_imcasts = es->rx_multicast; + ifp->if_omcasts = es->tx_multicast; + + ifp->if_oerrors = es->tx_errors; + ifp->if_iqdrops = es->rx_discards; + ifp->if_noproto = es->rx_unknown_protocol; + + sc->vsi.eth_stats = *es; +} + +/* +** ixlv_vc_completion +** +** Asynchronous completion function for admin queue messages. Rather than busy +** wait, we fire off our requests and assume that no errors will be returned. +** This function handles the reply messages. 
+*/ +void +ixlv_vc_completion(struct ixlv_sc *sc, + enum i40e_virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, u16 msglen) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi = &sc->vsi; + + if (v_opcode == I40E_VIRTCHNL_OP_EVENT) { + struct i40e_virtchnl_pf_event *vpe = + (struct i40e_virtchnl_pf_event *)msg; + + switch (vpe->event) { + case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + vsi->link_up = + vpe->event_data.link_event.link_status; + vsi->link_speed = + vpe->event_data.link_event.link_speed; + break; + case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + device_printf(dev, "PF initiated reset!\n"); + sc->init_state = IXLV_RESET_PENDING; + ixlv_init(sc); + break; + default: + device_printf(dev, "%s: Unknown event %d from AQ\n", + __func__, vpe->event); + break; + } + + return; + } + + if (v_opcode != sc->current_op + && sc->current_op != I40E_VIRTCHNL_OP_GET_STATS) { + device_printf(dev, "%s: Pending op is %d, received %d.\n", + __func__, sc->current_op, v_opcode); + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + return; + } + + /* Catch-all error response */ + if (v_retval) { + device_printf(dev, + "%s: AQ returned error %d to our request %d!\n", + __func__, v_retval, v_opcode); + } + +#ifdef IXL_DEBUG + if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS) + DDPRINTF(dev, "opcode %d", v_opcode); +#endif + + switch (v_opcode) { + case I40E_VIRTCHNL_OP_GET_STATS: + ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); + break; + case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + sc->aq_pending &= ~(IXLV_FLAG_AQ_ADD_MAC_FILTER); + if (v_retval) { + device_printf(dev, "WARNING: Error adding VF mac filter!\n"); + device_printf(dev, "WARNING: Device may not receive traffic!\n"); + } + break; + case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_MAC_FILTER); + break; + case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_PROMISC); + break; + case I40E_VIRTCHNL_OP_ADD_VLAN: + sc->aq_pending &= 
~(IXLV_FLAG_AQ_ADD_VLAN_FILTER); + break; + case I40E_VIRTCHNL_OP_DEL_VLAN: + sc->aq_pending &= ~(IXLV_FLAG_AQ_DEL_VLAN_FILTER); + break; + case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + sc->aq_pending &= ~(IXLV_FLAG_AQ_ENABLE_QUEUES); + if (v_retval == 0) { + /* Turn on all interrupts */ + ixlv_enable_intr(vsi); + /* And inform the stack we're ready */ + vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; + vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + } + break; + case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + sc->aq_pending &= ~(IXLV_FLAG_AQ_DISABLE_QUEUES); + if (v_retval == 0) { + /* Turn off all interrupts */ + ixlv_disable_intr(vsi); + /* Tell the stack that the interface is no longer active */ + vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + } + break; + case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + sc->aq_pending &= ~(IXLV_FLAG_AQ_CONFIGURE_QUEUES); + break; + case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + sc->aq_pending &= ~(IXLV_FLAG_AQ_MAP_VECTORS); + break; + default: + device_printf(dev, + "%s: Received unexpected message %d from PF.\n", + __func__, v_opcode); + break; + } + sc->current_op = I40E_VIRTCHNL_OP_UNKNOWN; + return; +} diff --git a/sys/modules/i40e/Makefile b/sys/modules/ixl/Makefile similarity index 58% rename from sys/modules/i40e/Makefile rename to sys/modules/ixl/Makefile index 52608ce3bf58..94189d0d2daa 100755 --- a/sys/modules/i40e/Makefile +++ b/sys/modules/ixl/Makefile @@ -1,10 +1,11 @@ #$FreeBSD$ -.PATH: ${.CURDIR}/../../dev/i40e +.PATH: ${.CURDIR}/../../dev/ixl -KMOD = if_i40e +KMOD = if_ixl SRCS = device_if.h bus_if.h pci_if.h opt_bdg.h -SRCS += if_i40e.c i40e_txrx.c i40e_osdep.c +SRCS += opt_inet.h opt_inet6.h +SRCS += if_ixl.c ixl_txrx.c i40e_osdep.c # Shared source SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c @@ -12,8 +13,8 @@ SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c CFLAGS += -DSMP # Add Flow Director support -# CFLAGS += -DI40E_FDIR +# CFLAGS += -DIXL_FDIR # Debug messages / 
sysctls -# CFLAGS += -DI40E_DEBUG +# CFLAGS += -DIXL_DEBUG .include diff --git a/sys/modules/ixlv/Makefile b/sys/modules/ixlv/Makefile new file mode 100755 index 000000000000..785cab3baf43 --- /dev/null +++ b/sys/modules/ixlv/Makefile @@ -0,0 +1,20 @@ +#$FreeBSD$ + +.PATH: ${.CURDIR}/../../dev/ixl + +KMOD = if_ixlv +SRCS = device_if.h bus_if.h pci_if.h opt_bdg.h +SRCS += opt_inet.h opt_inet6.h +SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c + +# Shared source +SRCS += i40e_common.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c + +CFLAGS += -DSMP + +# Add Flow Director support +# CFLAGS += -DIXL_FDIR +# Debug messages / sysctls +# CFLAGS += -DIXL_DEBUG + +.include