diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c index 03e94bc8e2..6c09c27776 100644 --- a/drivers/net/i40e/base/i40e_common.c +++ b/drivers/net/i40e/base/i40e_common.c @@ -34,7 +34,7 @@ POSSIBILITY OF SUCH DAMAGE. #include "i40e_type.h" #include "i40e_adminq.h" #include "i40e_prototype.h" -#include "i40e_virtchnl.h" +#include "virtchnl.h" /** @@ -5523,7 +5523,7 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, } if (mac_addr) - i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS, + i40e_memcpy(cmd->mac, mac_addr, ETH_ALEN, I40E_NONDMA_TO_NONDMA); cmd->etype = CPU_TO_LE16(ethtype); @@ -6879,7 +6879,7 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) * completion before returning. **/ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details) @@ -6918,9 +6918,9 @@ enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, * with appropriate information. **/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct i40e_virtchnl_vf_resource *msg) + struct virtchnl_vf_resource *msg) { - struct i40e_virtchnl_vsi_resource *vsi_res; + struct virtchnl_vsi_resource *vsi_res; int i; vsi_res = &msg->vsi_res[0]; @@ -6930,19 +6930,17 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; hw->dev_caps.dcb = msg->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.fcoe = (msg->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0; + VIRTCHNL_VF_OFFLOAD_L2; hw->dev_caps.iwarp = (msg->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; + VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; for (i = 0; i < msg->num_vsis; i++) { - if (vsi_res->vsi_type == I40E_VSI_SRIOV) { + if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { i40e_memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr, - I40E_ETH_LENGTH_OF_ADDRESS, + ETH_ALEN, I40E_NONDMA_TO_NONDMA); i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr, - I40E_ETH_LENGTH_OF_ADDRESS, + ETH_ALEN, I40E_NONDMA_TO_NONDMA); } vsi_res++; @@ -6959,7 +6957,7 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, **/ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) { - return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF, + return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, I40E_SUCCESS, NULL, 0, NULL); } #endif /* VF_DRIVER */ diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h index 4bd589e783..6ec4304dfc 100644 --- a/drivers/net/i40e/base/i40e_prototype.h +++ b/drivers/net/i40e/base/i40e_prototype.h @@ -36,7 +36,7 @@ POSSIBILITY OF SUCH DAMAGE. #include "i40e_type.h" #include "i40e_alloc.h" -#include "i40e_virtchnl.h" +#include "virtchnl.h" /* Prototypes for shared code functions that are not in * the standard function pointer structures. 
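With the rename, the capability parsing in i40e_vf_parse_hw_config() is a set of plain bit tests on vf_offload_flags against the VIRTCHNL_VF_OFFLOAD_* bits. A minimal, self-contained sketch of that pattern follows; the struct and function names are local stand-ins for illustration, only the flag values are copied from this patch.

#include <stdint.h>
#include <stdio.h>

/* Capability bits as defined in virtchnl.h (values copied from the patch). */
#define VIRTCHNL_VF_OFFLOAD_L2     0x00000001
#define VIRTCHNL_VF_OFFLOAD_IWARP  0x00000002

/* Hypothetical reduced view of the fields i40e_vf_parse_hw_config() fills. */
struct vf_caps {
	unsigned int dcb;    /* set from the L2 offload bit, as in the patch */
	unsigned int iwarp;  /* 1 if the iWARP offload bit was negotiated */
};

static void parse_offload_flags(struct vf_caps *caps, uint32_t vf_offload_flags)
{
	/* Same bit tests the patched i40e_vf_parse_hw_config() performs. */
	caps->dcb = vf_offload_flags & VIRTCHNL_VF_OFFLOAD_L2;
	caps->iwarp = (vf_offload_flags & VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
}

int main(void)
{
	struct vf_caps caps;

	parse_offload_flags(&caps, VIRTCHNL_VF_OFFLOAD_L2);
	printf("dcb=%u iwarp=%u\n", caps.dcb, caps.iwarp);
	return 0;
}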
These are @@ -504,10 +504,10 @@ void i40e_destroy_spinlock(struct i40e_spinlock *sp); /* i40e_common for VF drivers*/ void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct i40e_virtchnl_vf_resource *msg); + struct virtchnl_vf_resource *msg); enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw); enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, enum i40e_status_code v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h index 84d5757688..52a114ab1e 100644 --- a/drivers/net/i40e/base/i40e_type.h +++ b/drivers/net/i40e/base/i40e_type.h @@ -92,7 +92,9 @@ POSSIBILITY OF SUCH DAMAGE. struct i40e_hw; typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); -#define I40E_ETH_LENGTH_OF_ADDRESS 6 +#ifndef ETH_ALEN +#define ETH_ALEN 6 +#endif /* Data type manipulation macros. */ #define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) #define I40E_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) @@ -439,10 +441,10 @@ struct i40e_hw_capabilities { struct i40e_mac_info { enum i40e_mac_type type; - u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS]; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u8 port_addr[ETH_ALEN]; u16 max_fcoeq; }; diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h deleted file mode 100644 index 7a24c0f1b3..0000000000 --- a/drivers/net/i40e/base/i40e_virtchnl.h +++ /dev/null @@ -1,445 +0,0 @@ -/******************************************************************************* - -Copyright (c) 2013 - 2015, Intel Corporation -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
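The i40e_type.h hunk above replaces the private I40E_ETH_LENGTH_OF_ADDRESS constant with a guarded ETH_ALEN, so the definition coexists with one the build environment may already provide. A small compilable sketch of the same guard pattern, with a reduced stand-in for struct i40e_mac_info:

#include <stdio.h>

/* Same guard pattern as the i40e_type.h change: only define ETH_ALEN when
 * the build environment has not already done so. */
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif

/* Reduced illustration of the renamed MAC address arrays. */
struct mac_info {
	unsigned char addr[ETH_ALEN];
	unsigned char perm_addr[ETH_ALEN];
};

int main(void)
{
	printf("MAC address length: %d, struct size: %zu\n",
	       ETH_ALEN, sizeof(struct mac_info));
	return 0;
}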
- -***************************************************************************/ - -#ifndef _I40E_VIRTCHNL_H_ -#define _I40E_VIRTCHNL_H_ - -#include "i40e_type.h" - -/* Description: - * This header file describes the VF-PF communication protocol used - * by the various i40e drivers. - * - * Admin queue buffer usage: - * desc->opcode is always i40e_aqc_opc_send_msg_to_pf - * flags, retval, datalen, and data addr are all used normally. - * Firmware copies the cookie fields when sending messages between the PF and - * VF, but uses all other fields internally. Due to this limitation, we - * must send all messages as "indirect", i.e. using an external buffer. - * - * All the vsi indexes are relative to the VF. Each VF can have maximum of - * three VSIs. All the queue indexes are relative to the VSI. Each VF can - * have a maximum of sixteen queues for all of its VSIs. - * - * The PF is required to return a status code in v_retval for all messages - * except RESET_VF, which does not require any response. The return value is of - * i40e_status_code type, defined in the i40e_type.h. - * - * In general, VF driver initialization should roughly follow the order of these - * opcodes. The VF driver must first validate the API version of the PF driver, - * then request a reset, then get resources, then configure queues and - * interrupts. After these operations are complete, the VF driver may start - * its queues, optionally add MAC and VLAN filters, and process traffic. - */ - -/* Opcodes for VF-PF communication. These are placed in the v_opcode field - * of the virtchnl_msg structure. - */ -enum i40e_virtchnl_ops { -/* The PF sends status change events to VFs using - * the I40E_VIRTCHNL_OP_EVENT opcode. - * VFs send requests to the PF using the other ops. - */ - I40E_VIRTCHNL_OP_UNKNOWN = 0, - I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ - I40E_VIRTCHNL_OP_RESET_VF = 2, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3, - I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, - I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, - I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8, - I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11, - I40E_VIRTCHNL_OP_ADD_VLAN = 12, - I40E_VIRTCHNL_OP_DEL_VLAN = 13, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, - I40E_VIRTCHNL_OP_GET_STATS = 15, - I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ -#ifdef I40E_SOL_VF_SUPPORT - I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19, -#endif - I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, - I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, - I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, - I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, - -}; - -/* Virtual channel message descriptor. This overlays the admin queue - * descriptor. All other data is passed in external buffers. - */ - -struct i40e_virtchnl_msg { - u8 pad[8]; /* AQ flags/opcode/len/retval fields */ - enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ - enum i40e_status_code v_retval; /* ditto for desc->retval */ - u32 vfid; /* used by PF when sending to VF */ -}; - -/* Message descriptions and data structures.*/ - -/* I40E_VIRTCHNL_OP_VERSION - * VF posts its version number to the PF. PF responds with its version number - * in the same format, along with a return code. - * Reply from PF has its major/minor versions also in param0 and param1. - * If there is a major version mismatch, then the VF cannot operate. 
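The opcode numbering in the enum being removed here is preserved by the new virtchnl.h (VERSION must stay 1, EVENT must stay 17, and so on). A hypothetical debugging helper, not part of the driver, that maps a few of those opcode values to names; the values are copied from the enum in this patch:

#include <stdio.h>

/* Opcode values copied from the virtchnl ops enum in this patch. */
enum virtchnl_ops_subset {
	VIRTCHNL_OP_UNKNOWN          = 0,
	VIRTCHNL_OP_VERSION          = 1,
	VIRTCHNL_OP_RESET_VF         = 2,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
	VIRTCHNL_OP_EVENT            = 17,
};

static const char *opcode_name(int op)
{
	switch (op) {
	case VIRTCHNL_OP_VERSION:          return "VERSION";
	case VIRTCHNL_OP_RESET_VF:         return "RESET_VF";
	case VIRTCHNL_OP_GET_VF_RESOURCES: return "GET_VF_RESOURCES";
	case VIRTCHNL_OP_EVENT:            return "EVENT";
	default:                           return "UNKNOWN";
	}
}

int main(void)
{
	printf("opcode 3 -> %s\n", opcode_name(VIRTCHNL_OP_GET_VF_RESOURCES));
	return 0;
}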
- * If there is a minor version mismatch, then the VF can operate but should - * add a warning to the system log. - * - * This enum element MUST always be specified as == 1, regardless of other - * changes in the API. The PF must always respond to this message without - * error regardless of version mismatch. - */ -#define I40E_VIRTCHNL_VERSION_MAJOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR 1 -#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 - -struct i40e_virtchnl_version_info { - u32 major; - u32 minor; -}; - -/* I40E_VIRTCHNL_OP_RESET_VF - * VF sends this request to PF with no parameters - * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register - * until reset completion is indicated. The admin queue must be reinitialized - * after this operation. - * - * When reset is complete, PF must ensure that all queues in all VSIs associated - * with the VF are stopped, all queue configurations in the HMC are set to 0, - * and all MAC and VLAN filters (except the default MAC address) on all VSIs - * are cleared. - */ - -/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES - * Version 1.0 VF sends this request to PF with no parameters - * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities - * PF responds with an indirect message containing - * i40e_virtchnl_vf_resource and one or more - * i40e_virtchnl_vsi_resource structures. - */ - -struct i40e_virtchnl_vsi_resource { - u16 vsi_id; - u16 num_queue_pairs; - enum i40e_vsi_type vsi_type; - u16 qset_handle; - u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS]; -}; -/* VF offload flags */ -#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001 -#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 -#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 -#define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 -#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 -#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 -#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 - -#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \ - I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \ - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) - -struct i40e_virtchnl_vf_resource { - u16 num_vsis; - u16 num_queue_pairs; - u16 max_vectors; - u16 max_mtu; - - u32 vf_offload_flags; - u32 rss_key_size; - u32 rss_lut_size; - - struct i40e_virtchnl_vsi_resource vsi_res[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE - * VF sends this message to set up parameters for one TX queue. - * External data buffer contains one instance of i40e_virtchnl_txq_info. - * PF configures requested queue and returns a status code. - */ - -/* Tx queue config info */ -struct i40e_virtchnl_txq_info { - u16 vsi_id; - u16 queue_id; - u16 ring_len; /* number of descriptors, multiple of 8 */ - u16 headwb_enabled; - u64 dma_ring_addr; - u64 dma_headwb_addr; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE - * VF sends this message to set up parameters for one RX queue. - * External data buffer contains one instance of i40e_virtchnl_rxq_info. - * PF configures requested queue and returns a status code. 
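The version handshake described above (a major mismatch is fatal, a minor mismatch only warrants a warning) can be expressed as a small compatibility check. A sketch under that rule, using the VERSION_MAJOR/MINOR values from this header; the struct is a local stand-in, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTCHNL_VERSION_MAJOR 1
#define VIRTCHNL_VERSION_MINOR 1

struct version_info {          /* stand-in for the virtchnl version struct */
	uint32_t major;
	uint32_t minor;
};

/* Major mismatch: cannot operate.  Minor mismatch: operate, but warn. */
static bool versions_compatible(const struct version_info *pf)
{
	if (pf->major != VIRTCHNL_VERSION_MAJOR)
		return false;
	if (pf->minor != VIRTCHNL_VERSION_MINOR)
		fprintf(stderr, "minor version mismatch (PF %u.%u)\n",
			(unsigned)pf->major, (unsigned)pf->minor);
	return true;
}

int main(void)
{
	struct version_info pf = { .major = 1, .minor = 0 };

	printf("compatible: %s\n", versions_compatible(&pf) ? "yes" : "no");
	return 0;
}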
- */ - -/* Rx queue config info */ -struct i40e_virtchnl_rxq_info { - u16 vsi_id; - u16 queue_id; - u32 ring_len; /* number of descriptors, multiple of 32 */ - u16 hdr_size; - u16 splithdr_enabled; - u32 databuffer_size; - u32 max_pkt_size; - u64 dma_ring_addr; - enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES - * VF sends this message to set parameters for all active TX and RX queues - * associated with the specified VSI. - * PF configures queues and returns status. - * If the number of queues specified is greater than the number of queues - * associated with the VSI, an error is returned and no queues are configured. - */ -struct i40e_virtchnl_queue_pair_info { - /* NOTE: vsi_id and queue_id should be identical for both queues. */ - struct i40e_virtchnl_txq_info txq; - struct i40e_virtchnl_rxq_info rxq; -}; - -struct i40e_virtchnl_vsi_queue_config_info { - u16 vsi_id; - u16 num_queue_pairs; - struct i40e_virtchnl_queue_pair_info qpair[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP - * VF uses this message to map vectors to queues. - * The rxq_map and txq_map fields are bitmaps used to indicate which queues - * are to be associated with the specified vector. - * The "other" causes are always mapped to vector 0. - * PF configures interrupt mapping and returns status. - */ -struct i40e_virtchnl_vector_map { - u16 vsi_id; - u16 vector_id; - u16 rxq_map; - u16 txq_map; - u16 rxitr_idx; - u16 txitr_idx; -}; - -struct i40e_virtchnl_irq_map_info { - u16 num_vectors; - struct i40e_virtchnl_vector_map vecmap[1]; -}; - -/* I40E_VIRTCHNL_OP_ENABLE_QUEUES - * I40E_VIRTCHNL_OP_DISABLE_QUEUES - * VF sends these message to enable or disable TX/RX queue pairs. - * The queues fields are bitmaps indicating which queues to act upon. - * (Currently, we only support 16 queues per VF, but we make the field - * u32 to allow for expansion.) - * PF performs requested action and returns status. - */ -struct i40e_virtchnl_queue_select { - u16 vsi_id; - u16 pad; - u32 rx_queues; - u32 tx_queues; -}; - -/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS - * VF sends this message in order to add one or more unicast or multicast - * address filters for the specified VSI. - * PF adds the filters and returns status. - */ - -/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS - * VF sends this message in order to remove one or more unicast or multicast - * filters for the specified VSI. - * PF removes the filters and returns status. - */ - -struct i40e_virtchnl_ether_addr { - u8 addr[I40E_ETH_LENGTH_OF_ADDRESS]; - u8 pad[2]; -}; - -struct i40e_virtchnl_ether_addr_list { - u16 vsi_id; - u16 num_elements; - struct i40e_virtchnl_ether_addr list[1]; -}; - -#ifdef I40E_SOL_VF_SUPPORT -/* I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG - * VF sends this message to get the default MTU and list of additional ethernet - * addresses it is allowed to use. - * PF responds with an indirect message containing - * i40e_virtchnl_addnl_solaris_config with zero or more - * i40e_virtchnl_ether_addr structures. - * - * It is expected that this operation will only ever be needed for Solaris VFs - * running under a Solaris PF. - */ -struct i40e_virtchnl_addnl_solaris_config { - u16 default_mtu; - struct i40e_virtchnl_ether_addr_list al; -}; - -#endif -/* I40E_VIRTCHNL_OP_ADD_VLAN - * VF sends this message to add one or more VLAN tag filters for receives. - * PF adds the filters and returns status. - * If a port VLAN is configured by the PF, this operation will return an - * error to the VF. 
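Several of these messages (the VSI queue config, IRQ map, address and VLAN lists) end in a one-element array and are sent as indirect buffers sized for the real element count. A sketch of that sizing arithmetic with reduced stand-in structs; the length formula matches the check that virtchnl_vc_validate_vf_msg() applies to CONFIG_VSI_QUEUES further down in this patch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Reduced stand-ins: only what is needed to show the length math. */
struct queue_pair_info { uint8_t payload[64]; };  /* checked as 64 bytes */

struct vsi_queue_config_info {
	uint16_t vsi_id;
	uint16_t num_queue_pairs;
	uint32_t pad;
	struct queue_pair_info qpair[1];  /* variable-length tail */
};

int main(void)
{
	uint16_t nb_qp = 4;
	/* The validator expects sizeof(info) plus num_queue_pairs full pair
	 * entries; the declared qpair[1] is not subtracted for this message. */
	size_t len = sizeof(struct vsi_queue_config_info) +
		     nb_qp * sizeof(struct queue_pair_info);
	struct vsi_queue_config_info *msg = calloc(1, len);

	if (!msg)
		return 1;
	msg->num_queue_pairs = nb_qp;
	printf("indirect buffer length for %u pairs: %zu bytes\n",
	       (unsigned)nb_qp, len);
	free(msg);
	return 0;
}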
- */ - -/* I40E_VIRTCHNL_OP_DEL_VLAN - * VF sends this message to remove one or more VLAN tag filters for receives. - * PF removes the filters and returns status. - * If a port VLAN is configured by the PF, this operation will return an - * error to the VF. - */ - -struct i40e_virtchnl_vlan_filter_list { - u16 vsi_id; - u16 num_elements; - u16 vlan_id[1]; -}; - -/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE - * VF sends VSI id and flags. - * PF returns status code in retval. - * Note: we assume that broadcast accept mode is always enabled. - */ -struct i40e_virtchnl_promisc_info { - u16 vsi_id; - u16 flags; -}; - -#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001 -#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002 - -/* I40E_VIRTCHNL_OP_GET_STATS - * VF sends this message to request stats for the selected VSI. VF uses - * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id - * field is ignored by the PF. - * - * PF replies with struct i40e_eth_stats in an external buffer. - */ - -/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY - * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT - * VF sends these messages to configure RSS. Only supported if both PF - * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during - * configuration negotiation. If this is the case, then the rss fields in - * the vf resource struct are valid. - * Both the key and LUT are initialized to 0 by the PF, meaning that - * RSS is effectively disabled until set up by the VF. - */ -struct i40e_virtchnl_rss_key { - u16 vsi_id; - u16 key_len; - u8 key[1]; /* RSS hash key, packed bytes */ -}; - -struct i40e_virtchnl_rss_lut { - u16 vsi_id; - u16 lut_entries; - u8 lut[1]; /* RSS lookup table*/ -}; - -/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS - * I40E_VIRTCHNL_OP_SET_RSS_HENA - * VF sends these messages to get and set the hash filter enable bits for RSS. - * By default, the PF sets these to all possible traffic types that the - * hardware supports. The VF can query this value if it wants to change the - * traffic types that are hashed by the hardware. - * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h - */ -struct i40e_virtchnl_rss_hena { - u64 hena; -}; - -/* I40E_VIRTCHNL_OP_EVENT - * PF sends this message to inform the VF driver of events that may affect it. - * No direct response is expected from the VF, though it may generate other - * messages in response to this one. - */ -enum i40e_virtchnl_event_codes { - I40E_VIRTCHNL_EVENT_UNKNOWN = 0, - I40E_VIRTCHNL_EVENT_LINK_CHANGE, - I40E_VIRTCHNL_EVENT_RESET_IMPENDING, - I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, -}; -#define I40E_PF_EVENT_SEVERITY_INFO 0 -#define I40E_PF_EVENT_SEVERITY_ATTENTION 1 -#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED 2 -#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255 - -struct i40e_virtchnl_pf_event { - enum i40e_virtchnl_event_codes event; - union { - struct { - enum i40e_aq_link_speed link_speed; - bool link_status; - } link_event; - } event_data; - - int severity; -}; - -/* VF reset states - these are written into the RSTAT register: - * I40E_VFGEN_RSTAT1 on the PF - * I40E_VFGEN_RSTAT on the VF - * When the PF initiates a reset, it writes 0 - * When the reset is complete, it writes 1 - * When the PF detects that the VF has recovered, it writes 2 - * VF checks this register periodically to determine if a reset has occurred, - * then polls it to know when the reset is complete. - * If either the PF or VF reads the register while the hardware - * is in a reset state, it will return DEADBEEF, which, when masked - * will result in 3. 
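The reset-state note above (a register read of DEADBEEF during a hardware reset masks down to 3) is the basis of the VF's reset polling loop. A sketch of that classification, assuming the state occupies the low two bits of the register, which is consistent with the masked value of 3 mentioned in the comment:

#include <stdint.h>
#include <stdio.h>

enum vfr_states {                 /* values from the header */
	VFR_INPROGRESS = 0,
	VFR_COMPLETED  = 1,
	VFR_VFACTIVE   = 2,
};

/* Assumption for this sketch: the reset state lives in the low two bits of
 * VFGEN_RSTAT, so 0xDEADBEEF masks down to 3. */
#define VFGEN_RSTAT_STATE_MASK 0x3u

static const char *classify_rstat(uint32_t rstat)
{
	switch (rstat & VFGEN_RSTAT_STATE_MASK) {
	case VFR_INPROGRESS: return "reset in progress";
	case VFR_COMPLETED:  return "reset completed";
	case VFR_VFACTIVE:   return "VF active";
	default:             return "hardware still in reset";
	}
}

int main(void)
{
	printf("%s\n", classify_rstat(0xDEADBEEFu)); /* hardware in reset */
	printf("%s\n", classify_rstat(2));           /* VF active */
	return 0;
}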
- */ -enum i40e_vfr_states { - I40E_VFR_INPROGRESS = 0, - I40E_VFR_COMPLETED, - I40E_VFR_VFACTIVE, - I40E_VFR_UNKNOWN, -}; - -#endif /* _I40E_VIRTCHNL_H_ */ diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h new file mode 100644 index 0000000000..f00dd36048 --- /dev/null +++ b/drivers/net/i40e/base/virtchnl.h @@ -0,0 +1,772 @@ +/******************************************************************************* + +Copyright (c) 2013 - 2015, Intel Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +***************************************************************************/ + +#ifndef _VIRTCHNL_H_ +#define _VIRTCHNL_H_ + +/* Description: + * This header file describes the VF-PF communication protocol used + * by the drivers for all devices starting from our 40G product line + * + * Admin queue buffer usage: + * desc->opcode is always aqc_opc_send_msg_to_pf + * flags, retval, datalen, and data addr are all used normally. + * The Firmware copies the cookie fields when sending messages between the + * PF and VF, but uses all other fields internally. Due to this limitation, + * we must send all messages as "indirect", i.e. using an external buffer. + * + * All the VSI indexes are relative to the VF. Each VF can have maximum of + * three VSIs. All the queue indexes are relative to the VSI. Each VF can + * have a maximum of sixteen queues for all of its VSIs. + * + * The PF is required to return a status code in v_retval for all messages + * except RESET_VF, which does not require any response. The return value + * is of status_code type, defined in the shared type.h. + * + * In general, VF driver initialization should roughly follow the order of + * these opcodes. The VF driver must first validate the API version of the + * PF driver, then request a reset, then get resources, then configure + * queues and interrupts. After these operations are complete, the VF + * driver may start its queues, optionally add MAC and VLAN filters, and + * process traffic. 
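The initialization order spelled out in the comment above (validate the API version, reset, get resources, configure queues and interrupts, then enable queues and add filters) maps directly onto the opcodes defined below. An illustrative sketch listing that sequence with opcode values copied from this header:

#include <stdio.h>

/* Opcode values copied from enum virtchnl_ops in this header. */
enum {
	OP_VERSION           = 1,
	OP_RESET_VF          = 2,
	OP_GET_VF_RESOURCES  = 3,
	OP_CONFIG_VSI_QUEUES = 6,
	OP_CONFIG_IRQ_MAP    = 7,
	OP_ENABLE_QUEUES     = 8,
	OP_ADD_ETH_ADDR      = 10,
};

int main(void)
{
	/* The recommended VF bring-up order from the header comment. */
	const int init_sequence[] = {
		OP_VERSION, OP_RESET_VF, OP_GET_VF_RESOURCES,
		OP_CONFIG_VSI_QUEUES, OP_CONFIG_IRQ_MAP,
		OP_ENABLE_QUEUES, OP_ADD_ETH_ADDR,
	};
	const unsigned int n = sizeof(init_sequence) / sizeof(init_sequence[0]);

	for (unsigned int i = 0; i < n; i++)
		printf("step %u: opcode %d\n", i + 1, init_sequence[i]);
	return 0;
}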
+ */ + +/* START GENERIC DEFINES + * Need to ensure the following enums and defines hold the same meaning and + * value in current and future projects + */ + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_NOT_SUPPORTED = -64, +}; + +#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 +#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 +#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 +#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 +#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 +#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 + +enum virtchnl_link_speed { + VIRTCHNL_LINK_SPEED_UNKNOWN = 0, + VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), + VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), + VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), + VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), + VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), + VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), +}; + +/* for hsplit_0 field of Rx HMC context */ +/* deprecated with AVF 1.0 */ +enum virtchnl_rx_hsplit { + VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, + VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, + VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, + VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, + VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, +}; + +#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6 +/* END GENERIC DEFINES */ + +/* Opcodes for VF-PF communication. These are placed in the v_opcode field + * of the virtchnl_msg structure. + */ +enum virtchnl_ops { +/* The PF sends status change events to VFs using + * the VIRTCHNL_OP_EVENT opcode. + * VFs send requests to the PF using the other ops. + * Use of "advanced opcode" features must be negotiated as part of capabilities + * exchange and are not considered part of base mode feature set. + */ + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_GET_STATS = 15, + VIRTCHNL_OP_RSVD = 16, + VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ +#ifdef VIRTCHNL_SOL_VF_SUPPORT + VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19, +#endif +#ifdef VIRTCHNL_IWARP + VIRTCHNL_OP_IWARP = 20, /* advanced opcode */ + VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */ + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */ +#endif + VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + VIRTCHNL_OP_SET_RSS_HENA = 26, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + +}; + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ + {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 
1 : 0)} + +/* Virtual channel message descriptor. This overlays the admin queue + * descriptor. All other data is passed in external buffers. + */ + +struct virtchnl_msg { + u8 pad[8]; /* AQ flags/opcode/len/retval fields */ + enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ + enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + u32 vfid; /* used by PF when sending to VF */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); + +/* Message descriptions and data structures.*/ + +/* VIRTCHNL_OP_VERSION + * VF posts its version number to the PF. PF responds with its version number + * in the same format, along with a return code. + * Reply from PF has its major/minor versions also in param0 and param1. + * If there is a major version mismatch, then the VF cannot operate. + * If there is a minor version mismatch, then the VF can operate but should + * add a warning to the system log. + * + * This enum element MUST always be specified as == 1, regardless of other + * changes in the API. The PF must always respond to this message without + * error regardless of version mismatch. + */ +#define VIRTCHNL_VERSION_MAJOR 1 +#define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 + +struct virtchnl_version_info { + u32 major; + u32 minor; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); + +#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) +#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) + +/* VIRTCHNL_OP_RESET_VF + * VF sends this request to PF with no parameters + * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register + * until reset completion is indicated. The admin queue must be reinitialized + * after this operation. + * + * When reset is complete, PF must ensure that all queues in all VSIs associated + * with the VF are stopped, all queue configurations in the HMC are set to 0, + * and all MAC and VLAN filters (except the default MAC address) on all VSIs + * are cleared. + */ + +/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV + * vsi_type should always be 6 for backward compatibility. Add other fields + * as needed. + */ +enum virtchnl_vsi_type { + VIRTCHNL_VSI_TYPE_INVALID = 0, + VIRTCHNL_VSI_SRIOV = 6, +}; + +/* VIRTCHNL_OP_GET_VF_RESOURCES + * Version 1.0 VF sends this request to PF with no parameters + * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities + * PF responds with an indirect message containing + * virtchnl_vf_resource and one or more + * virtchnl_vsi_resource structures. + */ + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + enum virtchnl_vsi_type vsi_type; + u16 qset_handle; + u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); + +/* VF offload flags + * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including + * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
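VIRTCHNL_CHECK_STRUCT_LEN, introduced just above, turns a size mismatch into a division by zero inside an enum initializer, which must be a constant expression, so the build fails. A stand-alone sketch of the same trick, using a renamed macro and a demo struct so it does not pose as the header itself:

#include <stdint.h>

/* Same shape as VIRTCHNL_CHECK_STRUCT_LEN: dividing by zero when sizeof()
 * disagrees with the expected length makes compilation fail. */
#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
	{ static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }

struct demo_version_info {
	uint32_t major;
	uint32_t minor;
};

CHECK_STRUCT_LEN(8, demo_version_info);   /* compiles: struct is 8 bytes */
/* CHECK_STRUCT_LEN(12, demo_version_info); -- would fail: division by zero */

int main(void) { return 0; }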
+ */ +#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 +#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 +#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 +#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 +#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 +#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 +#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 +#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 +#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 +#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 + +#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF) + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_offload_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); + +/* VIRTCHNL_OP_CONFIG_TX_QUEUE + * VF sends this message to set up parameters for one TX queue. + * External data buffer contains one instance of virtchnl_txq_info. + * PF configures requested queue and returns a status code. + */ + +/* Tx queue config info */ +struct virtchnl_txq_info { + u16 vsi_id; + u16 queue_id; + u16 ring_len; /* number of descriptors, multiple of 8 */ + u16 headwb_enabled; /* deprecated with AVF 1.0 */ + u64 dma_ring_addr; + u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); + +/* VIRTCHNL_OP_CONFIG_RX_QUEUE + * VF sends this message to set up parameters for one RX queue. + * External data buffer contains one instance of virtchnl_rxq_info. + * PF configures requested queue and returns a status code. + */ + +/* Rx queue config info */ +struct virtchnl_rxq_info { + u16 vsi_id; + u16 queue_id; + u32 ring_len; /* number of descriptors, multiple of 32 */ + u16 hdr_size; + u16 splithdr_enabled; /* deprecated with AVF 1.0 */ + u32 databuffer_size; + u32 max_pkt_size; + u32 pad1; + u64 dma_ring_addr; + enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + u32 pad2; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); + +/* VIRTCHNL_OP_CONFIG_VSI_QUEUES + * VF sends this message to set parameters for all active TX and RX queues + * associated with the specified VSI. + * PF configures queues and returns status. + * If the number of queues specified is greater than the number of queues + * associated with the VSI, an error is returned and no queues are configured. + */ +struct virtchnl_queue_pair_info { + /* NOTE: vsi_id and queue_id should be identical for both queues. */ + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); + +struct virtchnl_vsi_queue_config_info { + u16 vsi_id; + u16 num_queue_pairs; + u32 pad; + struct virtchnl_queue_pair_info qpair[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. 
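A 1.1 VF sends its capability bitmap with GET_VF_RESOURCES and the PF echoes the granted subset back in vf_offload_flags, with VF_BASE_MODE_OFFLOADS defining the baseline set. A sketch of checking what was granted; the flag values and the base-mode macro are copied from the patch, while the "granted" value is purely hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Offload bits copied from the patch. */
#define VIRTCHNL_VF_OFFLOAD_L2       0x00000001
#define VIRTCHNL_VF_OFFLOAD_VLAN     0x00010000
#define VIRTCHNL_VF_OFFLOAD_RSS_PF   0x00080000

/* Base-mode set, as defined by VF_BASE_MODE_OFFLOADS in the patch. */
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
			       VIRTCHNL_VF_OFFLOAD_VLAN | \
			       VIRTCHNL_VF_OFFLOAD_RSS_PF)

int main(void)
{
	uint32_t requested = VF_BASE_MODE_OFFLOADS;
	/* Hypothetical PF reply, for illustration only. */
	uint32_t granted = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_VLAN;
	uint32_t missing = requested & ~granted;

	if (missing)
		printf("PF did not grant offload bits 0x%08x\n",
		       (unsigned)missing);
	return 0;
}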
+ * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support; otherwise it will respond with the + * number requested. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + +/* VIRTCHNL_OP_CONFIG_IRQ_MAP + * VF uses this message to map vectors to queues. + * The rxq_map and txq_map fields are bitmaps used to indicate which queues + * are to be associated with the specified vector. + * The "other" causes are always mapped to vector 0. + * PF configures interrupt mapping and returns status. + */ +struct virtchnl_vector_map { + u16 vsi_id; + u16 vector_id; + u16 rxq_map; + u16 txq_map; + u16 rxitr_idx; + u16 txitr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); + +struct virtchnl_irq_map_info { + u16 num_vectors; + struct virtchnl_vector_map vecmap[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); + +/* VIRTCHNL_OP_ENABLE_QUEUES + * VIRTCHNL_OP_DISABLE_QUEUES + * VF sends these message to enable or disable TX/RX queue pairs. + * The queues fields are bitmaps indicating which queues to act upon. + * (Currently, we only support 16 queues per VF, but we make the field + * u32 to allow for expansion.) + * PF performs requested action and returns status. + */ +struct virtchnl_queue_select { + u16 vsi_id; + u16 pad; + u32 rx_queues; + u32 tx_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); + +/* VIRTCHNL_OP_ADD_ETH_ADDR + * VF sends this message in order to add one or more unicast or multicast + * address filters for the specified VSI. + * PF adds the filters and returns status. + */ + +/* VIRTCHNL_OP_DEL_ETH_ADDR + * VF sends this message in order to remove one or more unicast or multicast + * filters for the specified VSI. + * PF removes the filters and returns status. + */ + +struct virtchnl_ether_addr { + u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); + +#ifdef VIRTCHNL_SOL_VF_SUPPORT +/* VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG + * VF sends this message to get the default MTU and list of additional ethernet + * addresses it is allowed to use. + * PF responds with an indirect message containing + * virtchnl_addnl_solaris_config with zero or more + * virtchnl_ether_addr structures. + * + * It is expected that this operation will only ever be needed for Solaris VFs + * running under a Solaris PF. + */ +struct virtchnl_addnl_solaris_config { + u16 default_mtu; + struct virtchnl_ether_addr_list al; +}; + +#endif +/* VIRTCHNL_OP_ADD_VLAN + * VF sends this message to add one or more VLAN tag filters for receives. + * PF adds the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +/* VIRTCHNL_OP_DEL_VLAN + * VF sends this message to remove one or more VLAN tag filters for receives. + * PF removes the filters and returns status. + * If a port VLAN is configured by the PF, this operation will return an + * error to the VF. + */ + +struct virtchnl_vlan_filter_list { + u16 vsi_id; + u16 num_elements; + u16 vlan_id[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. 
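In the IRQ map message above, rxq_map and txq_map are per-vector queue bitmaps, with the "other" causes always on vector 0. A sketch of filling one vector map the way the comment describes; the struct is a local copy of the virtchnl_vector_map layout rather than the header's type:

#include <stdint.h>
#include <stdio.h>

/* Local copy of the virtchnl_vector_map layout from the patch. */
struct vector_map {
	uint16_t vsi_id;
	uint16_t vector_id;
	uint16_t rxq_map;   /* bit n set => Rx queue n fires this vector */
	uint16_t txq_map;   /* bit n set => Tx queue n fires this vector */
	uint16_t rxitr_idx;
	uint16_t txitr_idx;
};

int main(void)
{
	struct vector_map map = { .vsi_id = 0, .vector_id = 1 };

	/* Map Rx/Tx queues 0-3 to vector 1; "other" causes stay on vector 0. */
	for (int q = 0; q < 4; q++) {
		map.rxq_map |= (uint16_t)(1 << q);
		map.txq_map |= (uint16_t)(1 << q);
	}
	printf("rxq_map=0x%04x txq_map=0x%04x\n",
	       (unsigned)map.rxq_map, (unsigned)map.txq_map);
	return 0;
}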
+ */ +struct virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info); + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_GET_STATS + * VF sends this message to request stats for the selected VSI. VF uses + * the virtchnl_queue_select struct to specify the VSI. The queue_id + * field is ignored by the PF. + * + * PF replies with struct eth_stats in an external buffer. + */ + +/* VIRTCHNL_OP_CONFIG_RSS_KEY + * VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); + +struct virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); + +/* VIRTCHNL_OP_GET_RSS_HENA_CAPS + * VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + */ +struct virtchnl_rss_hena { + u64 hena; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena); + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum virtchnl_event_codes { + VIRTCHNL_EVENT_UNKNOWN = 0, + VIRTCHNL_EVENT_LINK_CHANGE, + VIRTCHNL_EVENT_RESET_IMPENDING, + VIRTCHNL_EVENT_PF_DRIVER_CLOSE, +}; + +#define PF_EVENT_SEVERITY_INFO 0 +#define PF_EVENT_SEVERITY_ATTENTION 1 +#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2 +#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 + +struct virtchnl_pf_event { + enum virtchnl_event_codes event; + union { + struct { + enum virtchnl_link_speed link_speed; + bool link_status; + } link_event; + } event_data; + + int severity; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); + +#ifdef VIRTCHNL_IWARP + +/* VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP + * VF uses this message to request PF to map IWARP vectors to IWARP queues. + * The request for this originates from the VF IWARP driver through + * a client interface between VF LAN and VF IWARP driver. + * A vector could have an AEQ and CEQ attached to it although + * there is a single AEQ per VF IWARP instance in which case + * most vectors will have an INVALID_IDX for aeq and valid idx for ceq. + * There will never be a case where there will be multiple CEQs attached + * to a single vector. + * PF configures interrupt mapping and returns status. + */ + +/* HW does not define a type value for AEQ; only for RX/TX and CEQ. + * In order for us to keep the interface simple, SW will define a + * unique type value for AEQ. 
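The RSS key message is another variable-length case: key[1] is a one-byte tail, so the buffer is sized with key_len - 1 extra bytes, the same arithmetic virtchnl_vc_validate_vf_msg() applies further down. A sketch with a reduced stand-in struct; the 52-byte key length is only a plausible example value:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Reduced stand-in for the RSS key message: key[1] is a variable tail. */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t key[1];
};

int main(void)
{
	uint16_t key_len = 52;           /* example key size, not normative */
	size_t msg_len = sizeof(struct rss_key_msg) + key_len - 1;
	struct rss_key_msg *msg = calloc(1, msg_len);

	if (!msg)
		return 1;
	msg->vsi_id = 0;
	msg->key_len = key_len;
	memset(msg->key, 0x6d, key_len); /* placeholder key bytes */
	printf("message length = %zu bytes\n", msg_len);
	free(msg);
	return 0;
}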
+ */ +#define QUEUE_TYPE_PE_AEQ 0x80 +#define QUEUE_INVALID_IDX 0xFFFF + +struct virtchnl_iwarp_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); + +struct virtchnl_iwarp_qvlist_info { + u32 num_vectors; + struct virtchnl_iwarp_qv_info qv_info[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info); + +#endif + +/* VF reset states - these are written into the RSTAT register: + * VFGEN_RSTAT on the VF + * When the PF initiates a reset, it writes 0 + * When the reset is complete, it writes 1 + * When the PF detects that the VF has recovered, it writes 2 + * VF checks this register periodically to determine if a reset has occurred, + * then polls it to know when the reset is complete. + * If either the PF or VF reads the register while the hardware + * is in a reset state, it will return DEADBEEF, which, when masked + * will result in 3. + */ +enum virtchnl_vfr_states { + VIRTCHNL_VFR_INPROGRESS = 0, + VIRTCHNL_VFR_COMPLETED, + VIRTCHNL_VFR_VFACTIVE, +}; + +/** + * virtchnl_vc_validate_vf_msg + * @ver: Virtchnl version info + * @v_opcode: Opcode for the message + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * validate msg format against struct for each opcode + */ +static inline int +virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, + u8 *msg, u16 msglen) +{ + bool err_msg_format = false; + int valid_len = 0; + + /* Validate message length. */ + switch (v_opcode) { + case VIRTCHNL_OP_VERSION: + valid_len = sizeof(struct virtchnl_version_info); + break; + case VIRTCHNL_OP_RESET_VF: + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + if (VF_IS_V11(ver)) + valid_len = sizeof(u32); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + valid_len = sizeof(struct virtchnl_txq_info); + break; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + valid_len = sizeof(struct virtchnl_rxq_info); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + valid_len = sizeof(struct virtchnl_vsi_queue_config_info); + if (msglen >= valid_len) { + struct virtchnl_vsi_queue_config_info *vqc = + (struct virtchnl_vsi_queue_config_info *)msg; + valid_len += (vqc->num_queue_pairs * + sizeof(struct + virtchnl_queue_pair_info)); + if (vqc->num_queue_pairs == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + valid_len = sizeof(struct virtchnl_irq_map_info); + if (msglen >= valid_len) { + struct virtchnl_irq_map_info *vimi = + (struct virtchnl_irq_map_info *)msg; + valid_len += (vimi->num_vectors * + sizeof(struct virtchnl_vector_map)); + if (vimi->num_vectors == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: + valid_len = sizeof(struct virtchnl_queue_select); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + case VIRTCHNL_OP_DEL_ETH_ADDR: + valid_len = sizeof(struct virtchnl_ether_addr_list); + if (msglen >= valid_len) { + struct virtchnl_ether_addr_list *veal = + (struct virtchnl_ether_addr_list *)msg; + valid_len += veal->num_elements * + sizeof(struct virtchnl_ether_addr); + if (veal->num_elements == 0) + err_msg_format = true; + } + break; + case VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_DEL_VLAN: + valid_len = sizeof(struct virtchnl_vlan_filter_list); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; + valid_len += vfl->num_elements * sizeof(u16); + if (vfl->num_elements == 0) + err_msg_format = true; + } + break; + case 
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + valid_len = sizeof(struct virtchnl_promisc_info); + break; + case VIRTCHNL_OP_GET_STATS: + valid_len = sizeof(struct virtchnl_queue_select); + break; +#ifdef VIRTCHNL_IWARP + case VIRTCHNL_OP_IWARP: + /* These messages are opaque to us and will be validated in + * the RDMA client code. We just need to check for nonzero + * length. The firmware will enforce max length restrictions. + */ + if (msglen) + valid_len = msglen; + else + err_msg_format = true; + break; + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + valid_len = sizeof(struct virtchnl_iwarp_qvlist_info); + if (msglen >= valid_len) { + struct virtchnl_iwarp_qvlist_info *qv = + (struct virtchnl_iwarp_qvlist_info *)msg; + if (qv->num_vectors == 0) { + err_msg_format = true; + break; + } + valid_len += ((qv->num_vectors - 1) * + sizeof(struct virtchnl_iwarp_qv_info)); + } + break; +#endif + case VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct virtchnl_rss_key); + if (msglen >= valid_len) { + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; + valid_len += vrk->key_len - 1; + } + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct virtchnl_rss_lut); + if (msglen >= valid_len) { + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; + valid_len += vrl->lut_entries - 1; + } + break; + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct virtchnl_rss_hena); + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; + /* These are always errors coming from the VF. */ + case VIRTCHNL_OP_EVENT: + case VIRTCHNL_OP_UNKNOWN: + default: + return VIRTCHNL_ERR_PARAM; + } + /* few more checks */ + if ((valid_len != msglen) || (err_msg_format)) + return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; + + return 0; +} +#endif /* _VIRTCHNL_H_ */ diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 0c5cdb163a..07677dac49 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -102,7 +102,7 @@ /* Linux PF host with virtchnl version 1.1 */ #define PF_IS_V11(vf) \ - (((vf)->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && \ + (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \ ((vf)->version_minor == 1)) /* index flex payload per layer */ @@ -768,7 +768,7 @@ struct i40e_vf { /* Event from pf */ bool dev_closed; bool link_up; - enum i40e_aq_link_speed link_speed; + enum virtchnl_link_speed link_speed; bool vf_reset; volatile uint32_t pend_cmd; /* pending command not finished yet */ int32_t cmd_retval; /* return value of the cmd response from PF */ @@ -776,8 +776,8 @@ struct i40e_vf { uint8_t *aq_resp; /* buffer to store the adminq response from PF */ /* VSI info */ - struct i40e_virtchnl_vf_resource *vf_res; /* All VSIs */ - struct i40e_virtchnl_vsi_resource *vsi_res; /* LAN VSI */ + struct virtchnl_vf_resource *vf_res; /* All VSIs */ + struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */ struct i40e_vsi vsi; uint64_t flags; }; diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 91c1c0f68b..ab5e98510b 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -77,7 +77,7 @@ #define MAX_RESET_WAIT_CNT 20 struct i40evf_arq_msg_info { - enum i40e_virtchnl_ops ops; + enum virtchnl_ops ops; enum i40e_status_code result; 
uint16_t buf_len; uint16_t msg_len; @@ -85,7 +85,7 @@ struct i40evf_arq_msg_info { }; struct vf_cmd_info { - enum i40e_virtchnl_ops ops; + enum virtchnl_ops ops; uint8_t *in_args; uint32_t in_args_size; uint8_t *out_buffer; @@ -244,7 +244,7 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_arq_event_info event; - enum i40e_virtchnl_ops opcode; + enum virtchnl_ops opcode; enum i40e_status_code retval; int ret; enum i40evf_aq_result result = I40EVF_MSG_NON; @@ -259,16 +259,16 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data) return result; } - opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); + opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high); retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low); /* pf sys event */ - if (opcode == I40E_VIRTCHNL_OP_EVENT) { - struct i40e_virtchnl_pf_event *vpe = - (struct i40e_virtchnl_pf_event *)event.msg_buf; + if (opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)event.msg_buf; result = I40EVF_MSG_SYS; switch (vpe->event) { - case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + case VIRTCHNL_EVENT_LINK_CHANGE: vf->link_up = vpe->event_data.link_event.link_status; vf->link_speed = @@ -277,12 +277,12 @@ i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data) PMD_DRV_LOG(INFO, "Link status update:%s", vf->link_up ? "up" : "down"); break; - case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + case VIRTCHNL_EVENT_RESET_IMPENDING: vf->vf_reset = true; vf->pend_msg |= PFMSG_RESET_IMPENDING; PMD_DRV_LOG(INFO, "vf is reseting"); break; - case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: vf->dev_closed = true; vf->pend_msg |= PFMSG_DRIVER_CLOSE; PMD_DRV_LOG(INFO, "PF driver closed"); @@ -312,17 +312,17 @@ static inline void _clear_cmd(struct i40e_vf *vf) { rte_wmb(); - vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN; + vf->pend_cmd = VIRTCHNL_OP_UNKNOWN; } /* * Check there is pending cmd in execution. If none, set new command. 
*/ static inline int -_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops) +_atomic_set_cmd(struct i40e_vf *vf, enum virtchnl_ops ops) { int ret = rte_atomic32_cmpset(&vf->pend_cmd, - I40E_VIRTCHNL_OP_UNKNOWN, ops); + VIRTCHNL_OP_UNKNOWN, ops); if (!ret) PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd); @@ -347,7 +347,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) info.msg = args->out_buffer; info.buf_len = args->out_size; - info.ops = I40E_VIRTCHNL_OP_UNKNOWN; + info.ops = VIRTCHNL_OP_UNKNOWN; info.result = I40E_SUCCESS; err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS, @@ -359,12 +359,12 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) } switch (args->ops) { - case I40E_VIRTCHNL_OP_RESET_VF: + case VIRTCHNL_OP_RESET_VF: /*no need to process in this function */ err = 0; break; - case I40E_VIRTCHNL_OP_VERSION: - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_GET_VF_RESOURCES: /* for init adminq commands, need to poll the response */ err = -1; do { @@ -385,7 +385,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) /* for other adminq in running time, waiting the cmd done flag */ err = -1; do { - if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) { + if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN) { err = 0; break; } @@ -404,15 +404,15 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) static int i40evf_check_api_version(struct rte_eth_dev *dev) { - struct i40e_virtchnl_version_info version, *pver; + struct virtchnl_version_info version, *pver; int err; struct vf_cmd_info args; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - version.major = I40E_VIRTCHNL_VERSION_MAJOR; - version.minor = I40E_VIRTCHNL_VERSION_MINOR; + version.major = VIRTCHNL_VERSION_MAJOR; + version.minor = VIRTCHNL_VERSION_MINOR; - args.ops = I40E_VIRTCHNL_OP_VERSION; + args.ops = VIRTCHNL_OP_VERSION; args.in_args = (uint8_t *)&version; args.in_args_size = sizeof(version); args.out_buffer = vf->aq_resp; @@ -424,19 +424,19 @@ i40evf_check_api_version(struct rte_eth_dev *dev) return err; } - pver = (struct i40e_virtchnl_version_info *)args.out_buffer; + pver = (struct virtchnl_version_info *)args.out_buffer; vf->version_major = pver->major; vf->version_minor = pver->minor; if (vf->version_major == I40E_DPDK_VERSION_MAJOR) PMD_DRV_LOG(INFO, "Peer is DPDK PF host"); - else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && - (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) + else if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && + (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) PMD_DRV_LOG(INFO, "Peer is Linux PF host"); else { PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)", vf->version_major, vf->version_minor, - I40E_VIRTCHNL_VERSION_MAJOR, - I40E_VIRTCHNL_VERSION_MINOR); + VIRTCHNL_VERSION_MAJOR, + VIRTCHNL_VERSION_MINOR); return -1; } @@ -452,15 +452,15 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev) struct vf_cmd_info args; uint32_t caps, len; - args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; + args.ops = VIRTCHNL_OP_GET_VF_RESOURCES; args.out_buffer = vf->aq_resp; args.out_size = I40E_AQ_BUF_SZ; if (PF_IS_V11(vf)) { - caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN | - I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_AQ | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + 
VIRTCHNL_VF_OFFLOAD_VLAN | + VIRTCHNL_VF_OFFLOAD_RX_POLLING; args.in_args = (uint8_t *)∩︀ args.in_args_size = sizeof(caps); } else { @@ -474,8 +474,8 @@ i40evf_get_vf_resource(struct rte_eth_dev *dev) return err; } - len = sizeof(struct i40e_virtchnl_vf_resource) + - I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource); + len = sizeof(struct virtchnl_vf_resource) + + I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); (void)rte_memcpy(vf->vf_res, args.out_buffer, RTE_MIN(args.out_size, len)); @@ -492,18 +492,18 @@ i40evf_config_promisc(struct rte_eth_dev *dev, struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); int err; struct vf_cmd_info args; - struct i40e_virtchnl_promisc_info promisc; + struct virtchnl_promisc_info promisc; promisc.flags = 0; promisc.vsi_id = vf->vsi_res->vsi_id; if (enable_unicast) - promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC; + promisc.flags |= FLAG_VF_UNICAST_PROMISC; if (enable_multicast) - promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC; + promisc.flags |= FLAG_VF_MULTICAST_PROMISC; - args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + args.ops = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; args.in_args = (uint8_t *)&promisc; args.in_args_size = sizeof(promisc); args.out_buffer = vf->aq_resp; @@ -525,12 +525,12 @@ i40evf_config_vlan_offload(struct rte_eth_dev *dev, struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); int err; struct vf_cmd_info args; - struct i40e_virtchnl_vlan_offload_info offload; + struct virtchnl_vlan_offload_info offload; offload.vsi_id = vf->vsi_res->vsi_id; offload.enable_vlan_strip = enable_vlan_strip; - args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD; + args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD; args.in_args = (uint8_t *)&offload; args.in_args_size = sizeof(offload); args.out_buffer = vf->aq_resp; @@ -550,7 +550,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev, struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); int err; struct vf_cmd_info args; - struct i40e_virtchnl_pvid_info tpid_info; + struct virtchnl_pvid_info tpid_info; if (info == NULL) { PMD_DRV_LOG(ERR, "invalid parameters"); @@ -561,7 +561,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev, tpid_info.vsi_id = vf->vsi_res->vsi_id; (void)rte_memcpy(&tpid_info.info, info, sizeof(*info)); - args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID; + args.ops = (enum virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID; args.in_args = (uint8_t *)&tpid_info; args.in_args_size = sizeof(tpid_info); args.out_buffer = vf->aq_resp; @@ -575,7 +575,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev, } static void -i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info, +i40evf_fill_virtchnl_vsi_txq_info(struct virtchnl_txq_info *txq_info, uint16_t vsi_id, uint16_t queue_id, uint16_t nb_txq, @@ -590,7 +590,7 @@ i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info, } static void -i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info, +i40evf_fill_virtchnl_vsi_rxq_info(struct virtchnl_rxq_info *rxq_info, uint16_t vsi_id, uint16_t queue_id, uint16_t nb_rxq, @@ -618,8 +618,8 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) (struct i40e_rx_queue **)dev->data->rx_queues; struct i40e_tx_queue **txq = (struct i40e_tx_queue **)dev->data->tx_queues; - struct i40e_virtchnl_vsi_queue_config_info *vc_vqci; - struct i40e_virtchnl_queue_pair_info *vc_qpi; + struct virtchnl_vsi_queue_config_info *vc_vqci; + struct 
virtchnl_queue_pair_info *vc_qpi; struct vf_cmd_info args; uint16_t i, nb_qp = vf->num_queue_pairs; const uint32_t size = @@ -628,7 +628,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) int ret; memset(buff, 0, sizeof(buff)); - vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff; + vc_vqci = (struct virtchnl_vsi_queue_config_info *)buff; vc_vqci->vsi_id = vf->vsi_res->vsi_id; vc_vqci->num_queue_pairs = nb_qp; @@ -640,7 +640,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) vf->max_pkt_len, rxq[i]); } memset(&args, 0, sizeof(args)); - args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES; + args.ops = VIRTCHNL_OP_CONFIG_VSI_QUEUES; args.in_args = (uint8_t *)vc_vqci; args.in_args_size = size; args.out_buffer = vf->aq_resp; @@ -648,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev) ret = i40evf_execute_vf_cmd(dev, &args); if (ret) PMD_DRV_LOG(ERR, "Failed to execute command of " - "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES"); + "VIRTCHNL_OP_CONFIG_VSI_QUEUES"); return ret; } @@ -662,8 +662,8 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) (struct i40e_rx_queue **)dev->data->rx_queues; struct i40e_tx_queue **txq = (struct i40e_tx_queue **)dev->data->tx_queues; - struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei; - struct i40e_virtchnl_queue_pair_ext_info *vc_qpei; + struct virtchnl_vsi_queue_config_ext_info *vc_vqcei; + struct virtchnl_queue_pair_ext_info *vc_qpei; struct vf_cmd_info args; uint16_t i, nb_qp = vf->num_queue_pairs; const uint32_t size = @@ -672,7 +672,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) int ret; memset(buff, 0, sizeof(buff)); - vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff; + vc_vqcei = (struct virtchnl_vsi_queue_config_ext_info *)buff; vc_vqcei->vsi_id = vf->vsi_res->vsi_id; vc_vqcei->num_queue_pairs = nb_qp; vc_qpei = vc_vqcei->qpair; @@ -693,7 +693,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) } memset(&args, 0, sizeof(args)); args.ops = - (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT; + (enum virtchnl_ops)VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT; args.in_args = (uint8_t *)vc_vqcei; args.in_args_size = size; args.out_buffer = vf->aq_resp; @@ -701,7 +701,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev) ret = i40evf_execute_vf_cmd(dev, &args); if (ret) PMD_DRV_LOG(ERR, "Failed to execute command of " - "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT"); + "VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT"); return ret; } @@ -724,9 +724,9 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct vf_cmd_info args; - uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \ - sizeof(struct i40e_virtchnl_vector_map)]; - struct i40e_virtchnl_irq_map_info *map_info; + uint8_t cmd_buffer[sizeof(struct virtchnl_irq_map_info) + \ + sizeof(struct virtchnl_vector_map)]; + struct virtchnl_irq_map_info *map_info; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t vector_id; @@ -741,7 +741,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) vector_id = I40E_MISC_VEC_ID; } - map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer; + map_info = (struct virtchnl_irq_map_info *)cmd_buffer; map_info->num_vectors = 1; map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT; map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id; @@ -756,7 +756,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev) intr_handle->intr_vec[i] = vector_id; } - args.ops 
= I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP; + args.ops = VIRTCHNL_OP_CONFIG_IRQ_MAP; args.in_args = (u8 *)cmd_buffer; args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; @@ -773,7 +773,7 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid, bool on) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct i40e_virtchnl_queue_select queue_select; + struct virtchnl_queue_select queue_select; int err; struct vf_cmd_info args; memset(&queue_select, 0, sizeof(queue_select)); @@ -785,9 +785,9 @@ i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid, queue_select.tx_queues |= 1 << qid; if (on) - args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES; + args.ops = VIRTCHNL_OP_ENABLE_QUEUES; else - args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES; + args.ops = VIRTCHNL_OP_DISABLE_QUEUES; args.in_args = (u8 *)&queue_select; args.in_args_size = sizeof(queue_select); args.out_buffer = vf->aq_resp; @@ -861,10 +861,10 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, __rte_unused uint32_t index, __rte_unused uint32_t pool) { - struct i40e_virtchnl_ether_addr_list *list; + struct virtchnl_ether_addr_list *list; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ - sizeof(struct i40e_virtchnl_ether_addr)]; + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \ + sizeof(struct virtchnl_ether_addr)]; int err; struct vf_cmd_info args; @@ -876,13 +876,13 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev, return I40E_ERR_INVALID_MAC_ADDR; } - list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; + list = (struct virtchnl_ether_addr_list *)cmd_buffer; list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = 1; (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); - args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS; + args.ops = VIRTCHNL_OP_ADD_ETH_ADDR; args.in_args = cmd_buffer; args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; @@ -899,10 +899,10 @@ static void i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev, struct ether_addr *addr) { - struct i40e_virtchnl_ether_addr_list *list; + struct virtchnl_ether_addr_list *list; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \ - sizeof(struct i40e_virtchnl_ether_addr)]; + uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) + \ + sizeof(struct virtchnl_ether_addr)]; int err; struct vf_cmd_info args; @@ -914,13 +914,13 @@ i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev, return; } - list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer; + list = (struct virtchnl_ether_addr_list *)cmd_buffer; list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = 1; (void)rte_memcpy(list->list[0].addr, addr->addr_bytes, sizeof(addr->addr_bytes)); - args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + args.ops = VIRTCHNL_OP_DEL_ETH_ADDR; args.in_args = cmd_buffer; args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; @@ -947,13 +947,13 @@ static int i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct i40e_virtchnl_queue_select q_stats; + struct virtchnl_queue_select q_stats; int err; struct vf_cmd_info args; memset(&q_stats, 0, sizeof(q_stats)); q_stats.vsi_id = vf->vsi_res->vsi_id; - args.ops = I40E_VIRTCHNL_OP_GET_STATS; + args.ops = VIRTCHNL_OP_GET_STATS; args.in_args 
= (u8 *)&q_stats; args.in_args_size = sizeof(q_stats); args.out_buffer = vf->aq_resp; @@ -1050,18 +1050,18 @@ static int i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct i40e_virtchnl_vlan_filter_list *vlan_list; - uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + + struct virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + sizeof(uint16_t)]; int err; struct vf_cmd_info args; - vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; vlan_list->vsi_id = vf->vsi_res->vsi_id; vlan_list->num_elements = 1; vlan_list->vlan_id[0] = vlanid; - args.ops = I40E_VIRTCHNL_OP_ADD_VLAN; + args.ops = VIRTCHNL_OP_ADD_VLAN; args.in_args = (u8 *)&cmd_buffer; args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; @@ -1077,18 +1077,18 @@ static int i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - struct i40e_virtchnl_vlan_filter_list *vlan_list; - uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) + + struct virtchnl_vlan_filter_list *vlan_list; + uint8_t cmd_buffer[sizeof(struct virtchnl_vlan_filter_list) + sizeof(uint16_t)]; int err; struct vf_cmd_info args; - vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer; + vlan_list = (struct virtchnl_vlan_filter_list *)cmd_buffer; vlan_list->vsi_id = vf->vsi_res->vsi_id; vlan_list->num_elements = 1; vlan_list->vlan_id[0] = vlanid; - args.ops = I40E_VIRTCHNL_OP_DEL_VLAN; + args.ops = VIRTCHNL_OP_DEL_VLAN; args.in_args = (u8 *)&cmd_buffer; args.in_args_size = sizeof(cmd_buffer); args.out_buffer = vf->aq_resp; @@ -1178,7 +1178,7 @@ i40evf_reset_vf(struct i40e_hw *hw) reset = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT; - if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset) + if (VIRTCHNL_VFR_COMPLETED == reset || VIRTCHNL_VFR_VFACTIVE == reset) break; else rte_delay_ms(50); @@ -1242,8 +1242,8 @@ i40evf_init_vf(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "check_api version failed"); goto err_aq; } - bufsz = sizeof(struct i40e_virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); + bufsz = sizeof(struct virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); vf->vf_res = rte_zmalloc("vf_res", bufsz, 0); if (!vf->vf_res) { PMD_INIT_LOG(ERR, "unable to allocate vf_res memory"); @@ -1257,7 +1257,7 @@ i40evf_init_vf(struct rte_eth_dev *dev) /* got VF config message back from PF, now we can parse it */ for (i = 0; i < vf->vf_res->num_vsis; i++) { - if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV) vf->vsi_res = &vf->vf_res->vsi_res[i]; } @@ -1269,7 +1269,7 @@ i40evf_init_vf(struct rte_eth_dev *dev) if (hw->mac.type == I40E_MAC_X722_VF) vf->flags = I40E_FLAG_RSS_AQ_CAPABLE; vf->vsi.vsi_id = vf->vsi_res->vsi_id; - vf->vsi.type = vf->vsi_res->vsi_type; + vf->vsi.type = (enum i40e_vsi_type)vf->vsi_res->vsi_type; vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs; vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); @@ -1321,22 +1321,22 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg, __rte_unused uint16_t msglen) { - struct i40e_virtchnl_pf_event *pf_msg = - (struct i40e_virtchnl_pf_event 
*)msg; + struct virtchnl_pf_event *pf_msg = + (struct virtchnl_pf_event *)msg; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); switch (pf_msg->event) { - case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: + case VIRTCHNL_EVENT_RESET_IMPENDING: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL, NULL); break; - case I40E_VIRTCHNL_EVENT_LINK_CHANGE: + case VIRTCHNL_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); vf->link_up = pf_msg->event_data.link_event.link_status; vf->link_speed = pf_msg->event_data.link_event.link_speed; break; - case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE: + case VIRTCHNL_EVENT_PF_DRIVER_CLOSE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event"); break; default: @@ -1352,7 +1352,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev) struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_arq_event_info info; uint16_t pending, aq_opc; - enum i40e_virtchnl_ops msg_opc; + enum virtchnl_ops msg_opc; enum i40e_status_code msg_ret; int ret; @@ -1377,13 +1377,13 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev) * cookie_high of struct i40e_aq_desc, while return error code * are stored in cookie_low, Which is done by * i40e_aq_send_msg_to_vf in PF driver.*/ - msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32( + msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32( info.desc.cookie_high); msg_ret = (enum i40e_status_code)rte_le_to_cpu_32( info.desc.cookie_low); switch (aq_opc) { case i40e_aqc_opc_send_msg_to_vf: - if (msg_opc == I40E_VIRTCHNL_OP_EVENT) + if (msg_opc == VIRTCHNL_OP_EVENT) /* process event*/ i40evf_handle_pf_event(dev, info.msg_buf, info.msg_len); @@ -1592,8 +1592,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev) */ if (!conf->rxmode.hw_strip_crc) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); - if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && - (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) { + if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) && + (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) { /* Peer is running non-DPDK PF driver. */ PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip"); return -EINVAL; @@ -2001,7 +2001,7 @@ i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) static void i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) { - struct i40e_virtchnl_ether_addr_list *list; + struct virtchnl_ether_addr_list *list; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); int err, i, j; int next_begin = 0; @@ -2012,11 +2012,11 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) do { j = 0; - len = sizeof(struct i40e_virtchnl_ether_addr_list); + len = sizeof(struct virtchnl_ether_addr_list); for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) { if (is_zero_ether_addr(&dev->data->mac_addrs[i])) continue; - len += sizeof(struct i40e_virtchnl_ether_addr); + len += sizeof(struct virtchnl_ether_addr); if (len >= I40E_AQ_BUF_SZ) { next_begin = i + 1; break; @@ -2043,8 +2043,8 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add) } list->vsi_id = vf->vsi_res->vsi_id; list->num_elements = j; - args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS : - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS; + args.ops = add ? 
VIRTCHNL_OP_ADD_ETH_ADDR : + VIRTCHNL_OP_DEL_ETH_ADDR; args.in_args = (uint8_t *)list; args.in_args_size = len; args.out_buffer = vf->aq_resp; diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index a21fae1d7e..61a3a36e1b 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -61,7 +61,7 @@ static int i40e_pf_host_switch_queues(struct i40e_pf_vf *vf, - struct i40e_virtchnl_queue_select *qsel, + struct virtchnl_queue_select *qsel, bool on); /** @@ -128,7 +128,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) struct i40e_pf *pf; uint16_t vf_id, abs_vf_id, vf_msix_num; int ret; - struct i40e_virtchnl_queue_select qsel; + struct virtchnl_queue_select qsel; if (vf == NULL) return -EINVAL; @@ -139,7 +139,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) abs_vf_id = vf_id + hw->func_caps.vf_base_id; /* Notify VF that we are in VFR progress */ - I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS); + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_INPROGRESS); /* * If require a SW VF reset, a VFLR interrupt will be generated, @@ -220,7 +220,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) } /* Reset done, Set COMPLETE flag and clear reset bit */ - I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED); + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_COMPLETED); val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id)); val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val); @@ -248,7 +248,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) return -EFAULT; } - I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE); + I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), VIRTCHNL_VFR_VFACTIVE); return ret; } @@ -277,7 +277,7 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, static void i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op) { - struct i40e_virtchnl_version_info info; + struct virtchnl_version_info info; /* Respond like a Linux PF host in order to support both DPDK VF and * Linux VF driver. The expense is original DPDK host specific feature @@ -286,16 +286,16 @@ i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op) * DPDK VF also can't identify host driver by version number returned. * It always assume talking with Linux PF. 
*/ - info.major = I40E_VIRTCHNL_VERSION_MAJOR; - info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + info.major = VIRTCHNL_VERSION_MAJOR; + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; if (b_op) - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, (uint8_t *)&info, sizeof(info)); else - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, I40E_NOT_SUPPORTED, (uint8_t *)&info, sizeof(info)); @@ -313,22 +313,22 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf) static int i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op) { - struct i40e_virtchnl_vf_resource *vf_res = NULL; + struct virtchnl_vf_resource *vf_res = NULL; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); uint32_t len = 0; int ret = I40E_SUCCESS; if (!b_op) { i40e_pf_host_send_msg_to_vf(vf, - I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_GET_VF_RESOURCES, I40E_NOT_SUPPORTED, NULL, 0); return ret; } /* only have 1 VSI by default */ - len = sizeof(struct i40e_virtchnl_vf_resource) + + len = sizeof(struct virtchnl_vf_resource) + I40E_DEFAULT_VF_VSI_NUM * - sizeof(struct i40e_virtchnl_vsi_resource); + sizeof(struct virtchnl_vsi_resource); vf_res = rte_zmalloc("i40e_vf_res", len, 0); if (vf_res == NULL) { @@ -339,21 +339,21 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op) goto send_msg; } - vf_res->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_VLAN; vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf; vf_res->num_queue_pairs = vf->vsi->nb_qps; vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM; /* Change below setting if PF host can support more VSIs for VF */ - vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV; + vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id; vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps; ether_addr_copy(&vf->mac_addr, (struct ether_addr *)vf_res->vsi_res[0].default_mac_addr); send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, ret, (uint8_t *)vf_res, len); rte_free(vf_res); @@ -363,7 +363,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op) static int i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw, struct i40e_pf_vf *vf, - struct i40e_virtchnl_rxq_info *rxq, + struct virtchnl_rxq_info *rxq, uint8_t crcstrip) { int err = I40E_SUCCESS; @@ -431,7 +431,7 @@ i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi, static int i40e_pf_host_hmc_config_txq(struct i40e_hw *hw, struct i40e_pf_vf *vf, - struct i40e_virtchnl_txq_info *txq) + struct virtchnl_txq_info *txq) { int err = I40E_SUCCESS; struct i40e_hmc_obj_txq tx_ctx; @@ -480,14 +480,14 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf, { struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); struct i40e_vsi *vsi = vf->vsi; - struct i40e_virtchnl_vsi_queue_config_info *vc_vqci = - (struct i40e_virtchnl_vsi_queue_config_info *)msg; - struct i40e_virtchnl_queue_pair_info *vc_qpi; + struct virtchnl_vsi_queue_config_info *vc_vqci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *vc_qpi; int i, ret = I40E_SUCCESS; if (!b_op) { i40e_pf_host_send_msg_to_vf(vf, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_NOT_SUPPORTED, NULL, 0); return ret; } 
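The comment in i40e_pf_host_process_cmd_version() above explains the design: the PF host always answers VIRTCHNL_OP_VERSION the way a Linux PF would, and the VF side (i40evf_check_api_version(), earlier in this patch) then tells a DPDK PF apart from a Linux PF only by the major number it receives. A rough standalone sketch of that decision follows; the macro values are placeholders for illustration only, the driver takes the real ones from virtchnl.h and i40e_pf.h.

/* Standalone sketch of the PF-host detection done in i40evf_check_api_version()
 * after the virtchnl rename.  Macro values below are assumed placeholders.
 */
#include <stdio.h>

#define VIRTCHNL_VERSION_MAJOR	1	/* assumed value, see virtchnl.h */
#define VIRTCHNL_VERSION_MINOR	1	/* assumed value, see virtchnl.h */
#define I40E_DPDK_VERSION_MAJOR	0xF	/* assumed value, see i40e_pf.h  */

static const char *
classify_pf_host(unsigned int major, unsigned int minor)
{
	if (major == I40E_DPDK_VERSION_MAJOR)
		return "DPDK PF host";
	if (major == VIRTCHNL_VERSION_MAJOR &&
	    minor <= VIRTCHNL_VERSION_MINOR)
		return "Linux PF host";
	return "unsupported PF/VF API version";
}

int main(void)
{
	/* Version pair as a Linux-style PF would report it in VIRTCHNL_OP_VERSION. */
	printf("%s\n", classify_pf_host(1, 0));
	return 0;
}

The rename does not change this negotiation; only the macro and enum names differ.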
@@ -511,9 +511,9 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf, /* * Apply VF RX queue setting to HMC. - * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, * then the extra information of - * 'struct i40e_virtchnl_queue_pair_extra_info' is needed, + * 'struct virtchnl_queue_pair_extra_info' is needed, * otherwise set the last parameter to NULL. */ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpi[i].rxq, @@ -533,7 +533,7 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, ret, NULL, 0); return ret; @@ -547,15 +547,15 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf, { struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); struct i40e_vsi *vsi = vf->vsi; - struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei = - (struct i40e_virtchnl_vsi_queue_config_ext_info *)msg; - struct i40e_virtchnl_queue_pair_ext_info *vc_qpei; + struct virtchnl_vsi_queue_config_ext_info *vc_vqcei = + (struct virtchnl_vsi_queue_config_ext_info *)msg; + struct virtchnl_queue_pair_ext_info *vc_qpei; int i, ret = I40E_SUCCESS; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -578,9 +578,9 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf, } /* * Apply VF RX queue setting to HMC. - * If the opcode is I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + * If the opcode is VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, * then the extra information of - * 'struct i40e_virtchnl_queue_pair_ext_info' is needed, + * 'struct virtchnl_queue_pair_ext_info' is needed, * otherwise set the last parameter to NULL. 
*/ if (i40e_pf_host_hmc_config_rxq(hw, vf, &vc_qpei[i].rxq, @@ -600,7 +600,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, ret, NULL, 0); return ret; @@ -608,7 +608,7 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf, static void i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf, - struct i40e_virtchnl_vector_map *vvm) + struct virtchnl_vector_map *vvm) { #define BITS_PER_CHAR 8 uint64_t linklistmap = 0, tempmap; @@ -711,9 +711,9 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, int ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); - struct i40e_virtchnl_irq_map_info *irqmap = - (struct i40e_virtchnl_irq_map_info *)msg; - struct i40e_virtchnl_vector_map *map; + struct virtchnl_irq_map_info *irqmap = + (struct virtchnl_irq_map_info *)msg; + struct virtchnl_vector_map *map; int i; uint16_t vector_id; unsigned long qbit_max; @@ -721,12 +721,12 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_NOT_SUPPORTED, NULL, 0); return ret; } - if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) { + if (msg == NULL || msglen < sizeof(struct virtchnl_irq_map_info)) { PMD_DRV_LOG(ERR, "buffer too short"); ret = I40E_ERR_PARAM; goto send_msg; @@ -773,7 +773,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, ret, NULL, 0); return ret; @@ -781,7 +781,7 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf, static int i40e_pf_host_switch_queues(struct i40e_pf_vf *vf, - struct i40e_virtchnl_queue_select *qsel, + struct virtchnl_queue_select *qsel, bool on) { int ret = I40E_SUCCESS; @@ -831,8 +831,8 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf, uint16_t msglen) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_queue_select *q_sel = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *q_sel = + (struct virtchnl_queue_select *)msg; if (msg == NULL || msglen != sizeof(*q_sel)) { ret = I40E_ERR_PARAM; @@ -841,7 +841,7 @@ i40e_pf_host_process_cmd_enable_queues(struct i40e_pf_vf *vf, ret = i40e_pf_host_switch_queues(vf, q_sel, true); send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, ret, NULL, 0); return ret; @@ -854,13 +854,13 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_queue_select *q_sel = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *q_sel = + (struct virtchnl_queue_select *)msg; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_DISABLE_QUEUES, + VIRTCHNL_OP_DISABLE_QUEUES, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -872,7 +872,7 @@ i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf, ret = i40e_pf_host_switch_queues(vf, q_sel, false); send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, ret, NULL, 0); return ret; @@ -886,8 +886,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf 
*vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_ether_addr_list *addr_list = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *addr_list = + (struct virtchnl_ether_addr_list *)msg; struct i40e_mac_filter_info filter; int i; struct ether_addr *mac; @@ -895,7 +895,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + VIRTCHNL_OP_ADD_ETH_ADDR, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -920,7 +920,7 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, ret, NULL, 0); return ret; @@ -933,15 +933,15 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_ether_addr_list *addr_list = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *addr_list = + (struct virtchnl_ether_addr_list *)msg; int i; struct ether_addr *mac; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + VIRTCHNL_OP_DEL_ETH_ADDR, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -962,7 +962,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret, NULL, 0); return ret; @@ -974,15 +974,15 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_vlan_filter_list *vlan_filter_list = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vlan_filter_list = + (struct virtchnl_vlan_filter_list *)msg; int i; uint16_t *vid; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_ADD_VLAN, + VIRTCHNL_OP_ADD_VLAN, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -1002,7 +1002,7 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, ret, NULL, 0); return ret; @@ -1015,15 +1015,15 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_vlan_filter_list *vlan_filter_list = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vlan_filter_list = + (struct virtchnl_vlan_filter_list *)msg; int i; uint16_t *vid; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_DEL_VLAN, + VIRTCHNL_OP_DEL_VLAN, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -1042,7 +1042,7 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf, } send_msg: - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, ret, NULL, 0); return ret; @@ -1056,15 +1056,15 @@ i40e_pf_host_process_cmd_config_promisc_mode( bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_promisc_info *promisc = - (struct i40e_virtchnl_promisc_info *)msg; + struct virtchnl_promisc_info *promisc = + (struct virtchnl_promisc_info *)msg; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); bool unicast = FALSE, multicast = FALSE; if (!b_op) { i40e_pf_host_send_msg_to_vf( vf, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_NOT_SUPPORTED, NULL, 0); return ret; } @@ -1074,21 +1074,21 
@@ i40e_pf_host_process_cmd_config_promisc_mode( goto send_msg; } - if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC) + if (promisc->flags & FLAG_VF_UNICAST_PROMISC) unicast = TRUE; ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi->seid, unicast, NULL, true); if (ret != I40E_SUCCESS) goto send_msg; - if (promisc->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + if (promisc->flags & FLAG_VF_MULTICAST_PROMISC) multicast = TRUE; ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi->seid, multicast, NULL); send_msg: i40e_pf_host_send_msg_to_vf(vf, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0); + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, ret, NULL, 0); return ret; } @@ -1099,12 +1099,12 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op) i40e_update_vsi_stats(vf->vsi); if (b_op) - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats, sizeof(vf->vsi->eth_stats)); else - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, I40E_NOT_SUPPORTED, (uint8_t *)&vf->vsi->eth_stats, sizeof(vf->vsi->eth_stats)); @@ -1120,8 +1120,8 @@ i40e_pf_host_process_cmd_cfg_vlan_offload( bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_vlan_offload_info *offload = - (struct i40e_virtchnl_vlan_offload_info *)msg; + struct virtchnl_vlan_offload_info *offload = + (struct virtchnl_vlan_offload_info *)msg; if (!b_op) { i40e_pf_host_send_msg_to_vf( @@ -1155,8 +1155,8 @@ i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf, bool b_op) { int ret = I40E_SUCCESS; - struct i40e_virtchnl_pvid_info *tpid_info = - (struct i40e_virtchnl_pvid_info *)msg; + struct virtchnl_pvid_info *tpid_info = + (struct virtchnl_pvid_info *)msg; if (!b_op) { i40e_pf_host_send_msg_to_vf( @@ -1183,39 +1183,39 @@ i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf, void i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) { - struct i40e_virtchnl_pf_event event; + struct virtchnl_pf_event event; - event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; + event.event = VIRTCHNL_EVENT_LINK_CHANGE; event.event_data.link_event.link_status = dev->data->dev_link.link_status; - /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */ + /* need to convert the ETH_SPEED_xxx into VIRTCHNL_LINK_SPEED_xxx */ switch (dev->data->dev_link.link_speed) { case ETH_SPEED_NUM_100M: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_100MB; break; case ETH_SPEED_NUM_1G: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_1GB; break; case ETH_SPEED_NUM_10G: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_10GB; break; case ETH_SPEED_NUM_20G: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_20GB; break; case ETH_SPEED_NUM_25G: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_25GB; break; case ETH_SPEED_NUM_40G: - event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB; + event.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB; break; default: event.event_data.link_event.link_speed = - I40E_LINK_SPEED_UNKNOWN; + VIRTCHNL_LINK_SPEED_UNKNOWN; 
break; } - i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT, + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); } @@ -1274,71 +1274,71 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, } switch (opcode) { - case I40E_VIRTCHNL_OP_VERSION : + case VIRTCHNL_OP_VERSION: PMD_DRV_LOG(INFO, "OP_VERSION received"); i40e_pf_host_process_cmd_version(vf, b_op); break; - case I40E_VIRTCHNL_OP_RESET_VF : + case VIRTCHNL_OP_RESET_VF: PMD_DRV_LOG(INFO, "OP_RESET_VF received"); i40e_pf_host_process_cmd_reset_vf(vf); break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_GET_VF_RESOURCES: PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received"); i40e_pf_host_process_cmd_get_vf_resource(vf, b_op); break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received"); i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT: + case VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT: PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received"); i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received"); i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_ENABLE_QUEUES: PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received"); if (b_op) { i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen); i40e_notify_vf_link_status(dev, vf); } else { i40e_pf_host_send_msg_to_vf( - vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_NOT_SUPPORTED, NULL, 0); } break; - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received"); i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case VIRTCHNL_OP_ADD_ETH_ADDR: PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received"); i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + case VIRTCHNL_OP_DEL_ETH_ADDR: PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received"); i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_ADD_VLAN: PMD_DRV_LOG(INFO, "OP_ADD_VLAN received"); i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_DEL_VLAN: + case VIRTCHNL_OP_DEL_VLAN: PMD_DRV_LOG(INFO, "OP_DEL_VLAN received"); i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received"); i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen, b_op); break; - case I40E_VIRTCHNL_OP_GET_STATS: + case VIRTCHNL_OP_GET_STATS: PMD_DRV_LOG(INFO, "OP_GET_STATS received"); i40e_pf_host_process_cmd_get_stats(vf, b_op); break; diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h index 0961f06581..4f93a4d0d8 100644 --- a/drivers/net/i40e/i40e_pf.h +++ b/drivers/net/i40e/i40e_pf.h @@ -35,7 +35,7 @@ #define _I40E_PF_H_ /* VERSION info to exchange between VF and PF host. In case VF works with - * ND kernel driver, it reads I40E_VIRTCHNL_VERSION_MAJOR/MINOR. In + * ND kernel driver, it reads VIRTCHNL_VERSION_MAJOR/MINOR. 
In * case works with DPDK host, it reads version below. Then VF realize who it * is talking to and use proper language to communicate. * */ @@ -49,45 +49,45 @@ #define I40E_DPDK_OFFSET 0x100 /* DPDK pf driver specific command to VF */ -enum i40e_virtchnl_ops_dpdk { +enum virtchnl_ops_dpdk { /* * Keep some gap between Linux PF commands and * DPDK PF extended commands. */ - I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION + + I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = VIRTCHNL_OP_VERSION + I40E_DPDK_OFFSET, I40E_VIRTCHNL_OP_CFG_VLAN_PVID, - I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, + VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT, }; /* A structure to support extended info of a receive queue. */ -struct i40e_virtchnl_rxq_ext_info { +struct virtchnl_rxq_ext_info { uint8_t crcstrip; }; /* * A structure to support extended info of queue pairs, an additional field - * is added, comparing to original 'struct i40e_virtchnl_queue_pair_info'. + * is added, comparing to original 'struct virtchnl_queue_pair_info'. */ -struct i40e_virtchnl_queue_pair_ext_info { +struct virtchnl_queue_pair_ext_info { /* vsi_id and queue_id should be identical for both rx and tx queues.*/ - struct i40e_virtchnl_txq_info txq; - struct i40e_virtchnl_rxq_info rxq; - struct i40e_virtchnl_rxq_ext_info rxq_ext; + struct virtchnl_txq_info txq; + struct virtchnl_rxq_info rxq; + struct virtchnl_rxq_ext_info rxq_ext; }; /* * A structure to support extended info of VSI queue pairs, - * 'struct i40e_virtchnl_queue_pair_ext_info' is used, see its original - * of 'struct i40e_virtchnl_queue_pair_info'. + * 'struct virtchnl_queue_pair_ext_info' is used, see its original + * of 'struct virtchnl_queue_pair_info'. */ -struct i40e_virtchnl_vsi_queue_config_ext_info { +struct virtchnl_vsi_queue_config_ext_info { uint16_t vsi_id; uint16_t num_queue_pairs; - struct i40e_virtchnl_queue_pair_ext_info qpair[0]; + struct virtchnl_queue_pair_ext_info qpair[0]; }; -struct i40e_virtchnl_vlan_offload_info { +struct virtchnl_vlan_offload_info { uint16_t vsi_id; uint8_t enable_vlan_strip; uint8_t reserved; @@ -106,7 +106,7 @@ struct i40e_virtchnl_vlan_offload_info { * enable op, needs to specify the pvid. PF returns status * code in retval. */ -struct i40e_virtchnl_pvid_info { +struct virtchnl_pvid_info { uint16_t vsi_id; struct i40e_vsi_vlan_pvid_info info; }; diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h index 11adfd0f9b..4a867ad6fe 100644 --- a/drivers/net/i40e/rte_pmd_i40e.h +++ b/drivers/net/i40e/rte_pmd_i40e.h @@ -59,7 +59,7 @@ enum rte_pmd_i40e_mb_event_rsp { */ struct rte_pmd_i40e_mb_event_param { uint16_t vfid; /**< Virtual Function number */ - uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */ + uint16_t msg_type; /**< VF to PF message type, see virtchnl_ops */ uint16_t retval; /**< return value */ void *msg; /**< pointer to message */ uint16_t msglen; /**< length of the message */
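Most of the VF-side changes in this patch follow one pattern: a command buffer is sized as the virtchnl header structure plus its trailing elements, cast to the renamed type, filled in, and handed to i40evf_execute_vf_cmd() with the renamed opcode. The standalone sketch below reproduces only the buffer layout used by i40evf_add_vlan()/i40evf_del_vlan(); the struct is a local stand-in mirroring the fields the patch references (vsi_id, num_elements, vlan_id[]), not the real virtchnl.h declaration, and the VSI id and VLAN id are arbitrary example values.

/* Standalone illustration of the cmd_buffer layout used by the VLAN
 * filter commands after the rename.  The stand-in struct below only
 * mimics the fields used in the patch; the driver itself uses
 * struct virtchnl_vlan_filter_list from virtchnl.h.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vlan_filter_list_stub {
	uint16_t vsi_id;
	uint16_t num_elements;
	uint16_t vlan_id[1];	/* trailing array; one entry in this sketch */
};

int main(void)
{
	/* Same sizing scheme as the driver: header struct + one extra uint16_t. */
	uint8_t cmd_buffer[sizeof(struct vlan_filter_list_stub) + sizeof(uint16_t)];
	struct vlan_filter_list_stub *vlan_list;

	memset(cmd_buffer, 0, sizeof(cmd_buffer));
	vlan_list = (struct vlan_filter_list_stub *)cmd_buffer;
	vlan_list->vsi_id = 3;		/* arbitrary VSI id for the example */
	vlan_list->num_elements = 1;
	vlan_list->vlan_id[0] = 100;	/* VLAN id to add */

	/* The driver would now set args.ops = VIRTCHNL_OP_ADD_VLAN, point
	 * args.in_args at cmd_buffer and call i40evf_execute_vf_cmd().
	 */
	printf("payload size: %zu bytes\n", sizeof(cmd_buffer));
	return 0;
}

The MAC filter commands in the patch build their buffers the same way, only with struct virtchnl_ether_addr_list and VIRTCHNL_OP_ADD_ETH_ADDR/VIRTCHNL_OP_DEL_ETH_ADDR.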