qede: add SRIOV support

This patch adds the following SRIOV features to the qede PMD:
 - VF configuration
 - VF initialization/de-initialization
 - VF-PF communications channel
 - statistics capture and query
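
A typical VF bring-up, stitched together from the APIs this patch adds (a hedged sketch only: local variables are placeholders, error handling is elided, and the real flow spans ecore_dev.c and ecore_vf.c):

	rc = ecore_vf_hw_prepare(p_dev);	/* sends ACQUIRE over the channel */
	rc = ecore_vf_pf_init(p_hwfn);		/* VF counterpart of hw_init */
	rc = ecore_vf_pf_vport_start(p_hwfn, vport_id, mtu,
				     1 /* strip inner vlan */, tpa_mode,
				     max_buffers_per_cqe,
				     1 /* untagged only */);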

Signed-off-by: Harish Patil <harish.patil@qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko@qlogic.com>
Authored by Rasesh Mody on 2016-04-27 07:18:40 -07:00; committed by Bruce Richardson
parent 5cdd769a26
commit 86a2265e59
22 changed files with 7968 additions and 95 deletions


@ -82,6 +82,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c


@ -14,6 +14,7 @@
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_iov_api.h"
unsigned long qede_log2_align(unsigned long n)
{
@ -81,6 +82,14 @@ inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn,
__rte_unused struct vf_pf_resc_request *resc_req,
struct ecore_vf_acquire_sw_info *vf_sw_info)
{
vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE;
vf_sw_info->override_fw_version = 1;
}
void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
dma_addr_t *phys, size_t size)
{


@ -22,6 +22,9 @@
/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
void qed_link_update(struct ecore_hwfn *hwfn);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#undef __BIG_ENDIAN
@ -302,7 +305,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BUILD_BUG_ON(cond) nothing
#define ETH_ALEN ETHER_ADDR_LEN
-#define OSAL_LINK_UPDATE(hwfn) nothing
+#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
/* SR-IOV channel */
@ -315,12 +318,15 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
-#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) nothing
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
struct ecore_vf_acquire_sw_info *);
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
+qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
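/* For reference, the ACQUIRE path in ecore_vf.c (whose diff is
 * suppressed on this page) presumably invokes the
 * OSAL_VF_FILL_ACQUIRE_RESC_REQ hook above along these lines; 'p_req'
 * and its 'resc_request' member are assumptions derived from the
 * macro signature, not taken from this patch:
 *
 *	struct ecore_vf_acquire_sw_info vf_sw_info;
 *
 *	OSAL_MEM_ZERO(&vf_sw_info, sizeof(vf_sw_info));
 *	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &p_req->resc_request,
 *				      &vf_sw_info);
 */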


@ -50,6 +50,7 @@ enum ecore_nvm_cmd {
#ifndef LINUX_REMOVE
#if !defined(CONFIG_ECORE_L2)
#define CONFIG_ECORE_L2
#define CONFIG_ECORE_SRIOV
#endif
#endif
@ -79,6 +80,15 @@ static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
return db_addr;
}
/* @DPDK: This is a backport from latest ecore for TSS fix */
static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
~((1 << (p_hwfn->p_dev->cache_shift)) - 1))


@ -21,6 +21,8 @@
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
@ -126,6 +128,9 @@ void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
if (IS_VF(p_dev))
return;
OSAL_FREE(p_dev, p_dev->fw_data);
p_dev->fw_data = OSAL_NULL;
@ -149,6 +154,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_eq_free(p_hwfn, p_hwfn->p_eq);
ecore_consq_free(p_hwfn, p_hwfn->p_consq);
ecore_int_free(p_hwfn);
ecore_iov_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
}
@ -161,7 +167,11 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u16 num_pqs, multi_cos_tcs = 1;
#ifdef CONFIG_ECORE_SRIOV
u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
#else
u16 num_vfs = 0;
#endif
OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
@ -363,6 +373,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
struct ecore_eq *p_eq;
int i;
if (IS_VF(p_dev))
return rc;
p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(struct ecore_fw_data));
if (!p_dev->fw_data)
@ -440,6 +453,10 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
rc = ecore_iov_alloc(p_hwfn);
if (rc)
goto alloc_err;
/* EQ */
p_eq = ecore_eq_alloc(p_hwfn, 256);
if (!p_eq)
@ -481,6 +498,9 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
{
int i;
if (IS_VF(p_dev))
return;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@ -496,6 +516,8 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
p_hwfn->mcp_info->mfw_mb_length);
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
@ -1250,13 +1272,22 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
u32 load_code, param;
int i, j;
if (IS_PF(p_dev)) {
rc = ecore_init_fw_data(p_dev, bin_fw_data);
if (rc != ECORE_SUCCESS)
return rc;
}
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_init(p_hwfn);
if (rc)
return rc;
continue;
}
/* Enable DMAE in PXP */
rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@ -1416,6 +1447,11 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@ -1454,6 +1490,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
OSAL_MSLEEP(1);
}
if (IS_PF(p_dev)) {
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
@ -1462,6 +1499,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
p_dev->hwfns[0].p_main_ptt, false);
if (t_rc != ECORE_SUCCESS)
rc = t_rc;
}
return rc;
}
@ -1474,6 +1512,11 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
@ -1499,6 +1542,9 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
if (IS_VF(p_hwfn->p_dev))
return;
/* Re-open incoming traffic */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@ -1528,6 +1574,13 @@ enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_reset(p_hwfn);
if (rc)
return rc;
continue;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@ -1657,7 +1710,11 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
#ifdef CONFIG_ECORE_SRIOV
max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
max_vf_vlan_filters = 0;
#endif
ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[ECORE_SB] = OSAL_MIN_T(u32,
@ -2020,6 +2077,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
{
enum _ecore_status_t rc;
rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc)
return rc;
/* TODO In get_hw_info, amongst others:
* Get MCP FW revision and determine according to it the supported
* features (e.g. DCB)
@ -2177,6 +2238,9 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
{
int j;
if (IS_VF(p_dev))
return;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
@ -2276,6 +2340,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc;
if (IS_VF(p_dev))
return ecore_vf_hw_prepare(p_dev);
/* Store the precompiled init data ptrs */
ecore_init_iro_array(p_dev);
@ -2327,6 +2394,11 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
ecore_vf_pf_release(p_hwfn);
continue;
}
ecore_init_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
@ -2954,6 +3026,11 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
{
struct coalescing_timeset *p_coalesce_timeset;
if (IS_VF(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
return ECORE_INVAL;
}
if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
DP_NOTICE(p_hwfn, true,
"Coalescing configuration not enabled\n");


@ -13,6 +13,7 @@
#include "ecore_hw.h"
#include "reg_addr.h"
#include "ecore_utils.h"
#include "ecore_iov_api.h"
#ifndef ASIC_ONLY
#define ECORE_EMUL_FACTOR 2000
@ -243,8 +244,12 @@ static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
quota = OSAL_MIN_T(osal_size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
if (IS_PF(p_hwfn->p_dev)) {
ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = ecore_ptt_get_bar_addr(p_ptt);
} else {
hw_offset = hw_addr + done;
}
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);


@ -16,6 +16,7 @@
#include "ecore_init_fw_funcs.h"
#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"
@ -102,6 +103,9 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
if (IS_VF(p_hwfn->p_dev))
return ECORE_SUCCESS;
rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(bool) * RUNTIME_ARRAY_SIZE);
if (!rt_data->b_valid)


@ -16,6 +16,8 @@
#include "ecore_int.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"
@ -373,6 +375,9 @@ void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
struct cau_pi_entry pi_entry;
u32 sb_offset, pi_offset;
if (IS_VF(p_hwfn->p_dev))
return; /* @@@TBD MichalK- VF CAU... */
sb_offset = igu_sb_id * PIS_PER_SB;
OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
@ -401,6 +406,7 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
sb_info->sb_ack = 0;
OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
if (IS_PF(p_hwfn->p_dev))
ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0);
}
@ -421,8 +427,10 @@ static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == ECORE_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
-else
+else if (IS_PF(p_hwfn->p_dev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
else
igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
if (sb_id == ECORE_SP_SB_ID)
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
@ -457,9 +465,17 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
if (IS_PF(p_hwfn->p_dev)) {
sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
} else {
sb_info->igu_addr =
(u8 OSAL_IOMEM *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
}
sb_info->flags |= ECORE_SB_INFO_INIT;
ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
@ -687,6 +703,9 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
if (IS_VF(p_hwfn->p_dev))
return;
ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
@ -853,8 +872,14 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
#ifdef CONFIG_ECORE_SRIOV
min_vf = p_hwfn->hw_info.first_vf_in_pf;
max_vf = p_hwfn->hw_info.first_vf_in_pf +
p_hwfn->p_dev->sriov_info.total_vfs;
#else
min_vf = 0;
max_vf = 0;
#endif
for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
sb_id++) {


@ -0,0 +1,933 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__
#include "ecore_status.h"
#define ECORE_VF_ARRAY_LENGTH (3)
#define IS_VF(p_dev) ((p_dev)->b_is_vf)
#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
#ifdef CONFIG_ECORE_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->sriov_info.total_vfs))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
/* @@@ TBD MichalK - what should this number be */
#define ECORE_MAX_VF_CHAINS_PER_PF 16
/* vport update extended feature tlvs flags */
enum ecore_iov_vport_update_flag {
ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
ECORE_IOV_VP_UPDATE_MCAST = 3,
ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
ECORE_IOV_VP_UPDATE_RSS = 5,
ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
ECORE_IOV_VP_UPDATE_MAX = 8,
};
struct ecore_mcp_link_params;
struct ecore_mcp_link_state;
struct ecore_mcp_link_capabilities;
/* These defines are used by the hw-channel; should never change order */
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
struct ecore_vf_acquire_sw_info {
u32 driver_version;
u8 os_type;
bool override_fw_version;
};
struct ecore_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
};
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
VF_PF_UNKNOWN_STATE = 0,
VF_PF_WAIT_FOR_START_REQUEST = 1,
VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
VF_PF_REQUEST_IN_PROCESSING = 3,
VF_PF_RESPONSE_READY = 4,
};
struct ecore_iov_sw_mbx {
enum mbx_state mbx_state;
u32 request_size;
u32 request_offset;
u32 response_size;
u32 response_offset;
};
/**
* @brief Get the vf sw mailbox params
*
* @param p_hwfn
* @param rel_vf_id
*
* @return struct ecore_iov_sw_mbx*
*/
struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
#endif
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief mark/clear all VFs before/after an incoming PCIe sriov
* disable.
*
* @param p_hwfn
* @param to_disable
*/
void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable);
/**
* @brief mark/clear chosen VFs before/after an incoming PCIe
* sriov disable.
*
* @param p_hwfn
* @param to_disable
*/
void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id, u8 to_disable);
/**
*
* @brief ecore_iov_init_hw_for_vf - initialize the HW for
* enabling access of a VF. Also includes preparing the
* IGU for VF access. This needs to be called AFTER hw is
* initialized and BEFORE VF is loaded inside the VM.
*
* @param p_hwfn
* @param p_ptt
* @param rel_vf_id
* @param num_rx_queues
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id, u16 num_rx_queues);
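/* A hedged sketch of the ordering this implies on the PF side
 * (identifiers are placeholders):
 *
 *	... after ecore_hw_init() has completed ...
 *	for (i = 0; i < num_vfs; i++) {
 *		rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, i,
 *					      num_rx_queues);
 *		if (rc != ECORE_SUCCESS)
 *			break;
 *	}
 *	... only now may the VFs be loaded inside VMs ...
 */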
/**
* @brief ecore_iov_process_mbx_req - process a request received
* from the VF
*
* @param p_hwfn
* @param p_ptt
* @param vfid
*/
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, int vfid);
/**
* @brief ecore_iov_release_hw_for_vf - called once the upper layer
* knows the VF is done with it; can release any resources
* allocated for the VF at this point. This must be done once
* we know the VF is no longer loaded in the VM.
*
* @param p_hwfn
* @param p_ptt
* @param rel_vf_id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id);
#ifndef LINUX_REMOVE
/**
* @brief ecore_iov_set_vf_ctx - set a context for a given VF
*
* @param p_hwfn
* @param vf_id
* @param ctx
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
u16 vf_id, void *ctx);
#endif
/**
* @brief FLR cleanup for all VFs
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief FLR cleanup for single VF
*
* @param p_hwfn
* @param p_ptt
* @param rel_vf_id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 rel_vf_id);
/**
* @brief Update the bulletin with link information. Notice this does NOT
* send a bulletin update, only updates the PF's bulletin.
*
* @param p_hwfn
* @param p_vf
* @param params - the link params to use for the VF link configuration
* @param link - the link output to use for the VF link configuration
* @param p_caps - the link default capabilities.
*/
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities *p_caps);
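/* Usage sketch: since this only updates the PF-side copy, a caller
 * that wants the VF to observe the change would pair it with a
 * bulletin post, e.g.:
 *
 *	ecore_iov_set_link(p_hwfn, vfid, &params, &link, &caps);
 *	rc = ecore_iov_post_vf_bulletin(p_hwfn, vfid, p_ptt);
 */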
/**
* @brief Returns link information as perceived by VF.
*
* @param p_hwfn
* @param p_vf
* @param p_params - the link params visible to vf.
* @param p_link - the link state visible to vf.
* @param p_caps - the link default capabilities visible to vf.
*/
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities *p_caps);
/**
* @brief return if the VF is pending FLR
*
* @param p_hwfn
* @param rel_vf_id
*
* @return bool
*/
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief Check if given VF ID @vfid is valid
* w.r.t. @b_enabled_only value
* if b_enabled_only = true - only enabled VF id is valid
* else any VF id less than max_vfs is valid
*
* @param p_hwfn
* @param rel_vf_id - Relative VF ID
* @param b_enabled_only - consider only enabled VF
*
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id, bool b_enabled_only);
/**
* @brief Get VF's public info structure
*
* @param p_hwfn
* @param vfid - Relative VF ID
* @param b_enabled_only - false if want to access even if vf is disabled
*
* @return struct ecore_public_vf_info *
*/
struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
*p_hwfn, u16 vfid,
bool b_enabled_only);
/**
* @brief Set pending events bitmap for given @vfid
*
* @param p_hwfn
* @param vfid
*/
void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
/**
* @brief Copy pending events bitmap in @events and clear
* original copy of events
*
* @param p_hwfn
*/
void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
u64 *events);
/**
* @brief Copy VF's message to PF's buffer
*
* @param p_hwfn
* @param ptt
* @param vfid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *ptt, int vfid);
/**
* @brief Set forced MAC address in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
*
* @param p_hwfn
* @param mac
* @param vfid
*/
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
u8 *mac, int vfid);
/**
* @brief Set MAC address in PFs copy of bulletin board without
* configuring FW/HW.
*
* @param p_hwfn
* @param mac
* @param vfid
*/
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
u8 *mac, int vfid);
/**
* @brief Set forced VLAN [pvid] in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
* Setting of pvid 0 would clear the feature.
* @param p_hwfn
* @param pvid
* @param vfid
*/
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid);
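/* E.g., force pvid 100 on a given VF and later clear it again
 * (a pvid of 0 clears the feature, per the note above):
 *
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, vfid);
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 0, vfid);
 */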
/**
* @brief Set default behaviour of a VF in case no VLANs are
* configured for it, i.e., whether to accept only untagged
* traffic or all traffic.
* Must be called prior to the VF vport-start.
*
* @param p_hwfn
* @param b_untagged_only
* @param vfid
*
* @return ECORE_SUCCESS if configuration would stick.
*/
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid);
/**
* @brief Get VFs opaque fid.
*
* @param p_hwfn
* @param vfid
* @param opaque_fid
*/
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
u16 *opaque_fid);
/**
* @brief Get VFs VPORT id.
*
* @param p_hwfn
* @param vfid
* @param p_vport_id
*/
void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
u8 *p_vport_id);
/**
* @brief Check if VF has VPORT instance. This can be used
* to check if VPORT is active.
*
* @param p_hwfn
*/
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief PF posts the bulletin to the VF
*
* @param p_hwfn
* @param p_vf
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
int vfid,
struct ecore_ptt *p_ptt);
/**
* @brief Check if given VF (@vfid) is marked as stopped
*
* @param p_hwfn
* @param vfid
*
* @return bool : true if stopped
*/
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Configure VF anti spoofing
*
* @param p_hwfn
* @param vfid
* @param val - spoofchk value - true/false
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
int vfid, bool val);
/**
* @brief Get VF's configured spoof value.
*
* @param p_hwfn
* @param vfid
*
* @return bool - spoofchk value - true/false
*/
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Check for SRIOV sanity by PF.
*
* @param p_hwfn
* @param vfid
*
* @return bool - true if sanity checks passes, else false
*/
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Get the num of VF chains.
*
* @param p_hwfn
*
* @return u8
*/
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
/**
* @brief Get vf request mailbox params
*
* @param p_hwfn
* @param rel_vf_id
* @param pp_req_virt_addr
* @param p_req_virt_size
*/
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id,
void **pp_req_virt_addr,
u16 *p_req_virt_size);
/**
* @brief Get vf mailbox params
*
* @param p_hwfn
* @param rel_vf_id
* @param pp_reply_virt_addr
* @param p_reply_virt_size
*/
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id,
void **pp_reply_virt_addr,
u16 *p_reply_virt_size);
/**
* @brief Validate if the given length is a valid vfpf message
* length
*
* @param length
*
* @return bool
*/
bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
/**
* @brief Return the max pfvf message length
*
* @return u32
*/
u32 ecore_iov_pfvf_msg_length(void);
/**
* @brief Returns forced MAC address if one is configured
*
* @param p_hwfn
* @param rel_vf_id
*
* @return OSAL_NULL if mac isn't forced; otherwise, returns MAC.
*/
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief Returns pvid if one is configured
*
* @param p_hwfn
* @param rel_vf_id
*
* @return 0 if no pvid is configured, otherwise the pvid.
*/
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief Configure VFs tx rate
*
* @param p_hwfn
* @param p_ptt
* @param vfid
* @param val - tx rate value in Mb/sec.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid, int val);
/**
* @brief - Retrieves the statistics associated with a VF
*
* @param p_hwfn
* @param p_ptt
* @param vfid
* @param p_stats - this will be filled with the VF statistics
*
* @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
*/
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
struct ecore_eth_stats *p_stats);
/**
* @brief - Retrieves num of rxqs chains
*
* @param p_hwfn
* @param rel_vf_id
*
* @return num of rxqs chains.
*/
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Retrieves num of active rxqs chains
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Retrieves ctx pointer
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Retrieves VF`s num sbs
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Return true if VF is waiting for acquire
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Return true if VF is acquired but not initialized
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief - Return true if VF is acquired and initialized
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief - Get VF's vport min rate configured.
* @param p_hwfn
* @param rel_vf_id
*
* @return - rate in Mbps
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief - Configure min rate for VF's vport.
* @param p_dev
* @param vfid
* @param - rate in Mbps
*
* @return
*/
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
#else
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn,
u8 to_disable)
{
}
static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id,
u8 to_disable)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct
ecore_hwfn
* p_hwfn,
struct
ecore_ptt
* p_ptt,
u16 rel_vf_id,
u16
num_rx_queues)
{
return ECORE_INVAL;
}
static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct
ecore_hwfn
* p_hwfn,
struct
ecore_ptt
* p_ptt,
u16
rel_vf_id)
{
return ECORE_SUCCESS;
}
#ifndef LINUX_REMOVE
static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn
*p_hwfn, u16 vf_id,
void *ctx)
{
return ECORE_INVAL;
}
#endif
static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct
ecore_hwfn
* p_hwfn,
struct
ecore_ptt
* p_ptt)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
return ECORE_INVAL;
}
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities
*p_caps)
{
}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities
*p_caps)
{
}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
return false;
}
static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only)
{
return false;
}
static OSAL_INLINE struct ecore_public_vf_info *
ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid,
bool b_enabled_only)
{
return OSAL_NULL;
}
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn
*p_hwfn, u8 vfid)
{
}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct
ecore_hwfn
* p_hwfn,
u64 *events)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn
*p_hwfn,
struct ecore_ptt
*ptt, int vfid)
{
return ECORE_INVAL;
}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn
*p_hwfn, u8 *mac,
int vfid)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct
ecore_hwfn
* p_hwfn,
u8 *mac,
int vfid)
{
return ECORE_INVAL;
}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn
*p_hwfn, u16 pvid,
int vfid)
{
}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn,
int vfid, u16 *opaque_fid)
{
}
static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn,
int vfid, u8 *p_vport_id)
{
}
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn
*p_hwfn, int vfid)
{
return false;
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct
ecore_hwfn
* p_hwfn,
int vfid,
struct
ecore_ptt
* p_ptt)
{
return ECORE_INVAL;
}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn,
int vfid)
{
return false;
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn
*p_hwfn,
int vfid,
bool val)
{
return ECORE_INVAL;
}
static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn,
int vfid)
{
return false;
}
static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn,
int vfid)
{
return false;
}
static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
{
return 0;
}
static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn
*p_hwfn,
u16 rel_vf_id,
void
**pp_req_virt_addr,
u16 *
p_req_virt_size)
{
}
static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn
*p_hwfn,
u16 rel_vf_id,
void
**pp_reply_virt_addr,
u16 *
p_reply_virt_size)
{
}
static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
{
return false;
}
static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void)
{
return 0;
}
static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn
*p_hwfn, u16 rel_vf_id)
{
return OSAL_NULL;
}
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn
*p_hwfn,
u16 rel_vf_id)
{
return 0;
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct
ecore_hwfn
* p_hwfn,
struct
ecore_ptt
* p_ptt,
int vfid,
int val)
{
return ECORE_INVAL;
}
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
return 0;
}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn
*p_hwfn, u16 rel_vf_id)
{
return 0;
}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
return OSAL_NULL;
}
static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
return 0;
}
static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn
*p_hwfn, u16 rel_vf_id)
{
return false;
}
static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct
ecore_hwfn
* p_hwfn,
u16 rel_vf_id)
{
return false;
}
static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id)
{
return false;
}
static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn,
int vfid)
{
return 0;
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(
struct ecore_dev *p_dev, int vfid, u32 rate)
{
return ECORE_INVAL;
}
#endif
#endif


@ -22,6 +22,8 @@
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"
#define ECORE_MAX_SGES_NUM 16
@ -106,6 +108,14 @@ enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
p_params->mtu,
p_params->remove_inner_vlan,
p_params->tpa_mode,
p_params->max_buffers_per_cqe,
p_params->only_untagged);
return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
@ -339,6 +349,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
u8 abs_vport_id = 0, val;
u16 wordval;
if (IS_VF(p_hwfn->p_dev)) {
rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
return rc;
}
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
@ -428,6 +443,9 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
u8 abs_vport_id = 0;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_vport_stop(p_hwfn);
rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
@ -450,6 +468,19 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
struct ecore_filter_accept_flags *p_accept_flags)
{
struct ecore_sp_vport_update_params s_params;
OSAL_MEMSET(&s_params, 0, sizeof(s_params));
OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
sizeof(struct ecore_filter_accept_flags));
return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
u8 vport,
@ -474,6 +505,13 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
if (rc != ECORE_SUCCESS)
return rc;
continue;
}
rc = ecore_sp_vport_update(p_hwfn, &update_params,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS) {
@ -593,6 +631,17 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
u64 init_prod_val = 0;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_rxq_start(p_hwfn,
rx_queue_id,
sb,
sb_index,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size, pp_prod);
}
rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
if (rc != ECORE_SUCCESS)
return rc;
@ -651,6 +700,13 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
u16 qid, abs_rx_q_id = 0;
u8 i;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
rx_queue_id,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
@ -697,6 +753,10 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
u16 abs_rx_q_id = 0;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
cqe_completion);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@ -814,6 +874,14 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
u8 abs_stats_id = 0;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_txq_start(p_hwfn,
tx_queue_id,
sb,
sb_index,
pbl_addr, pbl_size, pp_doorbell);
}
rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
if (rc != ECORE_SUCCESS)
return rc;
@ -867,6 +935,9 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@ -1274,6 +1345,11 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
rc = ecore_sp_eth_filter_mcast(p_hwfn,
p_hwfn->hw_info.opaque_fid,
p_filter_cmd,
@ -1297,6 +1373,11 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
continue;
}
rc = ecore_sp_eth_filter_ucast(p_hwfn,
p_hwfn->hw_info.opaque_fid,
p_filter_cmd,
@ -1308,14 +1389,93 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
return rc;
}
/* IOV related */
enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid)
{
struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = opaque_vfid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_VF_START,
PROTOCOLID_COMMON, &init_data);
if (rc != ECORE_SUCCESS)
return rc;
p_ramrod = &p_ent->ramrod.vf_start;
p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid);
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
p_ramrod->personality = PERSONALITY_ETH;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
p_hwfn->hw_info.personality);
return ECORE_INVAL;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn)
{
return ECORE_NOTIMPL;
}
enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid)
{
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = opaque_vfid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_VF_STOP,
PROTOCOLID_COMMON, &init_data);
if (rc != ECORE_SUCCESS)
return rc;
p_ramrod = &p_ent->ramrod.vf_stop;
p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
u32 *p_addr, u32 *p_len,
u16 statistics_bin)
{
if (IS_PF(p_hwfn->p_dev)) {
*p_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_pstorm_per_queue_stat);
} else {
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
*p_len = p_resp->pfdev_info.stats_info.pstats.len;
}
}
static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
@ -1349,9 +1509,17 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
if (IS_PF(p_hwfn->p_dev)) {
tstats_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
tstats_len = sizeof(struct tstorm_per_port_stat);
} else {
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
}
OSAL_MEMSET(&tstats, 0, sizeof(tstats));
ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
@ -1366,9 +1534,17 @@ static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
u32 *p_addr, u32 *p_len,
u16 statistics_bin)
{
if (IS_PF(p_hwfn->p_dev)) {
*p_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_ustorm_per_queue_stat);
} else {
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
*p_len = p_resp->pfdev_info.stats_info.ustats.len;
}
}
static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
@ -1397,9 +1573,17 @@ static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
u32 *p_addr, u32 *p_len,
u16 statistics_bin)
{
if (IS_PF(p_hwfn->p_dev)) {
*p_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_mstorm_per_queue_stat);
} else {
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
*p_len = p_resp->pfdev_info.stats_info.mstats.len;
}
}
static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
@ -1524,23 +1708,27 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
if (IS_PF(p_dev)) {
/* The main vport index is relative first */
if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
DP_ERR(p_hwfn, "No vport available!\n");
goto out;
}
}
-if (!p_ptt) {
+if (IS_PF(p_dev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
-true);
+IS_PF(p_dev) ? true : false);
out:
if (IS_PF(p_dev))
ecore_ptt_release(p_hwfn, p_ptt);
}
}
@ -1575,10 +1763,11 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
-struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
u32 addr = 0, len = 0;
-if (!p_ptt) {
+if (IS_PF(p_dev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@ -1595,6 +1784,7 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
if (IS_PF(p_dev))
ecore_ptt_release(p_hwfn, p_ptt);
}


@ -14,6 +14,56 @@
#include "ecore_spq.h"
#include "ecore_l2_api.h"
/**
* @brief ecore_sp_vf_start - VF Function Start
*
* This ramrod is sent when a virtual function (VF) is loaded.
* It will configure the function-related parameters.
*
* @note Final phase API.
*
* @param p_hwfn
* @param concrete_vfid VF ID
* @param opaque_vfid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid);
/**
* @brief ecore_sp_vf_update - VF Function Update Ramrod
*
* This ramrod performs updates of a virtual function (VF).
* It currently contains no functionality.
*
* @note Final phase API.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_vf_stop - VF Function Stop Ramrod
*
* This ramrod is sent to unload a virtual function (VF).
*
* @note Final phase API.
*
* @param p_hwfn
* @param concrete_vfid
* @param opaque_vfid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
u32 concrete_vfid, u16 opaque_vfid);
/**
* @brief ecore_sp_eth_tx_queue_update -
*


@ -14,6 +14,8 @@
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
@ -517,6 +519,9 @@ static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
"FLR-ed VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
}
if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
OSAL_VF_FLR_UPDATE(p_hwfn);
}
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
@ -793,6 +798,10 @@ u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
{
u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PATH);
path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
@ -1054,6 +1063,20 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
}
#endif
if (IS_VF(p_dev)) {
if (p_hwfn->vf_iov_info) {
struct pfvf_acquire_resp_tlv *p_resp;
p_resp = &p_hwfn->vf_iov_info->acquire_resp;
*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
return ECORE_SUCCESS;
}
DP_VERBOSE(p_dev, ECORE_MSG_IOV,
"VF requested MFW vers prior to ACQUIRE\n");
return ECORE_INVAL;
}
global_offsize = ecore_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
public_base,
@ -1080,6 +1103,10 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
struct ecore_ptt *p_ptt;
/* TODO - Add support for VFs */
if (IS_VF(p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
return ECORE_BUSY;
@ -1295,6 +1322,9 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
}
#endif
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;


@ -20,6 +20,7 @@
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"
/***************************************************************************
* Structures & Definitions
@ -250,7 +251,9 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
{
switch (p_eqe->protocol_id) {
case PROTOCOLID_COMMON:
-return ECORE_SUCCESS;
+return ecore_sriov_eqe_event(p_hwfn,
+p_eqe->opcode,
+p_eqe->echo, &p_eqe->data);
default:
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
@ -386,6 +389,9 @@ static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
*cqe,
enum protocol_type protocol)
{
if (IS_VF(p_hwfn->p_dev))
return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe

File diff suppressed because it is too large


@ -0,0 +1,390 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__
#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
* of this message, it fills the request part and sends it to
* the PF. The PF will copy the response to the response part for
* the VF to later read it. The PF needs to hold a message like this
* per VF; the request that is copied to the PF is placed in the
* request part, and the response is filled by the PF before sending
* it to the VF.
*/
struct ecore_vf_mbx_msg {
union vfpf_tlvs req;
union pfvf_tlvs resp;
};
/* This data is held in the ecore_hwfn structure for VFs only. */
struct ecore_vf_iov {
union vfpf_tlvs *vf2pf_request;
dma_addr_t vf2pf_request_phys;
union pfvf_tlvs *pf2vf_reply;
dma_addr_t pf2vf_reply_phys;
/* Should be taken whenever the mailbox buffers are accessed */
osal_mutex_t mutex;
u8 *offset;
/* Bulletin Board */
struct ecore_bulletin bulletin;
struct ecore_bulletin_content bulletin_shadow;
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
};
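/* Sketch of how a VF-side request is composed against these buffers;
 * the CHANNEL_TLV_* constant comes from ecore_vfpf_if.h, which this
 * page suppresses, so treat that name as an assumption:
 *
 *	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
 *	struct vfpf_first_tlv *req;
 *
 *	p_iov->offset = (u8 *)p_iov->vf2pf_request;
 *	req = ecore_add_tlv(p_hwfn, &p_iov->offset,
 *			    CHANNEL_TLV_VPORT_TEARDOWN, sizeof(*req));
 *	... send to the PF, then read the status from
 *	    p_iov->pf2vf_reply ...
 */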
/* This mailbox is maintained per VF in its PF
* contains all information required for sending / receiving
* a message
*/
struct ecore_iov_vf_mbx {
union vfpf_tlvs *req_virt;
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
u8 *offset;
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx sw_mbx;
#endif
/* VF GPA address */
u32 vf_addr_lo;
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
u8 flags;
#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
* more than one pending msg
*/
};
struct ecore_vf_q_info {
u16 fw_rx_qid;
u16 fw_tx_qid;
u8 fw_cid;
u8 rxq_active;
u8 txq_active;
};
enum int_mod {
VPORT_INT_MOD_UNDEFINED = 0,
VPORT_INT_MOD_ADAPTIVE = 1,
VPORT_INT_MOD_OFF = 2,
VPORT_INT_MOD_LOW = 100,
VPORT_INT_MOD_MEDIUM = 200,
VPORT_INT_MOD_HIGH = 300
};
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
VF_ACQUIRED = 1, /* VF, acquired, but not initialized */
VF_ENABLED = 2, /* VF, Enabled */
VF_RESET = 3, /* VF, FLR'd, pending cleanup */
VF_STOPPED = 4 /* VF, Stopped */
};
struct ecore_vf_vlan_shadow {
bool used;
u16 vid;
};
struct ecore_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
u8 inner_vlan_removal;
};
/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
u8 to_disable;
struct ecore_bulletin bulletin;
dma_addr_t vf_bulletin;
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
u8 vport_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
u8 vport_instance; /* Number of active vports */
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
u8 was_malicious;
u8 num_active_rxqs;
void *ctx;
struct ecore_public_vf_info p_vf_info;
bool spoof_chk; /* Current configured on HW */
bool req_spoofchk_val; /* Requested value */
/* Stores the configuration requested by VF */
struct ecore_vf_shadow_config shadow_config;
/* A bitfield using bulletin's valid-map bits, used to indicate
* which of the bulletin board features have been configured.
*/
u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
(1 << VLAN_ADDR_FORCED))
};
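/* E.g., the PF can test whether a forced MAC has already been
 * configured for this VF:
 *
 *	bool has_forced_mac = !!(p_vf->configured_features &
 *				 (1 << MAC_ADDR_FORCED));
 */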
/* This structure is part of ecore_hwfn and used only for PFs that have sriov
* capability enabled.
*/
struct ecore_pf_iov {
struct ecore_vf_info vfs_array[MAX_NUM_VFS];
u64 pending_events[ECORE_VF_ARRAY_LENGTH];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
u16 base_vport_id;
/* Allocate message address continuously and split to each VF */
void *mbx_msg_virt_addr;
dma_addr_t mbx_msg_phys_addr;
u32 mbx_msg_size;
void *mbx_reply_virt_addr;
dma_addr_t mbx_reply_phys_addr;
u32 mbx_reply_size;
void *p_bulletins;
dma_addr_t bulletins_phys;
u32 bulletins_size;
};
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief Read sriov related information and allocate resources;
* reads from configuration space, shmem, and allocates the VF
* database in the PF.
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list);
/**
* @brief ecore_iov_alloc - allocate sriov related resources
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief ecore_iov_free - free sriov related resources
*
* @param p_hwfn
*/
void ecore_iov_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*/
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data);
/**
* @brief calculate CRC for bulletin board validation
*
* @param crc - basic crc seed
* @param ptr - pointer to beginning of buffer
* @param length - length in bytes of buffer
*
* @return calculated crc over buffer [with respect to seed].
*/
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);
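/* Validation sketch, assuming (as in comparable drivers) that the
 * CRC field itself is excluded from the checksummed range; the field
 * names below are assumptions, since the bulletin layout is in the
 * suppressed sources:
 *
 *	crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
 *			  p_bulletin->length - crc_size);
 *	bulletin_is_valid = (crc == p_bulletin->crc);
 */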
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
*
* @param p_hwfn
* @param p_tlvs_list - Pointer to tlvs list
* @param req_type - Type of TLV
*
* @return pointer to tlv type if found, otherwise returns NULL.
*/
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
/**
* @brief ecore_iov_get_vf_info - return the database of a
* specific VF
*
* @param p_hwfn
* @param relative_vf_id - relative id of the VF for which info
* is requested
* @param b_enabled_only - false iff want to access even if vf is disabled
*
* @return struct ecore_vf_info*
*/
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only);
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn
*p_hwfn,
struct ecore_ptt
*p_ptt)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset,
u16 type, u16 length)
{
return OSAL_NULL;
}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
void *tlvs_list)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn
*p_hwfn)
{
return ECORE_SUCCESS;
}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn
*p_hwfn,
u8 opcode,
__le16 echo,
union
event_ring_data
* data)
{
return ECORE_INVAL;
}
static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
return 0;
}
static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
u32 *disabled_vfs)
{
return 0;
}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list,
u16 req_type)
{
return OSAL_NULL;
}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn
*p_hwfn,
u16
relative_vf_id,
bool
b_enabled_only)
{
return OSAL_NULL;
}
#endif
#endif /* __ECORE_SRIOV_H__ */

File diff suppressed because it is too large


@ -0,0 +1,415 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_H__
#define __ECORE_VF_H__
#include "ecore_status.h"
#include "ecore_vf_api.h"
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
#ifdef CONFIG_ECORE_SRIOV
/**
*
* @brief hw preparation for VF
* sends ACQUIRE message
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev);
/**
*
* @brief VF init in hw (equivalent to hw_init in PF)
* mark interrupts as enabled
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);
/**
*
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
* @param cid - zero based within the VF
* @param rx_queue_id - zero based within the VF
* @param sb - VF status block for this queue
* @param sb_index - Index within the status block
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
* @param cqe_pbl_size - pbl size
* @param pp_prod - pointer to the producer to be
* used in fastpath
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
u8 rx_queue_id,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod);
/**
*
* @brief VF - start the TX queue by sending a message to the
* PF.
*
* @param p_hwfn
* @param tx_queue_id - zero based within the VF
* @param sb - status block for this queue
* @param sb_index - index within the status block
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell to.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell);
/**
*
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
* @param rx_qid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion);
/**
*
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
* @param tx_qid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
u16 tx_qid);
/**
* @brief VF - update the RX queue by sending a message to the
* PF
*
* @param p_hwfn
* @param rx_queue_id
* @param num_rxqs
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
u16 rx_queue_id,
u8 num_rxqs,
u8 comp_cqe_flg,
u8 comp_event_flg);
/**
*
* @brief VF - send a vport update command
*
* @param p_hwfn
* @param params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params);
/**
*
* @brief VF - send a close message to PF
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
/**
*
* @brief VF - free the VF's memory
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
/**
*
* @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs, IGU SBs don't have to be contiguous.
*
* @param p_hwfn
* @param sb_id
*
* @return u16
*/
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
/**
* @brief ecore_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
* @param vport_id
* @param mtu
* @param inner_vlan_removal
* @param tpa_mode
* @param max_buffers_per_cqe
* @param only_untagged - default behavior regarding vlan acceptance
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum ecore_tpa_mode tpa_mode,
u8 max_buffers_per_cqe,
u8 only_untagged);
/**
* @brief ecore_vf_pf_vport_stop - stop the VF's vport
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_ucast
*p_param);
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd);
/**
* @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
* @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
#else
static OSAL_INLINE enum _ecore_status_t
ecore_vf_hw_prepare(struct ecore_dev *p_dev)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, u8 rx_queue_id, u16 sb,
u8 sb_index, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void OSAL_IOMEM **pp_prod)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, u16 tx_queue_id, u16 sb,
u8 sb_index, dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM **pp_doorbell)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, u16 rx_qid,
bool cqe_completion)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, u16 rx_queue_id,
u8 num_rxqs, u8 comp_cqe_flg, u8 comp_event_flg)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(
struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
return ECORE_INVAL;
}
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
return 0;
}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(
struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu,
u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode,
u8 max_buffers_per_cqe, u8 only_untagged)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(
struct ecore_hwfn *p_hwfn)
{
return ECORE_INVAL;
}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(
struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param)
{
return ECORE_INVAL;
}
static OSAL_INLINE void
ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd)
{
}
static OSAL_INLINE enum _ecore_status_t
ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
return ECORE_INVAL;
}
static OSAL_INLINE void
__ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin)
{
}
static OSAL_INLINE void
__ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin)
{
}
static OSAL_INLINE void
__ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin)
{
}
#endif
#endif /* __ECORE_VF_H__ */
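
Read together, the entry points above give the VF bring-up order: ACQUIRE,
init, vport start, then queue starts. A condensed sketch, with vport/queue
ids, MTU, buffer sizes and DMA addresses as placeholder values (and assuming
ECORE_TPA_MODE_NONE from ecore_l2_api.h), not the PMD's real configuration:

static enum _ecore_status_t example_vf_bringup(struct ecore_dev *p_dev,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr)
{
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
void OSAL_IOMEM *pp_prod = OSAL_NULL;
enum _ecore_status_t rc;

rc = ecore_vf_hw_prepare(p_dev); /* sends ACQUIRE to the PF */
if (rc != ECORE_SUCCESS)
return rc;

rc = ecore_vf_pf_init(p_hwfn); /* marks interrupts enabled */
if (rc != ECORE_SUCCESS)
return rc;

/* vport 0, MTU 1500, no inner-vlan removal, TPA disabled */
rc = ecore_vf_pf_vport_start(p_hwfn, 0, 1500, 0, ECORE_TPA_MODE_NONE, 1, 0);
if (rc != ECORE_SUCCESS)
return rc;

/* Rx queue 0 on SB 0 / index 0, 2K max bytes per bd, one-page PBL */
return ecore_vf_pf_rxq_start(p_hwfn, 0, 0, 0, 2048, bd_chain_phys_addr,
cqe_pbl_addr, 1, &pp_prod);
}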

View File

@ -0,0 +1,200 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_API_H__
#define __ECORE_VF_API_H__
#include "ecore_sp_api.h"
#include "ecore_mcp_api.h"
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
*
* @param p_hwfn
* @param p_change - ecore fills it with 1 if the bulletin board has changed,
* 0 otherwise.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change);
/**
* @brief Get link parameters for VF from ecore
*
* @param p_hwfn
* @param params - the link params structure to be filled for the VF
*/
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params);
/**
* @brief Get link state for VF from ecore
*
* @param p_hwfn
* @param link - the link state structure to be filled for the VF
*/
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link);
/**
* @brief Get link capabilities for VF from ecore
*
* @param p_hwfn
* @param p_link_caps - the link capabilities structure to be filled for the VF
*/
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps);
/**
* @brief Get number of Rx queues allocated for VF by ecore
*
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);
/**
* @brief Get port mac address for VF
*
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by ecore
*
* @param p_hwfn
* @param num_vlan_filters - allocated VLAN filters
*/
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
* @param p_hwfn
* @param num_mac - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac_filters);
/**
* @brief Check if VF can set a MAC address
*
* @param p_hwfn
* @param mac
*
* @return bool
*/
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
/**
* @brief Copy forced MAC address from bulletin board
*
* @param hwfn
* @param dst_mac
* @param p_is_forced - out param indicating, when a MAC exists,
* whether it is forced or not.
*
* @return bool - true if a MAC exists, false otherwise.
*/
bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
u8 *p_is_forced);
/**
* @brief Check if a forced VLAN is set and copy it from the
* bulletin board
*
* @param hwfn
* @param dst_pvid
* @return bool
*/
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
/**
* @brief Set firmware version information in dev_info from the VF's
* ACQUIRE response TLV
*
* @param p_hwfn
* @param fw_major
* @param fw_minor
* @param fw_rev
* @param fw_eng
*/
void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_major,
u16 *fw_minor, u16 *fw_rev, u16 *fw_eng);
#else
static OSAL_INLINE enum _ecore_status_t
ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, u8 *p_change)
{
return ECORE_INVAL;
}
static OSAL_INLINE void
ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params)
{
}
static OSAL_INLINE void
ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link)
{
}
static OSAL_INLINE void
ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps)
{
}
static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
u8 *num_rxqs)
{
}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
u8 *port_mac)
{
}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters)
{
}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac)
{
}
static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
return false;
}
static OSAL_INLINE bool
ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
u8 *p_is_forced)
{
return false;
}
static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
{
}
#endif
#endif
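
A short sketch of how a VF driver might consume the bulletin query helpers
above once a change is detected; the reactions themselves are left as
comments:

static void example_apply_bulletin(struct ecore_hwfn *p_hwfn)
{
u8 mac[ETH_ALEN], is_forced = 0, change = 0;
u16 pvid;

if (ecore_vf_read_bulletin(p_hwfn, &change) != ECORE_SUCCESS || !change)
return;

if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &is_forced) && is_forced) {
/* adopt the PF-enforced MAC address */
}

if (ecore_vf_bulletin_get_forced_vlan(p_hwfn, &pvid)) {
/* program pvid as the forced VLAN */
}
}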

View File

@ -0,0 +1,590 @@
/*
* Copyright (c) 2016 QLogic Corporation.
* All rights reserved.
* www.qlogic.com
*
* See LICENSE.qede_pmd for copyright and licensing details.
*/
#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10
#ifndef aligned_u64
#define aligned_u64 u64
#endif
/***********************************************
*
* Common definitions for all HVs
*
**/
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters; /* No limit so superfluous */
u16 padding;
};
struct hw_sb_info {
u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
u8 sb_qid; /* used to update DHC for sb */
u8 padding[5];
};
/***********************************************
*
* HW VF-PF channel definitions
*
* A.K.A VF-PF mailbox
*
**/
#define TLV_BUFFER_SIZE 1024
#define TLV_ALIGN sizeof(u64)
#define PF_VF_BULLETIN_SIZE 512
#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 */
#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
#define BULLETIN_ATTEMPTS 5 /* CRC failures before giving up */
#define BULLETIN_CRC_SEED 0
enum {
PFVF_STATUS_WAITING = 0,
PFVF_STATUS_SUCCESS,
PFVF_STATUS_FAILURE,
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
};
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
u16 type;
u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 padding;
aligned_u64 reply_address;
};
/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
struct channel_tlv tl;
u8 status;
u8 padding[3];
};
/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
struct pfvf_tlv hdr;
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
struct channel_tlv tl;
u8 padding[4];
};
/* Acquire */
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER (1 << 0)
aligned_u64 capabilties;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 padding[5];
} vfdev_info;
struct vf_pf_resc_request resc_request;
aligned_u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
struct channel_tlv tl;
u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
u8 rss_enable;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
struct pfvf_storm_stats {
u32 address;
u32 len;
};
struct pfvf_stats_info {
struct pfvf_storm_stats mstats;
struct pfvf_storm_stats pstats;
struct pfvf_storm_stats tstats;
struct pfvf_storm_stats ustats;
};
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 mfw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
aligned_u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
/* These should match the PF's ecore_dev values */
u16 chip_rev;
u8 dev_type;
u8 padding;
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
u8 padding2[2];
} pfdev_info;
struct pf_vf_resc {
/* in case of status NO_RESOURCE in message hdr, pf will fill
* this struct with suggested amount of resources for next
* acquire request
*/
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 padding[2];
} resc;
u32 bulletin_size;
u32 padding;
};
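/* Illustrative only (not part of this patch): on PFVF_STATUS_NO_RESOURCE the
* PF fills pf_vf_resc above with the amounts it can support, and the VF is
* expected to adopt them before re-sending CHANNEL_TLV_ACQUIRE. A sketch:
*/
static OSAL_INLINE void
example_shrink_resc_request(struct vf_pf_resc_request *p_req,
const struct pf_vf_resc *p_resp)
{
p_req->num_rxqs = p_resp->num_rxqs;
p_req->num_txqs = p_resp->num_txqs;
p_req->num_sbs = p_resp->num_sbs;
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
}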
/* Init VF */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 stats_addr;
u16 rx_mask;
u16 tx_mask;
u8 drop_ttl0_flg;
u8 padding[3];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
aligned_u64 rxq_addr;
aligned_u64 deprecated_sge_addr;
aligned_u64 cqe_pbl_addr;
u16 cqe_pbl_size;
u16 hw_sb;
u16 rx_qid;
u16 hc_rate; /* desired interrupts per sec. */
u16 bd_max_bytes;
u16 stat_id;
u8 sb_index;
u8 padding[3];
};
struct vfpf_start_txq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
aligned_u64 pbl_addr;
u16 pbl_size;
u16 stat_id;
u16 tx_qid;
u16 hw_sb;
u32 flags; /* VFPF_QUEUE_FLG_X flags */
u16 hc_rate; /* desired interrupts per sec. */
u8 sb_index;
u8 padding[3];
};
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
};
/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
u8 num_txqs;
u8 padding[5];
};
struct vfpf_update_rxq_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
u16 rx_qid;
u8 num_rxqs;
u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
u8 padding[4];
};
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
u8 padding[4];
};
/* Start a vport */
struct vfpf_vport_start_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF];
u32 tpa_mode;
u16 dep1;
u16 mtu;
u8 vport_id;
u8 inner_vlan_removal;
u8 only_untagged;
u8 max_buffers_per_cqe;
u8 padding[4];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
struct channel_tlv tl;
u8 update_rx;
u8 update_tx;
u8 active_rx;
u8 active_tx;
};
struct vfpf_vport_update_tx_switch_tlv {
struct channel_tlv tl;
u8 tx_switching;
u8 padding[3];
};
struct vfpf_vport_update_vlan_strip_tlv {
struct channel_tlv tl;
u8 remove_vlan;
u8 padding[3];
};
struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
aligned_u64 bins[8];
};
struct vfpf_vport_update_accept_param_tlv {
struct channel_tlv tl;
u8 update_rx_mode;
u8 update_tx_mode;
u8 rx_accept_filter;
u8 tx_accept_filter;
};
struct vfpf_vport_update_accept_any_vlan_tlv {
struct channel_tlv tl;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 padding[2];
};
struct vfpf_vport_update_sge_tpa_tlv {
struct channel_tlv tl;
u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
u8 max_buffers_per_cqe;
u16 deprecated_sge_buff_size;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
u8 tpa_max_aggs_num;
u8 padding[7];
};
/* Primary tlv as a header for various extended tlvs for
* various functionalities in vport update ramrod.
*/
struct vfpf_vport_update_tlv {
struct vfpf_first_tlv first_tlv;
};
struct vfpf_ucast_filter_tlv {
struct vfpf_first_tlv first_tlv;
u8 opcode;
u8 type;
u8 mac[ETH_ALEN];
u16 vlan;
u16 padding[3];
};
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct vfpf_init_tlv init;
struct vfpf_start_rxq_tlv start_rxq;
struct vfpf_start_txq_tlv start_txq;
struct vfpf_stop_rxqs_tlv stop_rxqs;
struct vfpf_stop_txqs_tlv stop_txqs;
struct vfpf_update_rxq_tlv update_rxq;
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
struct pfvf_def_resp_tlv default_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
/* This structure is allocated in the VF; the PF may update it whenever it
* deems necessary. The VF samples the bulletin board periodically. The PF
* maintains a copy per VF, to prevent loss of data upon multiple updates
* and to avoid a read-modify-write cycle.
*/
enum ecore_bulletin_bit {
/* Alert the VF that a forced MAC was set by the PF */
MAC_ADDR_FORCED = 0,
/* The VF should not access the vfpf channel */
VFPF_CHANNEL_INVALID = 1,
/* Alert the VF that a forced VLAN was set by the PF */
VLAN_ADDR_FORCED = 2,
/* Indicate that `default_only_untagged' contains actual data */
VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
/* Alert the VF that a suggested MAC was sent by the PF.
* MAC_ADDR will be disabled if MAC_ADDR_FORCED is set
*/
VFPF_BULLETIN_MAC_ADDR = 5
};
struct ecore_bulletin_content {
u32 crc; /* crc of the structure, to ensure it is not
* read mid-update
*/
u32 version;
aligned_u64 valid_bitmap; /* bitmap indicating which fields
* hold valid values
*/
u8 mac[ETH_ALEN]; /* used for MAC_ADDR or MAC_ADDR_FORCED */
u8 default_only_untagged; /* If valid, 1 => only untagged Rx
* if no vlan filter is configured.
*/
u8 padding;
/* The following is a 'copy' of ecore_mcp_link_state,
* ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
* possible the structs will grow further along the road, we cannot embed
* them here; instead we need to carry all of their fields.
*/
u8 req_autoneg;
u8 req_autoneg_pause;
u8 req_forced_rx;
u8 req_forced_tx;
u8 padding2[4];
u32 req_adv_speed;
u32 req_forced_speed;
u32 req_loopback;
u32 padding3;
u8 link_up;
u8 full_duplex;
u8 autoneg;
u8 autoneg_complete;
u8 parallel_detection;
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
u8 padding4[6];
u32 speed;
u32 partner_adv_speed;
u32 capability_speed;
/* Forced vlan */
u16 pvid;
u16 padding5;
};
struct ecore_bulletin {
dma_addr_t phys;
struct ecore_bulletin_content *p_virt;
u32 size;
};
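/* Illustrative only (not part of this patch): a reader should trust a sampled
* bulletin copy only when the CRC matches. The CRC is seeded with
* BULLETIN_CRC_SEED and computed over everything after the crc field, using
* the ecore_crc32() helper declared with the SRIOV code:
*/
static OSAL_INLINE bool
example_bulletin_copy_valid(struct ecore_bulletin_content *p_copy)
{
u32 crc = ecore_crc32(BULLETIN_CRC_SEED,
(u8 *)p_copy + sizeof(p_copy->crc),
BULLETIN_CONTENT_SIZE - sizeof(p_copy->crc));

return crc == p_copy->crc;
}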
#ifndef print_enum
enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
CHANNEL_TLV_VPORT_TEARDOWN,
CHANNEL_TLV_START_RXQ,
CHANNEL_TLV_START_TXQ,
CHANNEL_TLV_STOP_RXQS,
CHANNEL_TLV_STOP_TXQS,
CHANNEL_TLV_UPDATE_RXQ,
CHANNEL_TLV_INT_CLEANUP,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_UCAST_FILTER,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_MAX
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
extern const char *ecore_channel_tlvs_string[];
#else
print_enum(channel_tlvs, CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
CHANNEL_TLV_VPORT_TEARDOWN,
CHANNEL_TLV_SETUP_RXQ,
CHANNEL_TLV_SETUP_TXQ,
CHANNEL_TLV_STOP_RXQS,
CHANNEL_TLV_STOP_TXQS,
CHANNEL_TLV_UPDATE_RXQ,
CHANNEL_TLV_INT_CLEANUP,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_UCAST_FILTER,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX);
#endif
#endif /* __ECORE_VF_PF_IF_H__ */
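
Putting the mailbox pieces together: a vf->pf request is a first TLV carrying
the reply address, optionally followed by extended TLVs, and closed by a
CHANNEL_TLV_LIST_END terminator. A hand-rolled sketch for the simple CLOSE
request (the driver's real send path also posts the buffer to the PF and
polls for the reply, omitted here):

static void example_compose_close_request(union vfpf_tlvs *p_req,
aligned_u64 reply_address)
{
struct channel_list_end_tlv *p_end;

/* first TLV: type + length, and where the PF should write its reply */
p_req->first_tlv.tl.type = CHANNEL_TLV_CLOSE;
p_req->first_tlv.tl.length = sizeof(struct vfpf_first_tlv);
p_req->first_tlv.reply_address = reply_address;

/* terminate the TLV list so the PF knows where the request ends */
p_end = (struct channel_list_end_tlv *)
((u8 *)p_req + sizeof(struct vfpf_first_tlv));
p_end->tl.type = CHANNEL_TLV_LIST_END;
p_end->tl.length = sizeof(struct channel_list_end_tlv);
}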

View File

@ -143,6 +143,14 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
DP_ERR(edev, "Setting MAC address is not allowed\n");
ether_addr_copy(&qdev->primary_mac,
&eth_dev->data->mac_addrs[0]);
return;
}
/* First remove the primary mac */
rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
qdev->primary_mac.addr_bytes);
@ -418,6 +426,9 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
dev_info->max_tx_queues = dev_info->max_rx_queues;
dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
if (IS_VF(edev))
dev_info->max_vfs = 0;
else
dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
dev_info->driver_name = qdev->drv_ver;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
@ -771,6 +782,30 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_configure = qede_dev_configure,
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
.promiscuous_enable = qede_promiscuous_enable,
.promiscuous_disable = qede_promiscuous_disable,
.allmulticast_enable = qede_allmulticast_enable,
.allmulticast_disable = qede_allmulticast_disable,
.dev_stop = qede_dev_stop,
.dev_close = qede_dev_close,
.stats_get = qede_get_stats,
.stats_reset = qede_reset_stats,
.vlan_offload_set = qede_vlan_offload_set,
.vlan_filter_set = qede_vlan_filter_set,
.dev_supported_ptypes_get = qede_dev_supported_ptypes_get,
};
static void qede_update_pf_params(struct ecore_dev *edev)
{
struct ecore_pf_params pf_params;
@ -884,7 +919,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
ECORE_MAC);
else
adapter->dev_info.num_mac_addrs = 1;
ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
&adapter->dev_info.num_mac_addrs);
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
@ -899,11 +935,35 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return -ENOMEM;
}
if (!is_vf) {
ether_addr_copy((struct ether_addr *)edev->hwfns[0].
hw_info.hw_mac_addr,
&eth_dev->data->mac_addrs[0]);
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev),
&bulletin_change);
if (bulletin_change) {
is_mac_exist =
ecore_vf_bulletin_get_forced_mac(
ECORE_LEADING_HWFN(edev),
vf_mac,
&is_mac_forced);
if (is_mac_exist && is_mac_forced) {
DP_INFO(edev, "VF macaddr received from PF\n");
ether_addr_copy((struct ether_addr *)&vf_mac,
&eth_dev->data->mac_addrs[0]);
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
DP_NOTICE(edev, false,
"No VF macaddr assigned\n");
}
}
}
eth_dev->dev_ops = &qede_eth_dev_ops;
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
if (do_once) {
qede_print_adapter_info(adapter);

View File

@ -19,14 +19,14 @@
#include "base/ecore.h"
#include "base/ecore_dev_api.h"
#include "base/ecore_l2_api.h"
#include "base/ecore_sp_api.h"
#include "base/ecore_mcp_api.h"
#include "base/ecore_vf_api.h"
#include "base/ecore_hsi_common.h"
#include "base/ecore_int_api.h"
#include "base/ecore_chain.h"
#include "base/ecore_status.h"
#include "base/ecore_hsi_eth.h"
#include "base/ecore_dev_api.h"
#include "base/ecore_iov_api.h"
#include "qede_logs.h"
#include "qede_if.h"

View File

@ -11,11 +11,15 @@
#include <unistd.h>
#include <zlib.h>
#include <limits.h>
#include <rte_alarm.h>
#include "qede_ethdev.h"
static uint8_t npar_tx_switching = 1;
/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000
#define CONFIG_QED_BINARY_FW
/* Global variable to hold absolute path of fw file */
char fw_file[PATH_MAX];
@ -155,6 +159,56 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
return 0;
}
static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
{
uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced;
is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac,
&is_mac_forced);
if (is_mac_exist && is_mac_forced)
rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN);
/* Always update link configuration according to bulletin */
qed_link_update(hwfn);
}
static void qede_vf_task(void *arg)
{
struct ecore_hwfn *p_hwfn = arg;
uint8_t change = 0;
/* Read the bulletin board, and re-schedule the task */
ecore_vf_read_bulletin(p_hwfn, &change);
if (change)
qed_handle_bulletin_change(p_hwfn);
rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn);
}
static void qed_start_iov_task(struct ecore_dev *edev)
{
struct ecore_hwfn *p_hwfn;
int i;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
if (!IS_PF(edev))
rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task,
p_hwfn);
}
}
static void qed_stop_iov_task(struct ecore_dev *edev)
{
struct ecore_hwfn *p_hwfn;
int i;
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
if (!IS_PF(edev))
rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
}
}
static int qed_slowpath_start(struct ecore_dev *edev,
struct qed_slowpath_params *params)
{
@ -169,12 +223,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
#endif
#ifdef CONFIG_QED_BINARY_FW
if (IS_PF(edev)) {
rc = qed_load_firmware_data(edev);
if (rc) {
DP_NOTICE(edev, true,
"Failed to find fw file %s\n", fw_file);
goto err;
}
}
#endif
rc = qed_nic_setup(edev);
@ -185,6 +241,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
/* Should go with CONFIG_QED_BINARY_FW */
if (IS_PF(edev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(edev);
if (rc) {
@ -192,9 +249,13 @@ static int qed_slowpath_start(struct ecore_dev *edev,
"Failed to allocate stream memory\n");
goto err2;
}
}
qed_start_iov_task(edev);
/* Start the slowpath */
#ifdef CONFIG_QED_BINARY_FW
if (IS_PF(edev))
data = edev->firmware;
#endif
allow_npar_tx_switching = npar_tx_switching ? true : false;
@ -221,6 +282,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
DP_INFO(edev, "HW inited and function started\n");
if (IS_PF(edev)) {
hwfn = ECORE_LEADING_HWFN(edev);
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
@ -235,6 +297,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
"Failed sending drv version command\n");
return rc;
}
}
ecore_reset_vport_stats(edev);
@ -245,10 +308,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
ecore_resc_free(edev);
err:
#ifdef CONFIG_QED_BINARY_FW
if (IS_PF(edev)) {
if (edev->firmware)
rte_free(edev->firmware);
edev->firmware = NULL;
}
#endif
qed_stop_iov_task(edev);
return rc;
}
@ -263,13 +330,20 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
if (IS_PF(edev)) {
dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = edev->mf_mode;
dev_info->tx_switching = false;
} else {
ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
&dev_info->fw_eng);
}
if (IS_PF(edev)) {
ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
if (ptt) {
ecore_mcp_get_mfw_ver(edev, ptt,
@ -286,6 +360,9 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
}
} else {
ecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL);
}
return 0;
}
@ -300,6 +377,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
if (IS_PF(edev)) {
info->num_queues = 0;
for_each_hwfn(edev, i)
info->num_queues +=
@ -309,9 +387,21 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
} else {
ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);
ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
&info->num_vlan_filters);
ecore_vf_get_port_mac(&edev->hwfns[0],
(uint8_t *)&info->port_mac);
}
qed_fill_dev_info(edev, &info->common);
if (IS_VF(edev))
memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN);
return 0;
}
@ -373,11 +463,18 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
if (IS_PF(hwfn->p_dev)) {
rte_memcpy(&params, ecore_mcp_get_link_params(hwfn),
sizeof(params));
rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link));
rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn),
sizeof(link_caps));
} else {
ecore_vf_read_bulletin(hwfn, &change);
ecore_vf_get_link_params(hwfn, &params);
ecore_vf_get_link_state(hwfn, &link);
ecore_vf_get_link_caps(hwfn, &link_caps);
}
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@ -423,6 +520,9 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
struct ecore_mcp_link_params *link_params;
int rc;
if (IS_VF(edev))
return 0;
/* The link should be set only once per PF */
hwfn = &edev->hwfns[0];
@ -456,12 +556,22 @@ static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params)
return rc;
}
void qed_link_update(struct ecore_hwfn *hwfn)
{
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
}
static int qed_drain(struct ecore_dev *edev)
{
struct ecore_hwfn *hwfn;
struct ecore_ptt *ptt;
int i, rc;
if (IS_VF(edev))
return 0;
for_each_hwfn(edev, i) {
hwfn = &edev->hwfns[i];
ptt = ecore_ptt_acquire(hwfn);
@ -514,11 +624,18 @@ static int qed_slowpath_stop(struct ecore_dev *edev)
if (!edev)
return -ENODEV;
if (IS_PF(edev)) {
qed_free_stream_mem(edev);
#ifdef CONFIG_QED_SRIOV
if (IS_QED_ETH_IF(edev))
qed_sriov_disable(edev, true);
#endif
qed_nic_stop(edev);
}
qed_nic_reset(edev);
qed_stop_iov_task(edev);
return 0;
}