2017-12-19 15:49:01 +00:00
|
|
|
/* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
* Copyright(c) 2010-2016 Intel Corporation
|
2013-06-03 00:00:00 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <stdint.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <unistd.h>
|
|
|
|
#include <stdarg.h>
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
|
|
|
#include <rte_interrupts.h>
|
|
|
|
#include <rte_log.h>
|
|
|
|
#include <rte_debug.h>
|
|
|
|
#include <rte_eal.h>
|
|
|
|
#include <rte_ether.h>
|
2021-01-29 16:48:19 +00:00
|
|
|
#include <ethdev_driver.h>
|
2013-06-03 00:00:00 +00:00
|
|
|
#include <rte_memcpy.h>
|
|
|
|
#include <rte_malloc.h>
|
|
|
|
#include <rte_random.h>
|
|
|
|
|
2015-05-15 15:56:53 +00:00
|
|
|
#include "base/ixgbe_common.h"
|
2013-06-03 00:00:00 +00:00
|
|
|
#include "ixgbe_ethdev.h"
|
2016-10-10 14:34:15 +00:00
|
|
|
#include "rte_pmd_ixgbe.h"
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
#define IXGBE_MAX_VFTA (128)
|
2015-01-12 05:59:09 +00:00
|
|
|
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
|
|
|
|
#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
|
2015-10-23 05:52:25 +00:00
|
|
|
#define IXGBE_ETHERTYPE_FLOW_CTRL 0x8808
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
static inline uint16_t
|
|
|
|
dev_num_vf(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
2017-05-15 10:24:03 +00:00
|
|
|
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
|
2016-12-23 15:57:57 +00:00
|
|
|
|
|
|
|
return pci_dev->max_vfs;
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
static inline
|
2013-06-03 00:00:00 +00:00
|
|
|
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
|
|
|
|
{
|
2019-05-21 16:13:05 +00:00
|
|
|
unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_vf_info *vfinfo =
|
2013-06-03 00:00:00 +00:00
|
|
|
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
|
|
|
|
uint16_t vfn;
|
|
|
|
|
|
|
|
for (vfn = 0; vfn < vf_num; vfn++) {
|
2019-05-21 16:13:04 +00:00
|
|
|
rte_eth_random_addr(vf_mac_addr);
|
2013-06-03 00:00:00 +00:00
|
|
|
/* keep the random address as default */
|
2014-06-03 23:42:50 +00:00
|
|
|
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
|
2019-05-21 16:13:05 +00:00
|
|
|
RTE_ETHER_ADDR_LEN);
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
|
|
|
|
{
|
|
|
|
struct ixgbe_interrupt *intr =
|
|
|
|
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
|
|
|
|
|
|
|
|
intr->mask |= IXGBE_EICR_MAILBOX;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-10-20 10:02:47 +00:00
|
|
|
/*
 * PF-side SR-IOV initialization, called from device init.
 *
 * Allocates the per-VF info table, allocates a switch domain, generates a
 * random default MAC per VF, sizes the VMDq pool layout from the VF count,
 * and sets up mailbox parameters and the mailbox interrupt mask.
 *
 * Returns 0 on success (also when no VFs are configured), -ENOMEM on
 * allocation failure, or the rte_eth_switch_domain_alloc() error code.
 */
int ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_vf_info **vfinfo =
		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
	struct ixgbe_uta_info *uta_info =
		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint16_t vf_num;
	uint8_t nb_queue;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
	vf_num = dev_num_vf(eth_dev);
	/* Nothing to set up when SR-IOV is not enabled on this port. */
	if (vf_num == 0)
		return ret;

	/* One zeroed ixgbe_vf_info entry per VF. */
	*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
	if (*vfinfo == NULL) {
		PMD_INIT_LOG(ERR,
			"Cannot allocate memory for private VF data");
		return -ENOMEM;
	}

	ret = rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
	if (ret) {
		PMD_INIT_LOG(ERR,
			"failed to allocate switch domain for device %d", ret);
		rte_free(*vfinfo);
		*vfinfo = NULL;
		return ret;
	}

	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
	hw->mac.mc_filter_type = 0;

	/*
	 * Pick the VMDq pool mode from the VF count: the pool count must
	 * cover all VFs plus the PF, and queues-per-pool shrink as the
	 * pool count grows (64 pools x 2q, 32 x 4q, 16 x 8q).
	 */
	if (vf_num >= RTE_ETH_32_POOLS) {
		nb_queue = 2;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_64_POOLS;
	} else if (vf_num >= RTE_ETH_16_POOLS) {
		nb_queue = 4;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_32_POOLS;
	} else {
		nb_queue = 8;
		RTE_ETH_DEV_SRIOV(eth_dev).active = RTE_ETH_16_POOLS;
	}

	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
	/* The PF takes the first pool after the VFs. */
	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
	RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

	ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

	/* init_mailbox_params */
	hw->mbx.ops.init_params(hw);

	/* set mb interrupt mask */
	ixgbe_mb_intr_setup(eth_dev);

	return ret;
}
|
|
|
|
|
2015-07-02 14:36:49 +00:00
|
|
|
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
struct ixgbe_vf_info **vfinfo;
|
|
|
|
uint16_t vf_num;
|
2018-04-26 10:41:05 +00:00
|
|
|
int ret;
|
2015-07-02 14:36:49 +00:00
|
|
|
|
|
|
|
PMD_INIT_FUNC_TRACE();
|
|
|
|
|
|
|
|
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
|
|
|
|
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
|
|
|
|
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
|
|
|
|
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = 0;
|
|
|
|
|
|
|
|
vf_num = dev_num_vf(eth_dev);
|
|
|
|
if (vf_num == 0)
|
|
|
|
return;
|
|
|
|
|
2018-07-16 12:47:04 +00:00
|
|
|
vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
|
|
|
|
if (*vfinfo == NULL)
|
|
|
|
return;
|
|
|
|
|
2018-05-31 09:53:07 +00:00
|
|
|
ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
|
|
|
|
if (ret)
|
|
|
|
PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
|
|
|
|
|
2015-07-02 14:36:49 +00:00
|
|
|
rte_free(*vfinfo);
|
|
|
|
*vfinfo = NULL;
|
|
|
|
}
|
|
|
|
|
2015-10-23 05:52:25 +00:00
|
|
|
/*
 * Install an ethertype filter for 802.3x flow control frames (0x8808)
 * with the Tx anti-spoof action, and enable per-VF ethertype
 * anti-spoofing, so VFs cannot inject PAUSE frames onto the wire.
 * Silently returns (with a log message) on any failure.
 */
static void
ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
	uint16_t vf_num;
	int i;
	struct ixgbe_ethertype_filter ethertype_filter;

	/* MACs without the anti-spoofing hook cannot support this feature. */
	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
		return;
	}

	/* Bail out if a flow-control ethertype filter is already installed. */
	i = ixgbe_ethertype_filter_lookup(filter_info,
					  IXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
		return;
	}

	/* Reserve a software filter slot for the flow-control ethertype. */
	ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
				IXGBE_ETQF_TX_ANTISPOOF |
				IXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	ethertype_filter.conf = TRUE;
	i = ixgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
		return;
	}

	/* Program the hardware ETQF register for the reserved slot. */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
			(IXGBE_ETQF_FILTER_EN |
			IXGBE_ETQF_TX_ANTISPOOF |
			IXGBE_ETHERTYPE_FLOW_CTRL));

	/* Turn on ethertype anti-spoofing for every VF. */
	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
}
|
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
/*
 * Program the hardware for PF operation in SR-IOV mode: enable VMDq with
 * the PF's default pool, reserve the PF-only pool-enable bits, configure
 * VT mode (GCR_EXT/GPIE), enable VLAN filtering, MAC anti-spoofing and
 * flow-control thresholds, and install the PAUSE-frame drop filter.
 *
 * Returns 0 on success, -1 when no VFs are configured.
 */
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	uint32_t gpie, gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
		<< IXGBE_VT_CTL_POOL_SHIFT;
	vtctl |= IXGBE_VT_CTL_REPLEN;
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

	/* VFRE/VFTE each have two 32-bit slots; locate the PF's bit. */
	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* Enable pools reserved to PF only */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
	/* The other slot gets all-ones (slot 0 used) or all-zeroes (slot 1
	 * used): vfre_slot - 1 underflows to 0xFFFFFFFF when vfre_slot == 0.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);

	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

	/* clear VMDq map to scan rar 127 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

	/* set VMDq map to default PF pool */
	hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
	 */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

	/* Mirror the pool mode chosen in ixgbe_pf_host_init(). */
	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case RTE_ETH_64_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie |= IXGBE_GPIE_VTMODE_64;
		break;
	case RTE_ETH_32_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie |= IXGBE_GPIE_VTMODE_32;
		break;
	case RTE_ETH_16_POOLS:
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
		gpie |= IXGBE_GPIE_VTMODE_16;
		break;
	}

	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < IXGBE_MAX_VFTA; i++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	ixgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
/*
 * Re-program the global Rx filter control (FCTRL) and the PF pool's
 * VMOLR from the ethdev's promiscuous/all-multicast flags, then refresh
 * VLAN strip configuration.
 */
static void
set_rx_mode(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
	/* dev_num_vf(dev) is also the PF's pool index (PF pool follows VFs). */
	uint16_t vfn = dev_num_vf(dev);

	/* Check for Promiscuous and All Multicast modes */
	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	/* set all bits that we expect to always be set */
	fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
	fctrl |= IXGBE_FCTRL_BAM;

	/* clear the bits we are changing the status of */
	fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

	if (dev_data->promiscuous) {
		/* Promiscuous implies unicast + multicast promiscuous. */
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
	} else {
		if (dev_data->all_multicast) {
			fctrl |= IXGBE_FCTRL_MPE;
			vmolr |= IXGBE_VMOLR_MPE;
		} else {
			/* Accept only multicast matching the MTA table. */
			vmolr |= IXGBE_VMOLR_ROMPE;
		}
	}

	/* 82598 has no per-pool VMOLR register. */
	if (hw->mac.type != ixgbe_mac_82598EB) {
		/* Preserve the PF pool's other VMOLR bits, replacing only
		 * the multicast/unicast acceptance bits computed above.
		 */
		vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
			 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
			   IXGBE_VMOLR_ROPE);
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	ixgbe_vlan_hw_strip_config(dev);
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
/*
 * Restore a VF's Rx state to defaults after a reset: reset its VMOLR to
 * the default accept bits, clear its VLAN-insert register and multicast
 * hash bookkeeping, refresh the global Rx mode, and clear the VF's RAR.
 */
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	/* RAR entries are assigned from the top of the table, one per VF. */
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Default accept policy: unicast hash, broadcast, untagged. */
	vmolr |= (IXGBE_VMOLR_ROPE |
			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	/* Clear any port VLAN insertion configured for this VF. */
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

	/* reset multicast table array for vf */
	vfinfo[vf].num_vf_mc_hashes = 0;

	/* reset rx mode */
	set_rx_mode(dev);

	hw->mac.ops.clear_rar(hw, rar_entry);
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
static inline void
|
2013-06-03 00:00:00 +00:00
|
|
|
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
|
|
|
uint32_t reg;
|
|
|
|
uint32_t reg_offset, vf_shift;
|
|
|
|
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
|
|
|
|
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
|
2017-02-22 02:59:35 +00:00
|
|
|
uint8_t nb_q_per_pool;
|
|
|
|
int i;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
vf_shift = vf & VFRE_MASK;
|
|
|
|
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
|
|
|
|
|
2017-02-22 02:59:35 +00:00
|
|
|
/* enable transmit for vf */
|
2013-06-03 00:00:00 +00:00
|
|
|
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
|
|
|
|
reg |= (reg | (1 << vf_shift));
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
|
|
|
|
|
2017-02-22 02:59:35 +00:00
|
|
|
/* enable all queue drop for IOV */
|
|
|
|
nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
|
|
|
|
for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
|
|
|
|
IXGBE_WRITE_FLUSH(hw);
|
|
|
|
reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
|
|
|
|
reg |= i << IXGBE_QDE_IDX_SHIFT;
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* enable receive for vf */
|
2013-06-03 00:00:00 +00:00
|
|
|
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
|
|
|
|
reg |= (reg | (1 << vf_shift));
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
|
|
|
|
|
|
|
|
/* Enable counting of spoofed packets in the SSVPC register */
|
|
|
|
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
|
|
|
|
reg |= (1 << vf_shift);
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
|
|
|
|
|
|
|
|
ixgbe_vf_reset_event(dev, vf);
|
|
|
|
}
|
|
|
|
|
2016-02-14 06:24:47 +00:00
|
|
|
static int
|
|
|
|
ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
|
|
|
uint32_t vmolr;
|
|
|
|
|
|
|
|
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
|
|
|
|
|
2019-07-16 15:40:10 +00:00
|
|
|
PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);
|
2016-02-14 06:24:47 +00:00
|
|
|
|
|
|
|
vmolr &= ~IXGBE_VMOLR_MPE;
|
|
|
|
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
/*
 * Mailbox handler for IXGBE_VF_RESET: reset the VF's datapath, program
 * its permanent MAC into its RAR, disable multicast promiscuous, and
 * reply with an ACK carrying the MAC address and multicast filter type.
 * Always returns 0.
 */
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
	/* RAR entries are assigned from the top of the table, one per VF. */
	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
	/* The reply carries the MAC starting at word 1 of the message. */
	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

	ixgbe_vf_reset_msg(dev, vf);

	hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

	/* Disable multicast promiscuous at reset */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
	/*
	 * Piggyback the multicast filter type so VF can compute the
	 * correct vectors
	 */
	msgbuf[3] = hw->mac.mc_filter_type;
	ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_vf_info *vfinfo =
|
2013-06-03 00:00:00 +00:00
|
|
|
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
|
|
|
|
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
|
|
|
|
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
|
|
|
|
|
2019-05-21 16:13:04 +00:00
|
|
|
if (rte_is_valid_assigned_ether_addr(
|
|
|
|
(struct rte_ether_addr *)new_mac)) {
|
2013-06-03 00:00:00 +00:00
|
|
|
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
|
|
|
|
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
|
|
|
|
}
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Mailbox handler for IXGBE_VF_SET_MULTICAST: record the VF's multicast
 * hash list and set the corresponding bits in the hardware multicast
 * table array (MTA), enabling ROMPE (accept multicast matching MTA) when
 * the list is non-empty. Always returns 0.
 */
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct ixgbe_vf_info *vfinfo =
		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
	/* Entry count is packed into the message-info bits of word 0. */
	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
		IXGBE_VT_MSGINFO_SHIFT;
	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
	uint32_t mta_idx;
	uint32_t mta_shift;
	const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
	const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
	uint32_t reg_val;
	int i;
	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

	/* Disable multicast promiscuous first */
	ixgbe_disable_vf_mc_promisc(dev, vf);

	/* only so many hash values supported */
	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

	/* store the mc entries */
	/* NOTE(review): vfinfo-> indexes entry 0, not vfinfo[vf] — looks
	 * like it stores all VFs' hashes in the first slot; verify intent.
	 */
	vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
	for (i = 0; i < nb_entries; i++) {
		vfinfo->vf_mc_hashes[i] = hash_list[i];
	}

	/* Empty list: stop accepting MTA-matched multicast for this VF. */
	if (nb_entries == 0) {
		vmolr &= ~IXGBE_VMOLR_ROMPE;
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
		return 0;
	}

	/* Set one MTA bit per 12-bit hash: bits 11:5 select the 32-bit
	 * register, bits 4:0 select the bit within it.
	 */
	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
				& IXGBE_MTA_INDEX_MASK;
		mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
		reg_val |= (1 << mta_shift);
		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
int add, vid;
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_vf_info *vfinfo =
|
2013-06-03 00:00:00 +00:00
|
|
|
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
|
|
|
|
|
|
|
|
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
|
|
|
|
>> IXGBE_VT_MSGINFO_SHIFT;
|
|
|
|
vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
|
|
|
|
|
|
|
|
if (add)
|
|
|
|
vfinfo[vf].vlan_count++;
|
|
|
|
else if (vfinfo[vf].vlan_count)
|
|
|
|
vfinfo[vf].vlan_count--;
|
2016-06-23 07:22:30 +00:00
|
|
|
return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
/*
 * Mailbox handler for IXGBE_VF_SET_LPE: a VF requests a maximum frame
 * size. Jumbo frames are allowed for VFs only on X540/X550-family MACs,
 * or on other MACs when the VF negotiated mailbox API >= 1.1 and the PF
 * itself runs with a jumbo MTU. Grows MAXFRS/HLREG0 if needed; the PF's
 * configured maximum is never shrunk.
 *
 * Returns 0 on success, -1 when the request is out of range or jumbo
 * frames cannot be supported for this VF.
 */
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t max_frame = msgbuf[1];
	uint32_t max_frs;
	uint32_t hlreg0;

	/* X540 and X550 support jumbo frames in IOV mode */
	if (hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a) {
		struct ixgbe_vf_info *vfinfo =
			*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

		switch (vfinfo[vf].api_version) {
		case ixgbe_mbox_api_11:
		case ixgbe_mbox_api_12:
		case ixgbe_mbox_api_13:
			/**
			 * Version 1.1&1.2&1.3 supports jumbo frames on VFs
			 * if PF has jumbo frames enabled which means legacy
			 * VFs are disabled.
			 */
			if (dev->data->mtu > RTE_ETHER_MTU)
				break;
			/* fall through */
		default:
			/**
			 * If the PF or VF are running w/ jumbo frames enabled,
			 * we return -1 as we cannot support jumbo frames on
			 * legacy VFs.
			 */
			if (max_frame > IXGBE_ETH_MAX_LEN ||
					dev->data->mtu > RTE_ETHER_MTU)
				return -1;
			break;
		}
	}

	/* Reject frame sizes outside the valid Ethernet range. */
	if (max_frame < RTE_ETHER_MIN_LEN ||
			max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
		return -1;

	/* Current maximum frame size programmed in MAXFRS. */
	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
	if (max_frs < max_frame) {
		/* Toggle the global jumbo-enable bit to match the new size. */
		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		if (max_frame > IXGBE_ETH_MAX_LEN)
			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
		else
			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

		max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
	}

	return 0;
}
|
|
|
|
|
2015-01-12 05:59:08 +00:00
|
|
|
static int
|
|
|
|
ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
uint32_t api_version = msgbuf[1];
|
|
|
|
struct ixgbe_vf_info *vfinfo =
|
|
|
|
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
|
|
|
|
|
|
|
|
switch (api_version) {
|
|
|
|
case ixgbe_mbox_api_10:
|
|
|
|
case ixgbe_mbox_api_11:
|
2016-02-14 06:24:47 +00:00
|
|
|
case ixgbe_mbox_api_12:
|
2019-03-08 02:46:17 +00:00
|
|
|
case ixgbe_mbox_api_13:
|
2015-01-12 05:59:08 +00:00
|
|
|
vfinfo[vf].api_version = (uint8_t)api_version;
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-07-16 15:40:10 +00:00
|
|
|
PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
|
2015-01-12 05:59:08 +00:00
|
|
|
api_version, vf);
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2015-01-12 05:59:09 +00:00
|
|
|
static int
|
|
|
|
ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
struct ixgbe_vf_info *vfinfo =
|
|
|
|
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
|
|
|
|
uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
|
2017-08-21 06:21:09 +00:00
|
|
|
struct rte_eth_conf *eth_conf;
|
|
|
|
struct rte_eth_vmdq_dcb_tx_conf *vmdq_dcb_tx_conf;
|
|
|
|
u8 num_tcs;
|
|
|
|
struct ixgbe_hw *hw;
|
|
|
|
u32 vmvir;
|
|
|
|
#define IXGBE_VMVIR_VLANA_MASK 0xC0000000
|
|
|
|
#define IXGBE_VMVIR_VLAN_VID_MASK 0x00000FFF
|
|
|
|
#define IXGBE_VMVIR_VLAN_UP_MASK 0x0000E000
|
|
|
|
#define VLAN_PRIO_SHIFT 13
|
|
|
|
u32 vlana;
|
|
|
|
u32 vid;
|
|
|
|
u32 user_priority;
|
2015-01-12 05:59:09 +00:00
|
|
|
|
|
|
|
/* Verify if the PF supports the mbox APIs version or not */
|
|
|
|
switch (vfinfo[vf].api_version) {
|
|
|
|
case ixgbe_mbox_api_20:
|
|
|
|
case ixgbe_mbox_api_11:
|
2016-02-14 06:24:47 +00:00
|
|
|
case ixgbe_mbox_api_12:
|
2019-08-28 08:16:52 +00:00
|
|
|
case ixgbe_mbox_api_13:
|
2015-01-12 05:59:09 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Notify VF of Rx and Tx queue number */
|
|
|
|
msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
|
|
|
|
msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
|
|
|
|
|
|
|
|
/* Notify VF of default queue */
|
|
|
|
msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
|
|
|
|
|
2017-08-21 06:21:09 +00:00
|
|
|
/* Notify VF of number of DCB traffic classes */
|
|
|
|
eth_conf = &dev->data->dev_conf;
|
|
|
|
switch (eth_conf->txmode.mq_mode) {
|
2021-10-22 11:03:12 +00:00
|
|
|
case RTE_ETH_MQ_TX_NONE:
|
|
|
|
case RTE_ETH_MQ_TX_DCB:
|
2019-07-16 15:40:10 +00:00
|
|
|
PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
|
2017-08-21 06:21:09 +00:00
|
|
|
", but its tx mode = %d\n", vf,
|
|
|
|
eth_conf->txmode.mq_mode);
|
|
|
|
return -1;
|
|
|
|
|
2021-10-22 11:03:12 +00:00
|
|
|
case RTE_ETH_MQ_TX_VMDQ_DCB:
|
2017-08-21 06:21:09 +00:00
|
|
|
vmdq_dcb_tx_conf = ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
|
|
|
|
switch (vmdq_dcb_tx_conf->nb_queue_pools) {
|
2021-10-22 11:03:12 +00:00
|
|
|
case RTE_ETH_16_POOLS:
|
|
|
|
num_tcs = RTE_ETH_8_TCS;
|
2017-08-21 06:21:09 +00:00
|
|
|
break;
|
2021-10-22 11:03:12 +00:00
|
|
|
case RTE_ETH_32_POOLS:
|
|
|
|
num_tcs = RTE_ETH_4_TCS;
|
2017-08-21 06:21:09 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2021-10-22 11:03:12 +00:00
|
|
|
/* RTE_ETH_MQ_TX_VMDQ_ONLY, DCB not enabled */
|
|
|
|
case RTE_ETH_MQ_TX_VMDQ_ONLY:
|
2017-08-21 06:21:09 +00:00
|
|
|
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
|
|
|
vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
|
|
|
|
vlana = vmvir & IXGBE_VMVIR_VLANA_MASK;
|
|
|
|
vid = vmvir & IXGBE_VMVIR_VLAN_VID_MASK;
|
|
|
|
user_priority =
|
|
|
|
(vmvir & IXGBE_VMVIR_VLAN_UP_MASK) >> VLAN_PRIO_SHIFT;
|
|
|
|
if ((vlana == IXGBE_VMVIR_VLANA_DEFAULT) &&
|
|
|
|
((vid != 0) || (user_priority != 0)))
|
|
|
|
num_tcs = 1;
|
|
|
|
else
|
|
|
|
num_tcs = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
2019-07-16 15:40:10 +00:00
|
|
|
PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
|
2017-08-21 06:21:09 +00:00
|
|
|
eth_conf->txmode.mq_mode);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
|
2015-01-12 05:59:09 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-02-14 06:24:47 +00:00
|
|
|
static int
|
|
|
|
ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
struct ixgbe_vf_info *vfinfo =
|
|
|
|
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
|
2019-03-08 02:46:17 +00:00
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
|
|
|
int xcast_mode = msgbuf[1]; /* msgbuf contains the flag to enable */
|
|
|
|
u32 vmolr, fctrl, disable, enable;
|
2016-02-14 06:24:47 +00:00
|
|
|
|
|
|
|
switch (vfinfo[vf].api_version) {
|
|
|
|
case ixgbe_mbox_api_12:
|
2019-03-08 02:46:17 +00:00
|
|
|
/* promisc introduced in 1.3 version */
|
|
|
|
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
break;
|
|
|
|
/* Fall threw */
|
|
|
|
case ixgbe_mbox_api_13:
|
2016-02-14 06:24:47 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2019-03-08 02:46:17 +00:00
|
|
|
if (vfinfo[vf].xcast_mode == xcast_mode)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
switch (xcast_mode) {
|
|
|
|
case IXGBEVF_XCAST_MODE_NONE:
|
2022-09-29 12:09:00 +00:00
|
|
|
disable = IXGBE_VMOLR_ROMPE |
|
2019-03-08 02:46:17 +00:00
|
|
|
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
|
2022-09-29 12:09:00 +00:00
|
|
|
enable = IXGBE_VMOLR_BAM;
|
2019-03-08 02:46:17 +00:00
|
|
|
break;
|
|
|
|
case IXGBEVF_XCAST_MODE_MULTI:
|
|
|
|
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
|
|
|
|
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
|
|
|
|
break;
|
|
|
|
case IXGBEVF_XCAST_MODE_ALLMULTI:
|
|
|
|
disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
|
|
|
|
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
|
|
|
|
break;
|
|
|
|
case IXGBEVF_XCAST_MODE_PROMISC:
|
|
|
|
if (hw->mac.type <= ixgbe_mac_82599EB)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
|
|
|
|
if (!(fctrl & IXGBE_FCTRL_UPE)) {
|
|
|
|
/* VF promisc requires PF in promisc */
|
2019-07-16 15:40:10 +00:00
|
|
|
PMD_DRV_LOG(ERR,
|
2019-03-08 02:46:17 +00:00
|
|
|
"Enabling VF promisc requires PF in promisc\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2022-09-29 12:09:01 +00:00
|
|
|
disable = IXGBE_VMOLR_VPE;
|
2019-03-08 02:46:17 +00:00
|
|
|
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
|
2022-09-29 12:09:01 +00:00
|
|
|
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
|
2019-03-08 02:46:17 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
|
|
|
|
vmolr &= ~disable;
|
|
|
|
vmolr |= enable;
|
|
|
|
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
|
|
|
|
vfinfo[vf].xcast_mode = xcast_mode;
|
|
|
|
|
|
|
|
out:
|
|
|
|
msgbuf[1] = xcast_mode;
|
|
|
|
|
|
|
|
return 0;
|
2016-02-14 06:24:47 +00:00
|
|
|
}
|
|
|
|
|
2019-12-24 03:23:57 +00:00
|
|
|
static int
|
|
|
|
ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
|
|
|
|
{
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
|
|
|
struct ixgbe_vf_info *vf_info =
|
|
|
|
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
|
|
|
|
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
|
|
|
|
int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
|
|
|
|
IXGBE_VT_MSGINFO_SHIFT;
|
|
|
|
|
|
|
|
if (index) {
|
|
|
|
if (!rte_is_valid_assigned_ether_addr(
|
|
|
|
(struct rte_ether_addr *)new_mac)) {
|
|
|
|
PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
vf_info[vf].mac_count++;
|
|
|
|
|
|
|
|
hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
|
|
|
|
new_mac, vf, IXGBE_RAH_AV);
|
|
|
|
} else {
|
2020-03-11 09:06:51 +00:00
|
|
|
if (vf_info[vf].mac_count) {
|
|
|
|
hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
|
|
|
|
vf_info[vf].mac_count = 0;
|
|
|
|
}
|
2019-12-24 03:23:57 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
static int
|
2013-06-03 00:00:00 +00:00
|
|
|
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
|
|
|
|
{
|
|
|
|
uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
|
2015-01-12 05:59:09 +00:00
|
|
|
uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
|
2013-06-03 00:00:00 +00:00
|
|
|
uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
|
|
|
|
int32_t retval;
|
|
|
|
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_vf_info *vfinfo =
|
2014-02-17 18:57:55 +00:00
|
|
|
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
|
2017-06-15 12:29:50 +00:00
|
|
|
struct rte_pmd_ixgbe_mb_event_param ret_param;
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
|
|
|
|
if (retval) {
|
2014-09-17 13:46:33 +00:00
|
|
|
PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
|
2013-06-03 00:00:00 +00:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* do nothing with the message already been processed */
|
|
|
|
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
/* flush the ack before we write any messages back */
|
|
|
|
IXGBE_WRITE_FLUSH(hw);
|
|
|
|
|
2016-10-10 14:34:15 +00:00
|
|
|
/**
|
|
|
|
* initialise structure to send to user application
|
|
|
|
* will return response from user in retval field
|
|
|
|
*/
|
2017-06-15 12:29:50 +00:00
|
|
|
ret_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
|
|
|
|
ret_param.vfid = vf;
|
|
|
|
ret_param.msg_type = msgbuf[0] & 0xFFFF;
|
|
|
|
ret_param.msg = (void *)msgbuf;
|
2016-10-10 14:34:15 +00:00
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
/* perform VF reset */
|
|
|
|
if (msgbuf[0] == IXGBE_VF_RESET) {
|
2014-02-17 18:57:55 +00:00
|
|
|
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
|
2016-04-07 23:45:28 +00:00
|
|
|
|
2014-02-17 18:57:55 +00:00
|
|
|
vfinfo[vf].clear_to_send = true;
|
2016-10-10 14:34:15 +00:00
|
|
|
|
|
|
|
/* notify application about VF reset */
|
2020-09-09 13:01:48 +00:00
|
|
|
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
|
2018-01-04 16:01:08 +00:00
|
|
|
&ret_param);
|
2014-02-17 18:57:55 +00:00
|
|
|
return ret;
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
2016-10-10 14:34:15 +00:00
|
|
|
/**
|
|
|
|
* ask user application if we allowed to perform those functions
|
2017-06-15 12:29:50 +00:00
|
|
|
* if we get ret_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED
|
2016-10-10 14:34:15 +00:00
|
|
|
* then business as usual,
|
|
|
|
* if 0, do nothing and send ACK to VF
|
2017-06-15 12:29:50 +00:00
|
|
|
* if ret_param.retval > 1, do nothing and send NAK to VF
|
2016-10-10 14:34:15 +00:00
|
|
|
*/
|
2020-09-09 13:01:48 +00:00
|
|
|
rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param);
|
2016-10-10 14:34:15 +00:00
|
|
|
|
2017-06-15 12:29:50 +00:00
|
|
|
retval = ret_param.retval;
|
2016-10-10 14:34:15 +00:00
|
|
|
|
2013-06-03 00:00:00 +00:00
|
|
|
/* check & process VF to PF mailbox message */
|
|
|
|
switch ((msgbuf[0] & 0xFFFF)) {
|
|
|
|
case IXGBE_VF_SET_MAC_ADDR:
|
2016-10-10 14:34:15 +00:00
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
|
2013-06-03 00:00:00 +00:00
|
|
|
break;
|
|
|
|
case IXGBE_VF_SET_MULTICAST:
|
2016-10-10 14:34:15 +00:00
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
|
2013-06-03 00:00:00 +00:00
|
|
|
break;
|
|
|
|
case IXGBE_VF_SET_LPE:
|
2016-10-10 14:34:15 +00:00
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
|
2013-06-03 00:00:00 +00:00
|
|
|
break;
|
|
|
|
case IXGBE_VF_SET_VLAN:
|
2016-10-10 14:34:15 +00:00
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
|
2013-06-03 00:00:00 +00:00
|
|
|
break;
|
2015-01-12 05:59:08 +00:00
|
|
|
case IXGBE_VF_API_NEGOTIATE:
|
|
|
|
retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
|
|
|
|
break;
|
2015-01-12 05:59:09 +00:00
|
|
|
case IXGBE_VF_GET_QUEUES:
|
|
|
|
retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
|
|
|
|
msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
|
|
|
|
break;
|
2016-02-14 06:24:47 +00:00
|
|
|
case IXGBE_VF_UPDATE_XCAST_MODE:
|
2016-10-10 14:34:15 +00:00
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
|
2016-02-14 06:24:47 +00:00
|
|
|
break;
|
2019-12-24 03:23:57 +00:00
|
|
|
case IXGBE_VF_SET_MACVLAN:
|
|
|
|
if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
|
|
|
|
retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
|
|
|
|
break;
|
2013-06-03 00:00:00 +00:00
|
|
|
default:
|
2014-09-17 13:46:33 +00:00
|
|
|
PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
|
2013-06-03 00:00:00 +00:00
|
|
|
retval = IXGBE_ERR_MBX;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* response the VF according to the message process result */
|
|
|
|
if (retval)
|
|
|
|
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
|
|
|
|
else
|
|
|
|
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
|
|
|
|
|
|
|
|
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
|
|
|
|
|
2015-01-12 05:59:09 +00:00
|
|
|
ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2014-06-03 23:42:50 +00:00
|
|
|
static inline void
|
2013-06-03 00:00:00 +00:00
|
|
|
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
|
|
|
|
{
|
|
|
|
uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_hw *hw =
|
2013-06-03 00:00:00 +00:00
|
|
|
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_vf_info *vfinfo =
|
2014-02-17 18:57:55 +00:00
|
|
|
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
|
2013-06-03 00:00:00 +00:00
|
|
|
|
2014-02-17 18:57:55 +00:00
|
|
|
if (!vfinfo[vf].clear_to_send)
|
|
|
|
ixgbe_write_mbx(hw, &msg, 1, vf);
|
2013-06-03 00:00:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
|
|
|
|
{
|
|
|
|
uint16_t vf;
|
2014-06-03 23:42:50 +00:00
|
|
|
struct ixgbe_hw *hw =
|
2013-06-03 00:00:00 +00:00
|
|
|
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
|
|
|
|
|
|
|
|
for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
|
|
|
|
/* check & process vf function level reset */
|
|
|
|
if (!ixgbe_check_for_rst(hw, vf))
|
|
|
|
ixgbe_vf_reset_event(eth_dev, vf);
|
|
|
|
|
|
|
|
/* check & process vf mailbox messages */
|
|
|
|
if (!ixgbe_check_for_msg(hw, vf))
|
|
|
|
ixgbe_rcv_msg_from_vf(eth_dev, vf);
|
|
|
|
|
|
|
|
/* check & process acks from vf */
|
|
|
|
if (!ixgbe_check_for_ack(hw, vf))
|
|
|
|
ixgbe_rcv_ack_from_vf(eth_dev, vf);
|
|
|
|
}
|
|
|
|
}
|