net/bnxt: add port representor infrastructure

Define the data structures and code to init/uninit VF representors
during pci_probe and pci_remove, respectively.
Most of the dev_ops for the VF representor are just stubs for now
and will be filled out in the next patch.

To create a representor using testpmd, where B:D.F is the PCI address
of the backing PF or trusted VF (e.g. 05:02.0):
testpmd -c 0xff -wB:D.F,representor=1 -- -i
testpmd -c 0xff -w05:02.0,representor=[1] -- -i

To create a representor using ovs-dpdk:
1. First add the trusted VF (backing) port to a bridge:
ovs-vsctl add-port ovsbr0 trusted_vf0 -- set Interface trusted_vf0 \
    type=dpdk options:dpdk-devargs=0000:06:02.0
2. Then add the representor port to the bridge:
ovs-vsctl add-port ovsbr0 vf_rep1 -- set Interface vf_rep1 \
    type=dpdk options:dpdk-devargs=0000:06:02.0,representor=1
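For illustration only (not part of this patch): the probe path below names
each representor ethdev "net_<pci-addr>_representor_<vf-id>" (see the
snprintf() in bnxt_pci_probe), so an application can look up the port
created by the ovs-dpdk example above by that name. A minimal sketch,
assuming the same 0000:06:02.0 backing device and VF id 1; the helper
name is hypothetical:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Hypothetical helper: find the bnxt VF representor created with
     * devargs "0000:06:02.0,representor=1".  The ethdev name format
     * comes from bnxt_pci_probe() in this patch.
     */
    static int find_bnxt_vf_rep(uint16_t *port_id)
    {
            int ret;

            ret = rte_eth_dev_get_port_by_name(
                    "net_0000:06:02.0_representor_1", port_id);
            if (ret != 0)
                    printf("VF representor not probed yet: %d\n", ret);
            else
                    printf("VF representor is ethdev port %u\n", *port_id);
            return ret;
    }

The same lookup works for the testpmd example once the PCI address in the
name is adjusted.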

Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Author:       Somnath Kotur <somnath.kotur@broadcom.com>
Date:         2020-07-02 16:27:48 -07:00
Committed by: Ferruh Yigit
Commit:       322bd6e702 (parent c052554fcd)
6 changed files with 559 additions and 48 deletions

drivers/net/bnxt/Makefile

@@ -14,6 +14,7 @@ LIB = librte_pmd_bnxt.a
EXPORT_MAP := rte_pmd_bnxt_version.map
CFLAGS += -O3
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
@@ -38,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_reps.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
ifeq ($(CONFIG_RTE_ARCH_X86), y)
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxtx_vec_sse.c

drivers/net/bnxt/bnxt.h

@@ -220,6 +220,7 @@ struct bnxt_child_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_MAX_VFS(bp) ((bp)->pf->max_vfs)
#define BNXT_MAX_VF_REPS 64
#define BNXT_TOTAL_VFS(bp) ((bp)->pf->total_vfs)
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
@@ -492,6 +493,10 @@ struct bnxt_mark_info {
bool valid;
};
struct bnxt_rep_info {
struct rte_eth_dev *vfr_eth_dev;
};
/* address space location of register */
#define BNXT_FW_STATUS_REG_TYPE_MASK 3
/* register is located in PCIe config space */
@@ -515,6 +520,40 @@ struct bnxt_mark_info {
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
#define BNXT_ETH_RSS_SUPPORT ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
DEV_TX_OFFLOAD_IPV4_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO | \
DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
DEV_TX_OFFLOAD_QINQ_INSERT | \
DEV_TX_OFFLOAD_MULTI_SEGS)
#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt_flow_stat_info {
@@ -682,6 +721,9 @@ struct bnxt {
#define BNXT_MAX_RINGS(bp) \
(RTE_MIN((((bp)->max_cp_rings - BNXT_NUM_ASYNC_CPR(bp)) / 2U), \
BNXT_MAX_TX_RINGS(bp)))
#define BNXT_MAX_VF_REP_RINGS 8
uint16_t max_nq_rings;
uint16_t max_l2_ctx;
uint16_t max_rx_em_flows;
@@ -711,7 +753,9 @@ struct bnxt {
uint16_t fw_reset_min_msecs;
uint16_t fw_reset_max_msecs;
uint16_t switch_domain_id;
uint16_t num_reps;
struct bnxt_rep_info rep_info[BNXT_MAX_VF_REPS];
/* Struct to hold adapter error recovery related info */
struct bnxt_error_recovery_info *recovery_info;
#define BNXT_MARK_TABLE_SZ (sizeof(struct bnxt_mark_info) * 64 * 1024)
@@ -732,6 +776,18 @@ struct bnxt {
#define BNXT_FC_TIMER 1 /* Timer freq in Sec Flow Counters */
/**
* Structure to store private data for each VF representor instance
*/
struct bnxt_vf_representor {
uint16_t switch_domain_id;
uint16_t vf_id;
/* Private data store of associated PF/Trusted VF */
struct bnxt *parent_priv;
uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
uint8_t dflt_mac_addr[RTE_ETHER_ADDR_LEN];
};
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
bool exp_link_status);
@@ -744,7 +800,13 @@ void bnxt_schedule_fw_health_check(struct bnxt *bp);
bool is_bnxt_supported(struct rte_eth_dev *dev);
bool bnxt_stratus_device(struct bnxt *bp);
void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp);
int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
int wait_to_complete);
extern const struct rte_flow_ops bnxt_flow_ops;
#define bnxt_acquire_flow_lock(bp) \
pthread_mutex_lock(&(bp)->flow_lock)

drivers/net/bnxt/bnxt_ethdev.c

@@ -18,6 +18,7 @@
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_irq.h"
#include "bnxt_reps.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
@@ -92,40 +93,6 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
#define BNXT_ETH_RSS_SUPPORT ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
ETH_RSS_NONFRAG_IPV4_UDP | \
ETH_RSS_IPV6 | \
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
DEV_TX_OFFLOAD_IPV4_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_TSO | \
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
DEV_TX_OFFLOAD_GRE_TNL_TSO | \
DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
DEV_TX_OFFLOAD_QINQ_INSERT | \
DEV_TX_OFFLOAD_MULTI_SEGS)
#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
#define BNXT_DEVARG_TRUFLOW "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
@@ -162,7 +129,6 @@ static int bnxt_devarg_max_num_kflow_invalid(uint16_t max_num_kflows)
}
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
static int bnxt_init_resources(struct bnxt *bp, bool reconfig_dev);
static int bnxt_uninit_resources(struct bnxt *bp, bool reconfig_dev);
@@ -197,7 +163,7 @@ static uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
BNXT_RSS_ENTRIES_PER_CTX_THOR;
}
static uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
{
if (!BNXT_CHIP_THOR(bp))
return HW_HASH_INDEX_SIZE;
@@ -1046,7 +1012,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
return -ENOSPC;
}
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
struct rte_eth_link *link = &eth_dev->data->dev_link;
@@ -1272,6 +1238,12 @@ static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev)
return 0;
}
static void bnxt_free_switch_domain(struct bnxt *bp)
{
if (bp->switch_domain_id)
rte_eth_switch_domain_free(bp->switch_domain_id);
}
/* Unload the driver, release resources */
static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
@@ -1340,6 +1312,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
bnxt_free_switch_domain(bp);
bnxt_uninit_resources(bp, false);
bnxt_free_leds_info(bp);
@@ -1521,8 +1495,8 @@ int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
return rc;
}
static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
int wait_to_complete)
int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
int wait_to_complete)
{
return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
}
@@ -5476,8 +5450,26 @@ bnxt_parse_dev_args(struct bnxt *bp, struct rte_devargs *devargs)
rte_kvargs_free(kvlist);
}
static int bnxt_alloc_switch_domain(struct bnxt *bp)
{
int rc = 0;
if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) {
rc = rte_eth_switch_domain_alloc(&bp->switch_domain_id);
if (rc)
PMD_DRV_LOG(ERR,
"Failed to alloc switch domain: %d\n", rc);
else
PMD_DRV_LOG(INFO,
"Switch domain allocated %d\n",
bp->switch_domain_id);
}
return rc;
}
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
static int version_printed;
@@ -5556,6 +5548,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (rc)
goto error_free;
bnxt_alloc_switch_domain(bp);
/* Pass the information to the rte_eth_dev_close() that it should also
* release the private port resources.
*/
@@ -5688,25 +5682,162 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
static int bnxt_pci_remove_dev_with_reps(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_dev *vf_rep_eth_dev;
int ret = 0, i;
if (!bp)
return -EINVAL;
for (i = 0; i < bp->num_reps; i++) {
vf_rep_eth_dev = bp->rep_info[i].vfr_eth_dev;
if (!vf_rep_eth_dev)
continue;
rte_eth_dev_destroy(vf_rep_eth_dev, bnxt_vf_representor_uninit);
}
ret = rte_eth_dev_destroy(eth_dev, bnxt_dev_uninit);
return ret;
}
static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
bnxt_dev_init);
char name[RTE_ETH_NAME_MAX_LEN];
struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
struct rte_eth_dev *backing_eth_dev, *vf_rep_eth_dev;
uint16_t num_rep;
int i, ret = 0;
struct bnxt *backing_bp;
if (pci_dev->device.devargs) {
ret = rte_eth_devargs_parse(pci_dev->device.devargs->args,
&eth_da);
if (ret)
return ret;
}
num_rep = eth_da.nb_representor_ports;
PMD_DRV_LOG(DEBUG, "nb_representor_ports = %d\n",
num_rep);
/* We could come here after first level of probe is already invoked
* as part of an application bringup(OVS-DPDK vswitchd), so first check
* for already allocated eth_dev for the backing device (PF/Trusted VF)
*/
backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (backing_eth_dev == NULL) {
ret = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
sizeof(struct bnxt),
eth_dev_pci_specific_init, pci_dev,
bnxt_dev_init, NULL);
if (ret || !num_rep)
return ret;
}
if (num_rep > BNXT_MAX_VF_REPS) {
PMD_DRV_LOG(ERR, "nb_representor_ports = %d > %d MAX VF REPS\n",
eth_da.nb_representor_ports, BNXT_MAX_VF_REPS);
ret = -EINVAL;
return ret;
}
/* probe representor ports now */
if (!backing_eth_dev)
backing_eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (backing_eth_dev == NULL) {
ret = -ENODEV;
return ret;
}
backing_bp = backing_eth_dev->data->dev_private;
if (!(BNXT_PF(backing_bp) || BNXT_VF_IS_TRUSTED(backing_bp))) {
PMD_DRV_LOG(ERR,
"Not a PF or trusted VF. No Representor support\n");
/* Returning an error is not an option.
* Applications are not handling this correctly
*/
return ret;
}
for (i = 0; i < eth_da.nb_representor_ports; i++) {
struct bnxt_vf_representor representor = {
.vf_id = eth_da.representor_ports[i],
.switch_domain_id = backing_bp->switch_domain_id,
.parent_priv = backing_bp
};
if (representor.vf_id >= BNXT_MAX_VF_REPS) {
PMD_DRV_LOG(ERR, "VF-Rep id %d >= %d MAX VF ID\n",
representor.vf_id, BNXT_MAX_VF_REPS);
continue;
}
/* representor port net_bdf_port */
snprintf(name, sizeof(name), "net_%s_representor_%d",
pci_dev->device.name, eth_da.representor_ports[i]);
ret = rte_eth_dev_create(&pci_dev->device, name,
sizeof(struct bnxt_vf_representor),
NULL, NULL,
bnxt_vf_representor_init,
&representor);
if (!ret) {
vf_rep_eth_dev = rte_eth_dev_allocated(name);
if (!vf_rep_eth_dev) {
PMD_DRV_LOG(ERR, "Failed to find the eth_dev"
" for VF-Rep: %s.", name);
bnxt_pci_remove_dev_with_reps(backing_eth_dev);
ret = -ENODEV;
return ret;
}
backing_bp->rep_info[representor.vf_id].vfr_eth_dev =
vf_rep_eth_dev;
backing_bp->num_reps++;
} else {
PMD_DRV_LOG(ERR, "failed to create bnxt vf "
"representor %s.", name);
bnxt_pci_remove_dev_with_reps(backing_eth_dev);
}
}
return ret;
}
static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
{
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
return rte_eth_dev_pci_generic_remove(pci_dev,
bnxt_dev_uninit);
else
struct rte_eth_dev *eth_dev;
eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (!eth_dev)
return 0; /* Invoked typically only by OVS-DPDK, by the
* time it comes here the eth_dev is already
* deleted by rte_eth_dev_close(), so returning
* +ve value will at least help in proper cleanup
*/
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
if (eth_dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
return rte_eth_dev_destroy(eth_dev,
bnxt_vf_representor_uninit);
else
return rte_eth_dev_destroy(eth_dev,
bnxt_dev_uninit);
} else {
return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
}
static struct rte_pci_driver bnxt_rte_pmd = {
.id_table = bnxt_pci_id_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_PROBE_AGAIN, /* Needed in case of VF-REPs
* and OVS-DPDK
*/
.probe = bnxt_pci_probe,
.remove = bnxt_pci_remove,
};

drivers/net/bnxt/bnxt_reps.c (new file)

@@ -0,0 +1,280 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2014-2020 Broadcom
* All rights reserved.
*/
#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_reps.h"
#include "hsi_struct_def_dpdk.h"
static const struct eth_dev_ops bnxt_vf_rep_dev_ops = {
.dev_infos_get = bnxt_vf_rep_dev_info_get_op,
.dev_configure = bnxt_vf_rep_dev_configure_op,
.dev_start = bnxt_vf_rep_dev_start_op,
.rx_queue_setup = bnxt_vf_rep_rx_queue_setup_op,
.tx_queue_setup = bnxt_vf_rep_tx_queue_setup_op,
.link_update = bnxt_vf_rep_link_update_op,
.dev_close = bnxt_vf_rep_dev_close_op,
.dev_stop = bnxt_vf_rep_dev_stop_op
};
static uint16_t
bnxt_vf_rep_rx_burst(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
static uint16_t
bnxt_vf_rep_tx_burst(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params)
{
struct bnxt_vf_representor *vf_rep_bp = eth_dev->data->dev_private;
struct bnxt_vf_representor *rep_params =
(struct bnxt_vf_representor *)params;
struct rte_eth_link *link;
struct bnxt *parent_bp;
vf_rep_bp->vf_id = rep_params->vf_id;
vf_rep_bp->switch_domain_id = rep_params->switch_domain_id;
vf_rep_bp->parent_priv = rep_params->parent_priv;
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = rep_params->vf_id;
rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);
memcpy(vf_rep_bp->mac_addr, vf_rep_bp->dflt_mac_addr,
sizeof(vf_rep_bp->mac_addr));
eth_dev->data->mac_addrs =
(struct rte_ether_addr *)&vf_rep_bp->mac_addr;
eth_dev->dev_ops = &bnxt_vf_rep_dev_ops;
/* No data-path, but need stub Rx/Tx functions to avoid crash
* when testing with ovs-dpdk
*/
eth_dev->rx_pkt_burst = bnxt_vf_rep_rx_burst;
eth_dev->tx_pkt_burst = bnxt_vf_rep_tx_burst;
/* Link state. Inherited from PF or trusted VF */
parent_bp = vf_rep_bp->parent_priv;
link = &parent_bp->eth_dev->data->dev_link;
eth_dev->data->dev_link.link_speed = link->link_speed;
eth_dev->data->dev_link.link_duplex = link->link_duplex;
eth_dev->data->dev_link.link_status = link->link_status;
eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
PMD_DRV_LOG(INFO, "calling bnxt_print_link_info\n");
bnxt_print_link_info(eth_dev);
/* Pass the information to the rte_eth_dev_close() that it should also
* release the private port resources.
*/
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
PMD_DRV_LOG(INFO,
"Switch domain id %d: Representor Device %d init done\n",
vf_rep_bp->switch_domain_id, vf_rep_bp->vf_id);
return 0;
}
int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev)
{
struct bnxt *parent_bp;
struct bnxt_vf_representor *rep =
(struct bnxt_vf_representor *)eth_dev->data->dev_private;
uint16_t vf_id;
eth_dev->data->mac_addrs = NULL;
parent_bp = rep->parent_priv;
if (parent_bp) {
parent_bp->num_reps--;
vf_id = rep->vf_id;
memset(&parent_bp->rep_info[vf_id], 0,
sizeof(parent_bp->rep_info[vf_id]));
/* mark that this representor has been freed */
}
eth_dev->dev_ops = NULL;
return 0;
}
int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl)
{
struct bnxt *parent_bp;
struct bnxt_vf_representor *rep =
(struct bnxt_vf_representor *)eth_dev->data->dev_private;
struct rte_eth_link *link;
int rc;
parent_bp = rep->parent_priv;
rc = bnxt_link_update_op(parent_bp->eth_dev, wait_to_compl);
/* Link state. Inherited from PF or trusted VF */
link = &parent_bp->eth_dev->data->dev_link;
eth_dev->data->dev_link.link_speed = link->link_speed;
eth_dev->data->dev_link.link_duplex = link->link_duplex;
eth_dev->data->dev_link.link_status = link->link_status;
eth_dev->data->dev_link.link_autoneg = link->link_autoneg;
bnxt_print_link_info(eth_dev);
return rc;
}
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev)
{
bnxt_vf_rep_link_update_op(eth_dev, 1);
return 0;
}
void bnxt_vf_rep_dev_stop_op(__rte_unused struct rte_eth_dev *eth_dev)
{
}
void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev)
{
bnxt_vf_representor_uninit(eth_dev);
}
int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info)
{
struct bnxt_vf_representor *rep_bp = eth_dev->data->dev_private;
struct bnxt *parent_bp;
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
int rc = 0;
/* MAC Specifics */
parent_bp = rep_bp->parent_priv;
if (!parent_bp) {
PMD_DRV_LOG(ERR, "Rep parent NULL!\n");
return rc;
}
PMD_DRV_LOG(DEBUG, "Representor dev_info_get_op\n");
dev_info->max_mac_addrs = parent_bp->max_l2_ctx;
dev_info->max_hash_mac_addrs = 0;
max_rx_rings = BNXT_MAX_VF_REP_RINGS;
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
dev_info->reta_size = bnxt_rss_hash_tbl_size(parent_bp);
dev_info->hash_key_size = 40;
max_vnics = parent_bp->max_vnics;
/* MTU specifics */
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = BNXT_MAX_MTU;
/* Fast path specifics */
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_PKT_LEN;
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (parent_bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
/* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = 8,
.hthresh = 8,
.wthresh = 0,
},
.rx_free_thresh = 32,
/* If no descriptors available, pkts are dropped by default */
.rx_drop_en = 1,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_thresh = {
.pthresh = 32,
.hthresh = 0,
.wthresh = 0,
},
.tx_free_thresh = 32,
.tx_rs_thresh = 32,
};
eth_dev->data->dev_conf.intr_conf.lsc = 1;
eth_dev->data->dev_conf.intr_conf.rxq = 1;
dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
/* *INDENT-ON* */
/*
* TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
* need further investigation.
*/
/* VMDq resources */
vpool = 64; /* ETH_64_POOLS */
vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */
for (i = 0; i < 4; vpool >>= 1, i++) {
if (max_vnics > vpool) {
for (j = 0; j < 5; vrxq >>= 1, j++) {
if (dev_info->max_rx_queues > vrxq) {
if (vpool > vrxq)
vpool = vrxq;
goto found;
}
}
/* Not enough resources to support VMDq */
break;
}
}
/* Not enough resources to support VMDq */
vpool = 0;
vrxq = 0;
found:
dev_info->max_vmdq_pools = vpool;
dev_info->vmdq_queue_num = vrxq;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
return 0;
}
int bnxt_vf_rep_dev_configure_op(__rte_unused struct rte_eth_dev *eth_dev)
{
PMD_DRV_LOG(DEBUG, "Representor dev_configure_op\n");
return 0;
}
int bnxt_vf_rep_rx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_idx,
__rte_unused uint16_t nb_desc,
__rte_unused unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *
rx_conf,
__rte_unused struct rte_mempool *mp)
{
return 0;
}
int bnxt_vf_rep_tx_queue_setup_op(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_idx,
__rte_unused uint16_t nb_desc,
__rte_unused unsigned int socket_id,
__rte_unused const struct rte_eth_txconf *
tx_conf)
{
return 0;
}

drivers/net/bnxt/bnxt_reps.h (new file)

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2014-2020 Broadcom
* All rights reserved.
*/
#ifndef _BNXT_REPS_H_
#define _BNXT_REPS_H_
#include <rte_malloc.h>
#include <rte_ethdev.h>
int bnxt_vf_representor_init(struct rte_eth_dev *eth_dev, void *params);
int bnxt_vf_representor_uninit(struct rte_eth_dev *eth_dev);
int bnxt_vf_rep_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info);
int bnxt_vf_rep_dev_configure_op(struct rte_eth_dev *eth_dev);
int bnxt_vf_rep_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_compl);
int bnxt_vf_rep_dev_start_op(struct rte_eth_dev *eth_dev);
int bnxt_vf_rep_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_idx,
__rte_unused uint16_t nb_desc,
__rte_unused unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *
rx_conf,
__rte_unused struct rte_mempool *mp);
int bnxt_vf_rep_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_idx,
__rte_unused uint16_t nb_desc,
__rte_unused unsigned int socket_id,
__rte_unused const struct rte_eth_txconf *
tx_conf);
void bnxt_vf_rep_dev_stop_op(struct rte_eth_dev *eth_dev);
void bnxt_vf_rep_dev_close_op(struct rte_eth_dev *eth_dev);
#endif /* _BNXT_REPS_H_ */

drivers/net/bnxt/meson.build

@@ -21,6 +21,7 @@ sources = files('bnxt_cpr.c',
'bnxt_txr.c',
'bnxt_util.c',
'bnxt_vnic.c',
'bnxt_reps.c',
'tf_core/tf_core.c',
'tf_core/bitalloc.c',