bnx2x: driver core

This is the first of several parts for a new driver supporting
Broadcom/QLogic NetXtreme II 10 gigabit devices.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Harish Patil <harish.patil@qlogic.com>
Author: Stephen Hemminger, 2015-07-20 09:33:18 -07:00
Committed by: Thomas Monjalon
parent 632b2d1dee
commit 540a211084
11 changed files with 18226 additions and 0 deletions
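For orientation, applications consume this PMD through the standard ethdev API; a minimal polling loop against port 0 might look like the sketch below (written against the DPDK 2.x API of this period, not taken from this commit; error handling omitted, names illustrative):

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

int main(int argc, char **argv)
{
	static const struct rte_eth_conf port_conf; /* all-defaults config */
	struct rte_mempool *pool;
	struct rte_mbuf *bufs[32];
	uint16_t nb;

	rte_eal_init(argc, argv); /* PCI probe matches pci_id_bnx2x_map below */
	pool = rte_pktmbuf_pool_create("mbufs", 8192, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	rte_eth_dev_configure(0, 1, 1, &port_conf);   /* -> bnx2x_dev_configure() */
	rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), NULL, pool);
	rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), NULL);
	rte_eth_dev_start(0);                         /* -> bnx2x_dev_start() */
	for (;;) {
		nb = rte_eth_rx_burst(0, 0, bufs, 32); /* -> bnx2x_recv_pkts() */
		while (nb)
			rte_pktmbuf_free(bufs[--nb]);
	}
}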

drivers/net/bnx2x/bnx2x.c (new file, 11821 lines)
File diff suppressed because it is too large

drivers/net/bnx2x/bnx2x.h (new file, 1998 lines)
File diff suppressed because it is too large

drivers/net/bnx2x/bnx2x_ethdev.c (new file)

@@ -0,0 +1,542 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#include "bnx2x.h"
#include "bnx2x_rxtx.h"
#include <rte_dev.h>
/*
* The set of PCI devices this driver supports
*/
static struct rte_pci_id pci_id_bnx2x_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, }
};
static struct rte_pci_id pci_id_bnx2xvf_map[] = {
#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"
{ .vendor_id = 0, }
};
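/*
 * Illustration (not part of this file): rte_pci_dev_ids.h is an
 * X-macro header, so each bnx2x entry it carries, e.g. hypothetically
 *   RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, 0x168e)
 * expands under the #define above into one table entry:
 *   { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x168e) },
 * Redefining the DECL macro before including the header lets each
 * consumer reuse the same central id list.
 */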
static void
bnx2x_link_update(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
bnx2x_link_status_update(sc);
mb();
dev->data->dev_link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
dev->data->dev_link.link_duplex = ETH_LINK_AUTONEG_DUPLEX;
}
dev->data->dev_link.link_status = sc->link_vars.link_up;
}
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
PMD_DRV_LOG(INFO, "Interrupt handled");
if (bnx2x_intr_legacy(sc, 0))
DELAY_MS(250);
if (sc->periodic_flags & PERIODIC_GO)
bnx2x_periodic_callout(sc);
link_status = REG_RD(sc, sc->link_params.shmem_base +
offsetof(struct shmem_region,
port_mb[sc->link_params.port].link_status));
if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
bnx2x_link_update(dev);
}
static __rte_unused void
bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
bnx2x_interrupt_action(dev);
rte_intr_enable(&(dev->pci_dev->intr_handle));
}
/*
* Devops - helper functions can be called from user application
*/
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
int ret;
PMD_INIT_FUNC_TRACE();
if (dev->data->dev_conf.rxmode.jumbo_frame)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
return -EINVAL;
}
sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
if (sc->num_queues > mp_ncpus) {
PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
return -EINVAL;
}
PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
sc->num_queues, sc->mtu);
/* allocate ilt */
if (bnx2x_alloc_ilt_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
if (IS_VF(sc)) {
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
&sc->vf2pf_mbox_mapping, "vf2pf_mbox",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)sc->vf2pf_mbox_mapping.vaddr;
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
&sc->pf2vf_bulletin_mapping, "vf2pf_bull",
RTE_CACHE_LINE_SIZE) != 0)
return -ENOMEM;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)sc->pf2vf_bulletin_mapping.vaddr;
ret = bnx2x_vf_get_resources(sc, sc->num_queues, sc->num_queues);
if (ret)
return ret;
}
return 0;
}
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
PMD_INIT_FUNC_TRACE();
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
return -1;
}
if (IS_PF(sc)) {
rte_intr_callback_register(&(dev->pci_dev->intr_handle),
bnx2x_interrupt_handler, (void *)dev);
if (rte_intr_enable(&(dev->pci_dev->intr_handle)))
PMD_DRV_LOG(ERR, "rte_intr_enable failed");
}
ret = bnx2x_dev_rx_init(dev);
if (ret != 0) {
PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
return -3;
}
/* Print important adapter info for the user. */
bnx2x_print_adapter_info(sc);
DELAY_MS(2500);
return ret;
}
static void
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
PMD_INIT_FUNC_TRACE();
if (IS_PF(sc)) {
rte_intr_disable(&(dev->pci_dev->intr_handle));
rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
bnx2x_interrupt_handler, (void *)dev);
}
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret)
PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
}
static void
bnx2x_dev_close(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
if (IS_VF(sc))
bnx2x_vf_close(sc);
bnx2x_dev_clear_queues(dev);
memset(&(dev->data->dev_link), 0, sizeof(struct rte_eth_link));
/* free the host hardware/software hsi structures */
bnx2x_free_hsi_mem(sc);
/* free ilt */
bnx2x_free_ilt_mem(sc);
}
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
bnx2x_set_rx_mode(sc);
}
static void
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
bnx2x_set_rx_mode(sc);
}
static void
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
bnx2x_set_rx_mode(sc);
}
static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
bnx2x_set_rx_mode(sc);
}
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
PMD_INIT_FUNC_TRACE();
int old_link_status = dev->data->dev_link.link_status;
bnx2x_link_update(dev);
return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}
static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
int old_link_status = dev->data->dev_link.link_status;
struct bnx2x_softc *sc = dev->data->dev_private;
bnx2x_link_update(dev);
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
PMD_DRV_LOG(ERR, "PF indicated channel is down."
"VF device is no longer operational");
dev->data->dev_link.link_status = 0;
}
return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}
static void
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct bnx2x_softc *sc = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
memset(stats, 0, sizeof (struct rte_eth_stats));
stats->ipackets =
HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
sc->eth_stats.total_unicast_packets_received_lo) +
HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
sc->eth_stats.total_multicast_packets_received_lo) +
HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
sc->eth_stats.total_broadcast_packets_received_lo);
stats->opackets =
HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
sc->eth_stats.total_unicast_packets_transmitted_lo) +
HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
sc->eth_stats.total_multicast_packets_transmitted_lo) +
HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
sc->eth_stats.total_broadcast_packets_transmitted_lo);
stats->ibytes =
HILO_U64(sc->eth_stats.total_bytes_received_hi,
sc->eth_stats.total_bytes_received_lo);
stats->obytes =
HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
sc->eth_stats.total_bytes_transmitted_lo);
stats->ierrors =
HILO_U64(sc->eth_stats.error_bytes_received_hi,
sc->eth_stats.error_bytes_received_lo);
stats->oerrors = 0;
stats->rx_nombuf =
HILO_U64(sc->eth_stats.no_buff_discard_hi,
sc->eth_stats.no_buff_discard_lo);
}
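/*
 * The firmware exports each 64-bit counter as a hi/lo pair of 32-bit
 * words; HILO_U64() (defined in bnx2x.h, not shown in this view)
 * recombines them, presumably as ((uint64_t)hi << 32) | lo, e.g.
 * hi = 0x2, lo = 0x80000000 -> 0x280000000.
 */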
static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
}
static void
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct bnx2x_softc *sc = dev->data->dev_private;
if (sc->mac_ops.mac_addr_add)
sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
}
static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct bnx2x_softc *sc = dev->data->dev_private;
if (sc->mac_ops.mac_addr_remove)
sc->mac_ops.mac_addr_remove(dev, index);
}
static struct eth_dev_ops bnx2x_eth_dev_ops = {
.dev_configure = bnx2x_dev_configure,
.dev_start = bnx2x_dev_start,
.dev_stop = bnx2x_dev_stop,
.dev_close = bnx2x_dev_close,
.promiscuous_enable = bnx2x_promisc_enable,
.promiscuous_disable = bnx2x_promisc_disable,
.allmulticast_enable = bnx2x_dev_allmulticast_enable,
.allmulticast_disable = bnx2x_dev_allmulticast_disable,
.link_update = bnx2x_dev_link_update,
.stats_get = bnx2x_dev_stats_get,
.dev_infos_get = bnx2x_dev_infos_get,
.rx_queue_setup = bnx2x_dev_rx_queue_setup,
.rx_queue_release = bnx2x_dev_rx_queue_release,
.tx_queue_setup = bnx2x_dev_tx_queue_setup,
.tx_queue_release = bnx2x_dev_tx_queue_release,
.mac_addr_add = bnx2x_mac_addr_add,
.mac_addr_remove = bnx2x_mac_addr_remove,
};
/*
* dev_ops for virtual function
*/
static struct eth_dev_ops bnx2xvf_eth_dev_ops = {
.dev_configure = bnx2x_dev_configure,
.dev_start = bnx2x_dev_start,
.dev_stop = bnx2x_dev_stop,
.dev_close = bnx2x_dev_close,
.promiscuous_enable = bnx2x_promisc_enable,
.promiscuous_disable = bnx2x_promisc_disable,
.allmulticast_enable = bnx2x_dev_allmulticast_enable,
.allmulticast_disable = bnx2x_dev_allmulticast_disable,
.link_update = bnx2xvf_dev_link_update,
.stats_get = bnx2x_dev_stats_get,
.dev_infos_get = bnx2x_dev_infos_get,
.rx_queue_setup = bnx2x_dev_rx_queue_setup,
.rx_queue_release = bnx2x_dev_rx_queue_release,
.tx_queue_setup = bnx2x_dev_tx_queue_setup,
.tx_queue_release = bnx2x_dev_tx_queue_release,
.mac_addr_add = bnx2x_mac_addr_add,
.mac_addr_remove = bnx2x_mac_addr_remove,
};
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
int ret = 0;
struct rte_pci_device *pci_dev;
struct bnx2x_softc *sc;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
pci_dev = eth_dev->pci_dev;
sc = eth_dev->data->dev_private;
sc->pcie_bus = pci_dev->addr.bus;
sc->pcie_device = pci_dev->addr.devid;
if (is_vf)
sc->flags = BNX2X_IS_VF_FLAG;
sc->devinfo.vendor_id = pci_dev->id.vendor_id;
sc->devinfo.device_id = pci_dev->id.device_id;
sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
sc->pcie_func = pci_dev->addr.function;
sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
if (is_vf)
sc->bar[BAR1].base_addr = (void *)
((uint64_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
else
sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;
assert(sc->bar[BAR0].base_addr);
assert(sc->bar[BAR1].base_addr);
bnx2x_load_firmware(sc);
assert(sc->firmware);
if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
sc->udp_rss = 1;
sc->rx_budget = BNX2X_RX_BUDGET;
sc->hc_rx_ticks = BNX2X_RX_TICKS;
sc->hc_tx_ticks = BNX2X_TX_TICKS;
sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
sc->pci_dev = pci_dev;
ret = bnx2x_attach(sc);
if (ret) {
PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
}
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
sc->pcie_bus, sc->pcie_device);
PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
return ret;
}
static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
PMD_INIT_FUNC_TRACE();
return bnx2x_common_dev_init(eth_dev, 0);
}
static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
PMD_INIT_FUNC_TRACE();
return bnx2x_common_dev_init(eth_dev, 1);
}
static struct eth_driver rte_bnx2x_pmd = {
.pci_drv = {
.name = "rte_bnx2x_pmd",
.id_table = pci_id_bnx2x_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
},
.eth_dev_init = eth_bnx2x_dev_init,
.dev_private_size = sizeof(struct bnx2x_softc),
};
/*
* virtual function driver struct
*/
static struct eth_driver rte_bnx2xvf_pmd = {
.pci_drv = {
.name = "rte_bnx2xvf_pmd",
.id_table = pci_id_bnx2xvf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
},
.eth_dev_init = eth_bnx2xvf_dev_init,
.dev_private_size = sizeof(struct bnx2x_softc),
};
static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
PMD_INIT_FUNC_TRACE();
rte_eth_driver_register(&rte_bnx2x_pmd);
return 0;
}
static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
{
PMD_INIT_FUNC_TRACE();
rte_eth_driver_register(&rte_bnx2xvf_pmd);
return 0;
}
static struct rte_driver rte_bnx2x_driver = {
.type = PMD_PDEV,
.init = rte_bnx2x_pmd_init,
};
static struct rte_driver rte_bnx2xvf_driver = {
.type = PMD_PDEV,
.init = rte_bnx2xvf_pmd_init,
};
PMD_REGISTER_DRIVER(rte_bnx2x_driver);
PMD_REGISTER_DRIVER(rte_bnx2xvf_driver);
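PMD_REGISTER_DRIVER() above is what hooks the two rte_driver structs into the EAL. In the DPDK of this period it expanded to a constructor that runs before main(), roughly as below (a sketch of the mechanism from rte_dev.h, not code from this commit); rte_eal_init() then walks the registered PMD_PDEV drivers and calls their .init hooks, which in turn register the eth_driver structs for PCI probing:

/* sketch, assuming the rte_dev.h of this era */
#define PMD_REGISTER_DRIVER(d) \
void devinitfn_ ##d(void); \
void __attribute__((constructor, used)) devinitfn_ ##d(void) \
{ \
	rte_eal_driver_register(&d); \
}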

drivers/net/bnx2x/bnx2x_ethdev.h (new file)

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#ifndef PMD_BNX2X_ETHDEV_H
#define PMD_BNX2X_ETHDEV_H
#include <sys/queue.h>
#include <sys/param.h>
#include <sys/user.h>
#include <sys/stat.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <assert.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_spinlock.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include "bnx2x_rxtx.h"
#include "bnx2x_logs.h"
#define DELAY(x) rte_delay_us(x)
#define DELAY_MS(x) rte_delay_ms(x)
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
#define FALSE 0
#define TRUE 1
#define false 0
#define true 1
#define min(a,b) RTE_MIN(a,b)
#define mb() rte_mb()
#define wmb() rte_wmb()
#define rmb() rte_rmb()
#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
#define BNX2X_MIN_RX_BUF_SIZE 1024
#define BNX2X_MAX_RX_PKT_LEN 15872
#define BNX2X_MAX_MAC_ADDRS 1
/* Hardware RX tick timer (usecs) */
#define BNX2X_RX_TICKS 25
/* Hardware TX tick timer (usecs) */
#define BNX2X_TX_TICKS 50
/* Maximum number of Rx packets to process at a time */
#define BNX2X_RX_BUDGET 0xffffffff
/* MAC address operations */
struct bnx2x_mac_ops {
void (*mac_addr_add)(struct rte_eth_dev *dev, struct ether_addr *addr,
uint16_t index, uint32_t pool); /* not implemented yet */
void (*mac_addr_remove)(struct rte_eth_dev *dev, uint16_t index); /* not implemented yet */
};
#endif /* PMD_BNX2X_ETHDEV_H */

drivers/net/bnx2x/bnx2x_logs.h (new file)

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#ifndef _PMD_LOGS_H_
#define _PMD_LOGS_H_
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
"PMD: %s(): " fmt "\n", __func__, ##args)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#else
#define PMD_INIT_FUNC_TRACE() do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_TX_FREE
#define PMD_TX_FREE_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_DRIVER
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
#else
#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
#endif
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
#endif /* _PMD_LOGS_H_ */
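Each log class above compiles to a no-op unless the matching RTE_LIBRTE_BNX2X_DEBUG_* option is enabled in the build configuration, so the fast path carries no logging cost by default. With RTE_LIBRTE_BNX2X_DEBUG_DRIVER set, a call such as (illustrative values)

PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d", bus, devid);

expands through PMD_DRV_LOG_RAW into roughly

RTE_LOG(INFO, PMD, "%s(): pcie_bus=%d, pcie_device=%d\n", __func__, bus, devid);

i.e. PMD_DRV_LOG appends the trailing newline and PMD_DRV_LOG_RAW prefixes the calling function's name.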

drivers/net/bnx2x/bnx2x_rxtx.c (new file)

@@ -0,0 +1,487 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#include "bnx2x.h"
#include "bnx2x_rxtx.h"
static inline struct rte_mbuf *
bnx2x_rxmbuf_alloc(struct rte_mempool *mp)
{
struct rte_mbuf *m;
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check(m, 0);
return m;
}
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
uint16_t queue_id, uint32_t ring_size, int socket_id)
{
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
}
static void
bnx2x_rx_queue_release(struct bnx2x_rx_queue *rx_queue)
{
uint16_t i;
struct rte_mbuf **sw_ring;
if (NULL != rx_queue) {
sw_ring = rx_queue->sw_ring;
if (NULL != sw_ring) {
for (i = 0; i < rx_queue->nb_rx_desc; i++) {
if (NULL != sw_ring[i])
rte_pktmbuf_free(sw_ring[i]);
}
rte_free(sw_ring);
}
rte_free(rx_queue);
}
}
void
bnx2x_dev_rx_queue_release(void *rxq)
{
bnx2x_rx_queue_release(rxq);
}
int
bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint16_t j, idx;
const struct rte_memzone *dma;
struct bnx2x_rx_queue *rxq;
uint32_t dma_size;
struct rte_mbuf *mbuf;
struct bnx2x_softc *sc = dev->data->dev_private;
struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
struct eth_rx_cqe_next_page *nextpg;
phys_addr_t *rx_bd;
phys_addr_t busaddr;
/* First allocate the rx queue data structure */
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (NULL == rxq) {
PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
return (-ENOMEM);
}
rxq->sc = sc;
rxq->mb_pool = mp;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
rxq->nb_rx_pages = 1;
while (USABLE_RX_BD(rxq) < nb_desc)
rxq->nb_rx_pages <<= 1;
rxq->nb_rx_desc = TOTAL_RX_BD(rxq);
sc->rx_ring_size = USABLE_RX_BD(rxq);
rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;
PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, rx_pages=%u, cq_pages=%u",
queue_idx, nb_desc, rxq->rx_free_thresh, USABLE_RX_BD(rxq),
TOTAL_RX_BD(rxq), rxq->nb_rx_pages, rxq->nb_cq_pages);
/* Allocate RX ring hardware descriptors */
dma_size = rxq->nb_rx_desc * sizeof(struct eth_rx_bd);
dma = ring_dma_zone_reserve(dev, "hw_ring", queue_idx, dma_size, socket_id);
if (NULL == dma) {
PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
bnx2x_rx_queue_release(rxq);
return (-ENOMEM);
}
fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
rxq->rx_ring = (uint64_t*)dma->addr;
memset((void *)rxq->rx_ring, 0, dma_size);
/* Link the RX chain pages. */
for (j = 1; j <= rxq->nb_rx_pages; j++) {
rx_bd = &rxq->rx_ring[TOTAL_RX_BD_PER_PAGE * j - 2];
busaddr = rxq->rx_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_rx_pages);
*rx_bd = busaddr;
}
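/*
 * Worked example of the chaining above, assuming 4 KiB pages and
 * 8-byte BDs (so TOTAL_RX_BD_PER_PAGE == 512, with the last two BDs
 * of each page reserved as the next-page pointer): for page j the BD
 * at index 512*j - 2 receives the physical address of page
 * (j % nb_rx_pages), so the final page points back at page 0 and the
 * chain forms a ring.
 */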
/* Allocate software ring */
dma_size = rxq->nb_rx_desc * sizeof(struct bnx2x_rx_entry);
rxq->sw_ring = rte_zmalloc_socket("sw_ring", dma_size,
RTE_CACHE_LINE_SIZE,
socket_id);
if (NULL == rxq->sw_ring) {
PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
bnx2x_rx_queue_release(rxq);
return (-ENOMEM);
}
/* Initialize software ring entries */
rxq->rx_mbuf_alloc = 0;
for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
mbuf = bnx2x_rxmbuf_alloc(mp);
if (NULL == mbuf) {
PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
(unsigned)rxq->queue_id, idx);
bnx2x_rx_queue_release(rxq);
return (-ENOMEM);
}
rxq->sw_ring[idx] = mbuf;
rxq->rx_ring[idx] = mbuf->buf_physaddr;
rxq->rx_mbuf_alloc++;
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
rxq->rx_bd_head = 0;
rxq->rx_bd_tail = idx;
/* Allocate CQ chain. */
dma_size = BNX2X_RX_CHAIN_PAGE_SZ * rxq->nb_cq_pages;
dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
if (NULL == dma) {
PMD_RX_LOG(ERR, "RCQ alloc failed");
bnx2x_rx_queue_release(rxq);
return (-ENOMEM);
}
fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
/* Link the CQ chain pages. */
for (j = 1; j <= rxq->nb_cq_pages; j++) {
nextpg = &rxq->cq_ring[TOTAL_RCQ_ENTRIES_PER_PAGE * j - 1].next_page_cqe;
busaddr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE * (j % rxq->nb_cq_pages);
nextpg->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
nextpg->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
}
rxq->rx_cq_head = 0;
rxq->rx_cq_tail = TOTAL_RCQ_ENTRIES(rxq);
dev->data->rx_queues[queue_idx] = rxq;
if (!sc->rx_queues) sc->rx_queues = dev->data->rx_queues;
return 0;
}
static void
bnx2x_tx_queue_release(struct bnx2x_tx_queue *tx_queue)
{
uint16_t i;
struct rte_mbuf **sw_ring;
if (NULL != tx_queue) {
sw_ring = tx_queue->sw_ring;
if (NULL != sw_ring) {
for (i = 0; i < tx_queue->nb_tx_desc; i++) {
if (NULL != sw_ring[i])
rte_pktmbuf_free(sw_ring[i]);
}
rte_free(sw_ring);
}
rte_free(tx_queue);
}
}
void
bnx2x_dev_tx_queue_release(void *txq)
{
bnx2x_tx_queue_release(txq);
}
static uint16_t
bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct bnx2x_tx_queue *txq;
struct bnx2x_softc *sc;
struct bnx2x_fastpath *fp;
uint32_t burst, nb_tx;
struct rte_mbuf **m = tx_pkts;
int ret;
txq = p_txq;
sc = txq->sc;
fp = &sc->fp[txq->queue_id];
nb_tx = nb_pkts;
do {
burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST);
ret = bnx2x_tx_encap(txq, m, burst);
if (unlikely(ret)) {
PMD_TX_LOG(ERR, "tx_encap failed!");
}
bnx2x_update_fp_sb_idx(fp);
if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) {
bnx2x_txeof(sc, fp);
}
if (unlikely(ret == ENOMEM)) {
break;
}
m += burst;
nb_pkts -= burst;
} while (nb_pkts);
return nb_tx - nb_pkts;
}
int
bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
uint16_t i;
unsigned int tsize;
const struct rte_memzone *tz;
struct bnx2x_tx_queue *txq;
struct eth_tx_next_bd *tx_n_bd;
uint64_t busaddr;
struct bnx2x_softc *sc = dev->data->dev_private;
struct bnx2x_fastpath *fp = &sc->fp[queue_idx];
/* First allocate the tx queue data structure */
txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL)
return (-ENOMEM);
txq->sc = sc;
txq->nb_tx_pages = 1;
while (USABLE_TX_BD(txq) < nb_desc)
txq->nb_tx_pages <<= 1;
txq->nb_tx_desc = TOTAL_TX_BD(txq);
sc->tx_ring_size = TOTAL_TX_BD(txq);
txq->tx_free_thresh = tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
queue_idx, nb_desc, txq->tx_free_thresh, USABLE_TX_BD(txq),
TOTAL_TX_BD(txq), txq->nb_tx_pages);
/* Allocate TX ring hardware descriptors */
tsize = txq->nb_tx_desc * sizeof(union eth_tx_bd_types);
tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
if (tz == NULL) {
bnx2x_tx_queue_release(txq);
return (-ENOMEM);
}
fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
memset(txq->tx_ring, 0, tsize);
/* Allocate software ring */
tsize = txq->nb_tx_desc * sizeof(struct rte_mbuf *);
txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
RTE_CACHE_LINE_SIZE);
if (txq->sw_ring == NULL) {
bnx2x_tx_queue_release(txq);
return (-ENOMEM);
}
/* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
/* Link TX pages */
for (i = 1; i <= txq->nb_tx_pages; i++) {
tx_n_bd = &txq->tx_ring[TOTAL_TX_BD_PER_PAGE * i - 1].next_bd;
busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
/* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
}
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
txq->tx_pkt_tail = 0;
txq->tx_pkt_head = 0;
txq->tx_bd_tail = 0;
txq->tx_bd_head = 0;
txq->nb_tx_avail = txq->nb_tx_desc;
dev->tx_pkt_burst = bnx2x_xmit_pkts;
dev->data->tx_queues[queue_idx] = txq;
if (!sc->tx_queues) sc->tx_queues = dev->data->tx_queues;
return 0;
}
static inline void
bnx2x_upd_rx_prod_fast(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod)
{
union ustorm_eth_rx_producers rx_prods;
rx_prods.prod.bd_prod = rx_bd_prod;
rx_prods.prod.cqe_prod = rx_cq_prod;
REG_WR(sc, fp->ustorm_rx_prods_offset, rx_prods.raw_data[0]);
}
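/*
 * bd_prod and cqe_prod occupy adjacent 16-bit fields of the ustorm
 * producers union (defined in the hardware HSI headers, not shown in
 * this view), so writing raw_data[0] appears to publish both
 * producers to the chip in a single 32-bit register write instead of
 * two separate ones.
 */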
static uint16_t
bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct bnx2x_rx_queue *rxq = p_rxq;
struct bnx2x_softc *sc = rxq->sc;
struct bnx2x_fastpath *fp = &sc->fp[rxq->queue_id];
uint32_t nb_rx = 0;
uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod;
uint16_t bd_cons, bd_prod;
struct rte_mbuf *new_mb;
uint16_t rx_pref;
struct eth_fast_path_rx_cqe *cqe_fp;
uint16_t len, pad;
struct rte_mbuf *rx_mb = NULL;
hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
if ((hw_cq_cons & USABLE_RCQ_ENTRIES_PER_PAGE) ==
USABLE_RCQ_ENTRIES_PER_PAGE) {
++hw_cq_cons;
}
bd_cons = rxq->rx_bd_head;
bd_prod = rxq->rx_bd_tail;
sw_cq_cons = rxq->rx_cq_head;
sw_cq_prod = rxq->rx_cq_tail;
while (nb_rx < nb_pkts && sw_cq_cons != hw_cq_cons) {
bd_prod &= MAX_RX_BD(rxq);
bd_cons &= MAX_RX_BD(rxq);
cqe_fp = &rxq->cq_ring[sw_cq_cons & MAX_RX_BD(rxq)].fast_path_cqe;
if (unlikely(CQE_TYPE_SLOW(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_TYPE))) {
PMD_RX_LOG(ERR, "slowpath event during traffic processing");
break;
}
if (unlikely(cqe_fp->type_error_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) {
PMD_RX_LOG(ERR, "flags 0x%x rx packet %u",
cqe_fp->type_error_flags, sw_cq_cons);
goto next_rx;
}
len = cqe_fp->pkt_len_or_gro_seg_len;
pad = cqe_fp->placement_offset;
new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool);
if (unlikely(!new_mb)) {
PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index);
goto next_rx;
}
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
rxq->rx_ring[bd_prod] = new_mb->buf_physaddr;
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
if ((rx_pref & 0x3) == 0) {
rte_prefetch0(&rxq->rx_ring[rx_pref]);
rte_prefetch0(&rxq->sw_ring[rx_pref]);
}
rx_mb->data_off = pad;
rx_mb->nb_segs = 1;
rx_mb->next = NULL;
rx_mb->pkt_len = rx_mb->data_len = len;
rx_mb->port = rxq->port_id;
rx_mb->buf_len = len + pad;
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
/*
* If we received a packet with a vlan tag,
* attach that information to the packet.
*/
if (cqe_fp->pars_flags.flags & PARSING_FLAGS_VLAN) {
rx_mb->vlan_tci = cqe_fp->vlan_tag;
rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
}
rx_pkts[nb_rx] = rx_mb;
nb_rx++;
/* limit spinning on the queue */
if (unlikely(nb_rx == sc->rx_budget)) {
PMD_RX_LOG(ERR, "Limit spinning on the queue");
break;
}
next_rx:
bd_cons = NEXT_RX_BD(bd_cons);
bd_prod = NEXT_RX_BD(bd_prod);
sw_cq_prod = NEXT_RCQ_IDX(sw_cq_prod);
sw_cq_cons = NEXT_RCQ_IDX(sw_cq_cons);
}
rxq->rx_bd_head = bd_cons;
rxq->rx_bd_tail = bd_prod;
rxq->rx_cq_head = sw_cq_cons;
rxq->rx_cq_tail = sw_cq_prod;
bnx2x_upd_rx_prod_fast(sc, fp, bd_prod, sw_cq_prod);
return nb_rx;
}
int
bnx2x_dev_rx_init(struct rte_eth_dev *dev)
{
dev->rx_pkt_burst = bnx2x_recv_pkts;
return 0;
}
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
uint8_t i;
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
bnx2x_tx_queue_release(txq);
dev->data->tx_queues[i] = NULL;
}
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct bnx2x_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
bnx2x_rx_queue_release(rxq);
dev->data->rx_queues[i] = NULL;
}
}
}

drivers/net/bnx2x/bnx2x_rxtx.h (new file)

@@ -0,0 +1,85 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_
#define DEFAULT_RX_FREE_THRESH 0
#define DEFAULT_TX_FREE_THRESH 512
#define RTE_PMD_BNX2X_TX_MAX_BURST 1
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
struct bnx2x_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
/**
* Structure associated with each RX queue.
*/
struct bnx2x_rx_queue {
struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
union eth_rx_cqe *cq_ring; /**< RCQ ring virtual address. */
uint64_t cq_ring_phys_addr; /**< RCQ ring DMA address. */
uint64_t *rx_ring; /**< RX ring virtual address. */
uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
struct rte_mbuf **sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint16_t nb_cq_pages; /**< number of RCQ pages. */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t nb_rx_pages; /**< number of RX pages. */
uint16_t rx_bd_head; /**< Index of current rx bd. */
uint16_t rx_bd_tail; /**< Index of last rx bd. */
uint16_t rx_cq_head; /**< Index of current rcq bd. */
uint16_t rx_cq_tail; /**< Index of last rcq bd. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
uint64_t rx_mbuf_alloc; /**< Number of allocated mbufs. */
};
/**
* Structure associated with each TX queue.
*/
struct bnx2x_tx_queue {
union eth_tx_bd_types *tx_ring; /**< TX ring virtual address. */
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
struct rte_mbuf **sw_ring; /**< virtual address of SW ring. */
uint16_t tx_pkt_tail; /**< Index of current tx pkt. */
uint16_t tx_pkt_head; /**< Index of last pkt counted by txeof. */
uint16_t tx_bd_tail; /**< Index of current tx bd. */
uint16_t tx_bd_head; /**< Index of last bd counted by txeof. */
uint16_t nb_tx_desc; /**< number of TX descriptors. */
uint16_t tx_free_thresh; /**< minimum TX before freeing. */
uint16_t nb_tx_avail; /**< Number of TX descriptors available. */
uint16_t nb_tx_pages; /**< number of TX pages */
uint16_t queue_id; /**< TX queue index. */
uint8_t port_id; /**< Device port identifier. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data */
};
int bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
int bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
void bnx2x_dev_rx_queue_release(void *rxq);
void bnx2x_dev_tx_queue_release(void *txq);
int bnx2x_dev_rx_init(struct rte_eth_dev *dev);
void bnx2x_dev_clear_queues(struct rte_eth_dev *dev);
#endif /* _BNX2X_RXTX_H_ */
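The USABLE_*/TOTAL_*/MAX_* ring macros referenced by the setup paths are defined in bnx2x.h, which is not shown in this view. Assuming the usual layout (4 KiB pages and 8-byte RX BDs, i.e. 512 BDs per page with two of them reserved per page as the next-page pointer), the page-doubling logic of bnx2x_dev_rx_queue_setup() sizes a ring as in this self-contained sketch:

#include <stdio.h>

/* assumed constants mirroring bnx2x.h, not quoted from it */
#define BDS_PER_PAGE    512U               /* 4096-byte page / 8-byte BD */
#define USABLE_PER_PAGE (BDS_PER_PAGE - 2) /* 2 BDs chain to the next page */

int main(void)
{
	unsigned int nb_desc = 1500, nb_rx_pages = 1;

	while (nb_rx_pages * USABLE_PER_PAGE < nb_desc)
		nb_rx_pages <<= 1; /* same doubling as the setup code */
	/* prints: req=1500 -> pages=4 usable=2040 total=2048 */
	printf("req=%u -> pages=%u usable=%u total=%u\n", nb_desc, nb_rx_pages,
	       nb_rx_pages * USABLE_PER_PAGE, nb_rx_pages * BDS_PER_PAGE);
	return 0;
}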

drivers/net/bnx2x/bnx2x_stats.c (new file)
File diff suppressed because it is too large

drivers/net/bnx2x/bnx2x_stats.h (new file)

@@ -0,0 +1,632 @@
/*-
* Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Broadcom Corporation nor the name of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written consent.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef BNX2X_STATS_H
#define BNX2X_STATS_H
#include <sys/types.h>
struct nig_stats {
uint32_t brb_discard;
uint32_t brb_packet;
uint32_t brb_truncate;
uint32_t flow_ctrl_discard;
uint32_t flow_ctrl_octets;
uint32_t flow_ctrl_packet;
uint32_t mng_discard;
uint32_t mng_octet_inp;
uint32_t mng_octet_out;
uint32_t mng_packet_inp;
uint32_t mng_packet_out;
uint32_t pbf_octets;
uint32_t pbf_packet;
uint32_t safc_inp;
uint32_t egress_mac_pkt0_lo;
uint32_t egress_mac_pkt0_hi;
uint32_t egress_mac_pkt1_lo;
uint32_t egress_mac_pkt1_hi;
};
enum bnx2x_stats_event {
STATS_EVENT_PMF = 0,
STATS_EVENT_LINK_UP,
STATS_EVENT_UPDATE,
STATS_EVENT_STOP,
STATS_EVENT_MAX
};
enum bnx2x_stats_state {
STATS_STATE_DISABLED = 0,
STATS_STATE_ENABLED,
STATS_STATE_MAX
};
struct bnx2x_eth_stats {
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_bytes_transmitted_hi;
uint32_t total_bytes_transmitted_lo;
uint32_t total_unicast_packets_received_hi;
uint32_t total_unicast_packets_received_lo;
uint32_t total_multicast_packets_received_hi;
uint32_t total_multicast_packets_received_lo;
uint32_t total_broadcast_packets_received_hi;
uint32_t total_broadcast_packets_received_lo;
uint32_t total_unicast_packets_transmitted_hi;
uint32_t total_unicast_packets_transmitted_lo;
uint32_t total_multicast_packets_transmitted_hi;
uint32_t total_multicast_packets_transmitted_lo;
uint32_t total_broadcast_packets_transmitted_hi;
uint32_t total_broadcast_packets_transmitted_lo;
uint32_t valid_bytes_received_hi;
uint32_t valid_bytes_received_lo;
uint32_t error_bytes_received_hi;
uint32_t error_bytes_received_lo;
uint32_t etherstatsoverrsizepkts_hi;
uint32_t etherstatsoverrsizepkts_lo;
uint32_t no_buff_discard_hi;
uint32_t no_buff_discard_lo;
uint32_t rx_stat_ifhcinbadoctets_hi;
uint32_t rx_stat_ifhcinbadoctets_lo;
uint32_t tx_stat_ifhcoutbadoctets_hi;
uint32_t tx_stat_ifhcoutbadoctets_lo;
uint32_t rx_stat_dot3statsfcserrors_hi;
uint32_t rx_stat_dot3statsfcserrors_lo;
uint32_t rx_stat_dot3statsalignmenterrors_hi;
uint32_t rx_stat_dot3statsalignmenterrors_lo;
uint32_t rx_stat_dot3statscarriersenseerrors_hi;
uint32_t rx_stat_dot3statscarriersenseerrors_lo;
uint32_t rx_stat_falsecarriererrors_hi;
uint32_t rx_stat_falsecarriererrors_lo;
uint32_t rx_stat_etherstatsundersizepkts_hi;
uint32_t rx_stat_etherstatsundersizepkts_lo;
uint32_t rx_stat_dot3statsframestoolong_hi;
uint32_t rx_stat_dot3statsframestoolong_lo;
uint32_t rx_stat_etherstatsfragments_hi;
uint32_t rx_stat_etherstatsfragments_lo;
uint32_t rx_stat_etherstatsjabbers_hi;
uint32_t rx_stat_etherstatsjabbers_lo;
uint32_t rx_stat_maccontrolframesreceived_hi;
uint32_t rx_stat_maccontrolframesreceived_lo;
uint32_t rx_stat_bmac_xpf_hi;
uint32_t rx_stat_bmac_xpf_lo;
uint32_t rx_stat_bmac_xcf_hi;
uint32_t rx_stat_bmac_xcf_lo;
uint32_t rx_stat_xoffstateentered_hi;
uint32_t rx_stat_xoffstateentered_lo;
uint32_t rx_stat_xonpauseframesreceived_hi;
uint32_t rx_stat_xonpauseframesreceived_lo;
uint32_t rx_stat_xoffpauseframesreceived_hi;
uint32_t rx_stat_xoffpauseframesreceived_lo;
uint32_t tx_stat_outxonsent_hi;
uint32_t tx_stat_outxonsent_lo;
uint32_t tx_stat_outxoffsent_hi;
uint32_t tx_stat_outxoffsent_lo;
uint32_t tx_stat_flowcontroldone_hi;
uint32_t tx_stat_flowcontroldone_lo;
uint32_t tx_stat_etherstatscollisions_hi;
uint32_t tx_stat_etherstatscollisions_lo;
uint32_t tx_stat_dot3statssinglecollisionframes_hi;
uint32_t tx_stat_dot3statssinglecollisionframes_lo;
uint32_t tx_stat_dot3statsmultiplecollisionframes_hi;
uint32_t tx_stat_dot3statsmultiplecollisionframes_lo;
uint32_t tx_stat_dot3statsdeferredtransmissions_hi;
uint32_t tx_stat_dot3statsdeferredtransmissions_lo;
uint32_t tx_stat_dot3statsexcessivecollisions_hi;
uint32_t tx_stat_dot3statsexcessivecollisions_lo;
uint32_t tx_stat_dot3statslatecollisions_hi;
uint32_t tx_stat_dot3statslatecollisions_lo;
uint32_t tx_stat_etherstatspkts64octets_hi;
uint32_t tx_stat_etherstatspkts64octets_lo;
uint32_t tx_stat_etherstatspkts65octetsto127octets_hi;
uint32_t tx_stat_etherstatspkts65octetsto127octets_lo;
uint32_t tx_stat_etherstatspkts128octetsto255octets_hi;
uint32_t tx_stat_etherstatspkts128octetsto255octets_lo;
uint32_t tx_stat_etherstatspkts256octetsto511octets_hi;
uint32_t tx_stat_etherstatspkts256octetsto511octets_lo;
uint32_t tx_stat_etherstatspkts512octetsto1023octets_hi;
uint32_t tx_stat_etherstatspkts512octetsto1023octets_lo;
uint32_t tx_stat_etherstatspkts1024octetsto1522octets_hi;
uint32_t tx_stat_etherstatspkts1024octetsto1522octets_lo;
uint32_t tx_stat_etherstatspktsover1522octets_hi;
uint32_t tx_stat_etherstatspktsover1522octets_lo;
uint32_t tx_stat_bmac_2047_hi;
uint32_t tx_stat_bmac_2047_lo;
uint32_t tx_stat_bmac_4095_hi;
uint32_t tx_stat_bmac_4095_lo;
uint32_t tx_stat_bmac_9216_hi;
uint32_t tx_stat_bmac_9216_lo;
uint32_t tx_stat_bmac_16383_hi;
uint32_t tx_stat_bmac_16383_lo;
uint32_t tx_stat_dot3statsinternalmactransmiterrors_hi;
uint32_t tx_stat_dot3statsinternalmactransmiterrors_lo;
uint32_t tx_stat_bmac_ufl_hi;
uint32_t tx_stat_bmac_ufl_lo;
uint32_t pause_frames_received_hi;
uint32_t pause_frames_received_lo;
uint32_t pause_frames_sent_hi;
uint32_t pause_frames_sent_lo;
uint32_t etherstatspkts1024octetsto1522octets_hi;
uint32_t etherstatspkts1024octetsto1522octets_lo;
uint32_t etherstatspktsover1522octets_hi;
uint32_t etherstatspktsover1522octets_lo;
uint32_t brb_drop_hi;
uint32_t brb_drop_lo;
uint32_t brb_truncate_hi;
uint32_t brb_truncate_lo;
uint32_t mac_filter_discard;
uint32_t mf_tag_discard;
uint32_t brb_truncate_discard;
uint32_t mac_discard;
uint32_t nig_timer_max;
/* PFC */
uint32_t pfc_frames_received_hi;
uint32_t pfc_frames_received_lo;
uint32_t pfc_frames_sent_hi;
uint32_t pfc_frames_sent_lo;
/* Recovery */
uint32_t recoverable_error;
uint32_t unrecoverable_error;
/* src: Clear-on-Read register; Will not survive PMF Migration */
uint32_t eee_tx_lpi;
/* receive path driver statistics */
uint32_t rx_calls;
uint32_t rx_pkts;
uint32_t rx_soft_errors;
uint32_t rx_hw_csum_errors;
uint32_t rx_ofld_frames_csum_ip;
uint32_t rx_ofld_frames_csum_tcp_udp;
uint32_t rx_budget_reached;
/* tx path driver statistics */
uint32_t tx_pkts;
uint32_t tx_soft_errors;
uint32_t tx_ofld_frames_csum_ip;
uint32_t tx_ofld_frames_csum_tcp;
uint32_t tx_ofld_frames_csum_udp;
uint32_t tx_encap_failures;
uint32_t tx_hw_queue_full;
uint32_t tx_hw_max_queue_depth;
uint32_t tx_dma_mapping_failure;
uint32_t tx_max_drbr_queue_depth;
uint32_t tx_window_violation_std;
uint32_t tx_chain_lost_mbuf;
uint32_t tx_frames_deferred;
uint32_t tx_queue_xoff;
/* mbuf driver statistics */
uint32_t mbuf_defrag_attempts;
uint32_t mbuf_defrag_failures;
uint32_t mbuf_rx_bd_alloc_failed;
uint32_t mbuf_rx_bd_mapping_failed;
/* track the number of allocated mbufs */
uint32_t mbuf_alloc_tx;
uint32_t mbuf_alloc_rx;
};
struct bnx2x_eth_q_stats {
uint32_t total_unicast_bytes_received_hi;
uint32_t total_unicast_bytes_received_lo;
uint32_t total_broadcast_bytes_received_hi;
uint32_t total_broadcast_bytes_received_lo;
uint32_t total_multicast_bytes_received_hi;
uint32_t total_multicast_bytes_received_lo;
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_unicast_bytes_transmitted_hi;
uint32_t total_unicast_bytes_transmitted_lo;
uint32_t total_broadcast_bytes_transmitted_hi;
uint32_t total_broadcast_bytes_transmitted_lo;
uint32_t total_multicast_bytes_transmitted_hi;
uint32_t total_multicast_bytes_transmitted_lo;
uint32_t total_bytes_transmitted_hi;
uint32_t total_bytes_transmitted_lo;
uint32_t total_unicast_packets_received_hi;
uint32_t total_unicast_packets_received_lo;
uint32_t total_multicast_packets_received_hi;
uint32_t total_multicast_packets_received_lo;
uint32_t total_broadcast_packets_received_hi;
uint32_t total_broadcast_packets_received_lo;
uint32_t total_unicast_packets_transmitted_hi;
uint32_t total_unicast_packets_transmitted_lo;
uint32_t total_multicast_packets_transmitted_hi;
uint32_t total_multicast_packets_transmitted_lo;
uint32_t total_broadcast_packets_transmitted_hi;
uint32_t total_broadcast_packets_transmitted_lo;
uint32_t valid_bytes_received_hi;
uint32_t valid_bytes_received_lo;
uint32_t etherstatsoverrsizepkts_hi;
uint32_t etherstatsoverrsizepkts_lo;
uint32_t no_buff_discard_hi;
uint32_t no_buff_discard_lo;
uint32_t total_packets_received_checksum_discarded_hi;
uint32_t total_packets_received_checksum_discarded_lo;
uint32_t total_packets_received_ttl0_discarded_hi;
uint32_t total_packets_received_ttl0_discarded_lo;
uint32_t total_transmitted_dropped_packets_error_hi;
uint32_t total_transmitted_dropped_packets_error_lo;
/* receive path driver statistics */
uint32_t rx_calls;
uint32_t rx_pkts;
uint32_t rx_soft_errors;
uint32_t rx_hw_csum_errors;
uint32_t rx_ofld_frames_csum_ip;
uint32_t rx_ofld_frames_csum_tcp_udp;
uint32_t rx_budget_reached;
/* tx path driver statistics */
uint32_t tx_pkts;
uint32_t tx_soft_errors;
uint32_t tx_ofld_frames_csum_ip;
uint32_t tx_ofld_frames_csum_tcp;
uint32_t tx_ofld_frames_csum_udp;
uint32_t tx_encap_failures;
uint32_t tx_hw_queue_full;
uint32_t tx_hw_max_queue_depth;
uint32_t tx_dma_mapping_failure;
uint32_t tx_max_drbr_queue_depth;
uint32_t tx_window_violation_std;
uint32_t tx_chain_lost_mbuf;
uint32_t tx_frames_deferred;
uint32_t tx_queue_xoff;
/* mbuf driver statistics */
uint32_t mbuf_defrag_attempts;
uint32_t mbuf_defrag_failures;
uint32_t mbuf_rx_bd_alloc_failed;
uint32_t mbuf_rx_bd_mapping_failed;
/* track the number of allocated mbufs */
uint32_t mbuf_alloc_tx;
uint32_t mbuf_alloc_rx;
};
struct bnx2x_eth_stats_old {
uint32_t rx_stat_dot3statsframestoolong_hi;
uint32_t rx_stat_dot3statsframestoolong_lo;
};
struct bnx2x_eth_q_stats_old {
/* Fields to preserve over fw reset */
uint32_t total_unicast_bytes_received_hi;
uint32_t total_unicast_bytes_received_lo;
uint32_t total_broadcast_bytes_received_hi;
uint32_t total_broadcast_bytes_received_lo;
uint32_t total_multicast_bytes_received_hi;
uint32_t total_multicast_bytes_received_lo;
uint32_t total_unicast_bytes_transmitted_hi;
uint32_t total_unicast_bytes_transmitted_lo;
uint32_t total_broadcast_bytes_transmitted_hi;
uint32_t total_broadcast_bytes_transmitted_lo;
uint32_t total_multicast_bytes_transmitted_hi;
uint32_t total_multicast_bytes_transmitted_lo;
/* Fields to preserve the last value of */
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_bytes_transmitted_hi;
uint32_t total_bytes_transmitted_lo;
uint32_t total_unicast_packets_received_hi;
uint32_t total_unicast_packets_received_lo;
uint32_t total_multicast_packets_received_hi;
uint32_t total_multicast_packets_received_lo;
uint32_t total_broadcast_packets_received_hi;
uint32_t total_broadcast_packets_received_lo;
uint32_t total_unicast_packets_transmitted_hi;
uint32_t total_unicast_packets_transmitted_lo;
uint32_t total_multicast_packets_transmitted_hi;
uint32_t total_multicast_packets_transmitted_lo;
uint32_t total_broadcast_packets_transmitted_hi;
uint32_t total_broadcast_packets_transmitted_lo;
uint32_t valid_bytes_received_hi;
uint32_t valid_bytes_received_lo;
/* receive path driver statistics */
uint32_t rx_calls_old;
uint32_t rx_pkts_old;
uint32_t rx_soft_errors_old;
uint32_t rx_hw_csum_errors_old;
uint32_t rx_ofld_frames_csum_ip_old;
uint32_t rx_ofld_frames_csum_tcp_udp_old;
uint32_t rx_budget_reached_old;
/* tx path driver statistics */
uint32_t tx_pkts_old;
uint32_t tx_soft_errors_old;
uint32_t tx_ofld_frames_csum_ip_old;
uint32_t tx_ofld_frames_csum_tcp_old;
uint32_t tx_ofld_frames_csum_udp_old;
uint32_t tx_encap_failures_old;
uint32_t tx_hw_queue_full_old;
uint32_t tx_hw_max_queue_depth_old;
uint32_t tx_dma_mapping_failure_old;
uint32_t tx_max_drbr_queue_depth_old;
uint32_t tx_window_violation_std_old;
uint32_t tx_chain_lost_mbuf_old;
uint32_t tx_frames_deferred_old;
uint32_t tx_queue_xoff_old;
/* mbuf driver statistics */
uint32_t mbuf_defrag_attempts_old;
uint32_t mbuf_defrag_failures_old;
uint32_t mbuf_rx_bd_alloc_failed_old;
uint32_t mbuf_rx_bd_mapping_failed_old;
/* track the number of allocated mbufs */
int mbuf_alloc_tx_old;
int mbuf_alloc_rx_old;
};
struct bnx2x_net_stats_old {
uint32_t rx_dropped;
};
struct bnx2x_fw_port_stats_old {
uint32_t pfc_frames_tx_hi;
uint32_t pfc_frames_tx_lo;
uint32_t pfc_frames_rx_hi;
uint32_t pfc_frames_rx_lo;
uint32_t mac_filter_discard;
uint32_t mf_tag_discard;
uint32_t brb_truncate_discard;
uint32_t mac_discard;
};
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
do { \
s_lo += a_lo; \
s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
} while (0)
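/*
 * Example: s_hi = 1, s_lo = 0xFFFFFFF0 (i.e. 0x1FFFFFFF0) plus
 * a_hi = 0, a_lo = 0x20: s_lo wraps to 0x10, which is < a_lo, so the
 * carry term contributes 1 and the result is s_hi = 2, s_lo = 0x10,
 * i.e. 0x200000010 as expected.
 */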
#define LE32_0 ((uint32_t) 0)
#define LE16_0 ((uint16_t) 0)
/* The _force is for cases where high value is 0 */
#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
ADD_64(s_hi, le32toh(a_hi_le), \
s_lo, le32toh(a_lo_le))
#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
ADD_64(s_hi, le16toh(a_hi_le), \
s_lo, le16toh(a_lo_le))
/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
do { \
if (m_lo < s_lo) { \
/* underflow */ \
d_hi = m_hi - s_hi; \
if (d_hi > 0) { \
/* we can 'loan' 1 */ \
d_hi--; \
d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
} else { \
/* m_hi <= s_hi */ \
d_hi = 0; \
d_lo = 0; \
} \
} else { \
/* m_lo >= s_lo */ \
if (m_hi < s_hi) { \
d_hi = 0; \
d_lo = 0; \
} else { \
/* m_hi >= s_hi */ \
d_hi = m_hi - s_hi; \
d_lo = m_lo - s_lo; \
} \
} \
} while (0)
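/*
 * Note that DIFF_64 saturates: when the subtrahend is larger than the
 * minuend the result clamps to 0:0 instead of wrapping, so a counter
 * that was reset underneath the driver reads as "no change" rather
 * than as a huge spurious increment.
 */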
#define UPDATE_STAT64(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
pstats->mac_stx[0].t##_hi = new->s##_hi; \
pstats->mac_stx[0].t##_lo = new->s##_lo; \
ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
pstats->mac_stx[1].t##_lo, diff.lo); \
} while (0)
#define UPDATE_STAT64_NIG(s, t) \
do { \
DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
diff.lo, new->s##_lo, old->s##_lo); \
ADD_64(estats->t##_hi, diff.hi, \
estats->t##_lo, diff.lo); \
} while (0)
/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
do { \
s_lo += a; \
s_hi += (s_lo < a) ? 1 : 0; \
} while (0)
#define ADD_STAT64(diff, t) \
do { \
ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
pstats->mac_stx[1].t##_lo, new->diff##_lo); \
} while (0)
#define UPDATE_EXTEND_STAT(s) \
do { \
ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
pstats->mac_stx[1].s##_lo, \
new->s); \
} while (0)
#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
diff = le##size##toh(tclient->s) - \
le##size##toh(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
old_uclient->s = uclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_E_USTAT(s, t) \
do { \
UPDATE_EXTEND_USTAT(s, t); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32toh(xclient->s) - le32toh(old_xclient->s); \
old_xclient->s = xclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
#define UPDATE_QSTAT(s, t) \
do { \
qstats->t##_hi = qstats_old->t##_hi + le32toh(s.hi); \
qstats->t##_lo = qstats_old->t##_lo + le32toh(s.lo); \
} while (0)
#define UPDATE_QSTAT_OLD(f) \
do { \
qstats_old->f = qstats->f; \
} while (0)
#define UPDATE_ESTAT_QSTAT_64(s) \
do { \
ADD_64(estats->s##_hi, qstats->s##_hi, \
estats->s##_lo, qstats->s##_lo); \
SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
estats->s##_lo, qstats_old->s##_lo_old); \
qstats_old->s##_hi_old = qstats->s##_hi; \
qstats_old->s##_lo_old = qstats->s##_lo; \
} while (0)
#define UPDATE_ESTAT_QSTAT(s) \
do { \
estats->s += qstats->s; \
estats->s -= qstats_old->s##_old; \
qstats_old->s##_old = qstats->s; \
} while (0)
#define UPDATE_FSTAT_QSTAT(s) \
do { \
ADD_64(fstats->s##_hi, qstats->s##_hi, \
fstats->s##_lo, qstats->s##_lo); \
SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
fstats->s##_lo, qstats_old->s##_lo); \
estats->s##_hi = fstats->s##_hi; \
estats->s##_lo = fstats->s##_lo; \
qstats_old->s##_hi = qstats->s##_hi; \
qstats_old->s##_lo = qstats->s##_lo; \
} while (0)
#define UPDATE_FW_STAT(s) \
do { \
estats->s = le32toh(tport->s) + fwstats->s; \
} while (0)
#define UPDATE_FW_STAT_OLD(f) \
do { \
fwstats->f = estats->f; \
} while (0)
#define UPDATE_ESTAT(s, t) \
do { \
SUB_64(estats->s##_hi, estats_old->t##_hi, \
estats->s##_lo, estats_old->t##_lo); \
ADD_64(estats->s##_hi, estats->t##_hi, \
estats->s##_lo, estats->t##_lo); \
estats_old->t##_hi = estats->t##_hi; \
estats_old->t##_lo = estats->t##_lo; \
} while (0)
/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
} while (0)
/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
do { \
uint32_t s_hi = 0; \
SUB_64(m_hi, s_hi, m_lo, s); \
} while (0)
#define SUB_EXTEND_USTAT(s, t) \
do { \
diff = le32toh(uclient->s) - le32toh(old_uclient->s); \
SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
struct bnx2x_softc;
void bnx2x_stats_init(struct bnx2x_softc *sc);
void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event);
void bnx2x_save_statistics(struct bnx2x_softc *sc);
void bnx2x_memset_stats(struct bnx2x_softc *sc);
#endif /* BNX2X_STATS_H */

drivers/net/bnx2x/bnx2x_vfpf.c (new file)

@@ -0,0 +1,597 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#include "bnx2x.h"
/* calculate the crc in the bulletin board */
static inline uint32_t
bnx2x_vf_crc(struct bnx2x_vf_bulletin *bull)
{
uint32_t crc_sz = sizeof(bull->crc), length = bull->length - crc_sz;
return ECORE_CRC32_LE(0, (uint8_t *)bull + crc_sz, length);
}
/* Check whether there are MAC/channel updates for the VF.
* Returns TRUE if something was updated.
*/
int
bnx2x_check_bull(struct bnx2x_softc *sc)
{
struct bnx2x_vf_bulletin *bull;
uint8_t tries = 0;
uint16_t old_version = sc->old_bulletin.version;
uint64_t valid_bitmap;
bull = sc->pf2vf_bulletin;
if (old_version == bull->version) {
return FALSE;
} else {
/* Check the crc until we get the correct data */
while (tries < BNX2X_VF_BULLETIN_TRIES) {
bull = sc->pf2vf_bulletin;
if (bull->crc == bnx2x_vf_crc(bull))
break;
PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x",
bull->crc, bnx2x_vf_crc(bull));
++tries;
}
if (tries == BNX2X_VF_BULLETIN_TRIES) {
PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
tries);
return FALSE;
}
}
valid_bitmap = bull->valid_bitmap;
/* check the mac address and VLAN and allocate memory if valid */
if (valid_bitmap & (1 << MAC_ADDR_VALID) && memcmp(bull->mac, sc->old_bulletin.mac, ETH_ALEN))
rte_memcpy(&sc->link_params.mac_addr, bull->mac, ETH_ALEN);
if (valid_bitmap & (1 << VLAN_VALID))
rte_memcpy(&bull->vlan, &sc->old_bulletin.vlan, VLAN_HLEN);
sc->old_bulletin = *bull;
return TRUE;
}
/* add tlv to a buffer */
#define BNX2X_TLV_APPEND(_tlvs, _offset, _type, _length) \
((struct vf_first_tlv *)((uint64_t)_tlvs + _offset))->type = _type; \
((struct vf_first_tlv *)((uint64_t)_tlvs + _offset))->length = _length
/* Initialize the header of the first TLV and clear the mailbox */
static void
bnx2x_init_first_tlv(struct bnx2x_softc *sc, struct vf_first_tlv *tlv,
uint16_t type, uint16_t len)
{
struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;
PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
BNX2X_TLV_APPEND(tlv, 0, type, len);
/* Initialize header of the first tlv */
tlv->reply_offset = sizeof(mbox->query);
}
#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
#define BNX2X_VF_CMD_ADDR_HI (BNX2X_VF_CMD_ADDR_LO + 4)
#define BNX2X_VF_CMD_TRIGGER (BNX2X_VF_CMD_ADDR_HI + 4)
#define BNX2X_VF_CHANNEL_DELAY 100
#define BNX2X_VF_CHANNEL_TRIES 100
static int
bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
{
uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
uint8_t i;
if (!*status) {
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
*status = BNX2X_VF_STATUS_SUCCESS;
return 0;
}
REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
/* memory barrier to ensure that FW can read phys_addr */
wmb();
REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
/* Poll until the PF completes the request */
for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
if (*status)
break;
}
if (i == BNX2X_VF_CHANNEL_TRIES) {
PMD_DRV_LOG(ERR, "Response from PF timed out");
return -EAGAIN;
}
if (BNX2X_VF_STATUS_SUCCESS != *status) {
PMD_DRV_LOG(ERR, "Bad reply from PF : %u",
*status);
return -EINVAL;
}
} else {
PMD_DRV_LOG(ERR, "status should be zero before message"
"to pf was sent");
return -EINVAL;
}
PMD_DRV_LOG(DEBUG, "Response from PF was received");
return 0;
}
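/*
 * Request/response sequence implemented above, end to end:
 * 1. the VF builds its TLV chain in vf2pf_mbox
 *    (bnx2x_init_first_tlv() zeroes the mailbox first);
 * 2. it writes the mailbox DMA address to the CSDM address registers,
 *    issues wmb(), then hits the trigger register;
 * 3. the PF DMAs its reply into the resp half of the same mailbox,
 *    setting resp.common_reply.status;
 * 4. the VF polls status up to BNX2X_VF_CHANNEL_TRIES times at
 *    BNX2X_VF_CHANNEL_DELAY ms intervals (10 s worst case) and treats
 *    a still-zero status as -EAGAIN.
 */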
static inline uint16_t bnx2x_check_me_flags(uint32_t val)
{
if (((val) & ME_REG_VF_VALID) && (!((val) & ME_REG_VF_ERR)))
return ME_REG_VF_VALID;
else
return 0;
}
#define BNX2X_ME_ANSWER_DELAY 100
#define BNX2X_ME_ANSWER_TRIES 10
static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
{
uint32_t val;
uint8_t i = 0;
while (i <= BNX2X_ME_ANSWER_TRIES) {
val = BNX2X_DB_READ(DOORBELL_ADDR(sc, 0));
if (bnx2x_check_me_flags(val))
return VF_ID(val);
DELAY_MS(BNX2X_ME_ANSWER_DELAY);
i++;
}
return -EINVAL;
}
#define BNX2X_VF_OBTAIN_MAX_TRIES 3
#define BNX2X_VF_OBTAIN_MAC_FILTERS 1
#define BNX2X_VF_OBTAIN_MC_FILTERS 10
struct bnx2x_obtain_status {
int success;
int err_code;
};
static
struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
{
int tries = 0;
struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp,
*sc_resp = &sc->acquire_resp;
struct vf_resource_query *res_query;
struct vf_resc *resc;
struct bnx2x_obtain_status status;
int res_obtained = false;
do {
PMD_DRV_LOG(DEBUG, "trying to get resources");
if (bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr)) {
/* timeout */
status.success = 0;
status.err_code = 0;
return status;
}
memcpy(sc_resp, resp, sizeof(sc->acquire_resp));
tries++;
/* check whether the PF accepted the request */
if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
PMD_DRV_LOG(DEBUG, "resources obtained successfully");
res_obtained = true;
} else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
PMD_DRV_LOG(DEBUG,
"PF cannot allocate requested amount of resources");
res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
resc = &sc_resp->resc;
/* PF refused our request. Try to decrease request params */
res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs);
res_query->num_rxqs = min(res_query->num_rxqs, resc->num_rxqs);
res_query->num_sbs = min(res_query->num_sbs, resc->num_sbs);
res_query->num_mac_filters = min(res_query->num_mac_filters, resc->num_mac_filters);
res_query->num_vlan_filters = min(res_query->num_vlan_filters, resc->num_vlan_filters);
res_query->num_mc_filters = min(res_query->num_mc_filters, resc->num_mc_filters);
memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
} else {
PMD_DRV_LOG(ERR, "Resources cannot be obtained. Status of handling: %d. Aborting",
sc_resp->status);
status.success = 0;
status.err_code = -EAGAIN;
return status;
}
} while (!res_obtained);
status.success = 1;
return status;
}
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count)
{
struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
int vf_id;
struct bnx2x_obtain_status obtain_status;
bnx2x_vf_close(sc);
bnx2x_init_first_tlv(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
vf_id = bnx2x_read_vf_id(sc);
if (vf_id < 0)
return -EAGAIN;
acq->vf_id = vf_id;
acq->res_query.num_rxqs = rx_count;
acq->res_query.num_txqs = tx_count;
acq->res_query.num_sbs = sc->igu_sb_cnt;
acq->res_query.num_mac_filters = BNX2X_VF_OBTAIN_MAC_FILTERS;
acq->res_query.num_mc_filters = BNX2X_VF_OBTAIN_MC_FILTERS;
acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr;
BNX2X_TLV_APPEND(acq, acq->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* request the resources in a loop */
obtain_status = bnx2x_loop_obtain_resources(sc);
if (!obtain_status.success)
return obtain_status.err_code;
struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp;
sc->devinfo.chip_id |= (sc_resp.chip_num & 0xFFFF);
sc->devinfo.int_block = INT_BLOCK_IGU;
sc->devinfo.chip_port_mode = CHIP_2_PORT_MODE;
sc->devinfo.mf_info.mf_ov = 0;
sc->devinfo.mf_info.mf_mode = 0;
sc->devinfo.flash_size = 0;
sc->igu_sb_cnt = sc_resp.resc.num_sbs;
sc->igu_base_sb = sc_resp.resc.hw_sbs[0] & 0xFF;
sc->igu_dsb_id = -1;
sc->link_params.chip_id = sc->devinfo.chip_id;
sc->doorbell_size = sc_resp.db_size;
sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;
PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x",
sc->igu_sb_cnt, sc->igu_base_sb);
strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver) - 1);
sc->fw_ver[sizeof(sc->fw_ver) - 1] = '\0'; /* ensure NUL termination */
if (is_valid_ether_addr(sc_resp.resc.current_mac_addr))
(void)rte_memcpy(sc->link_params.mac_addr,
sc_resp.resc.current_mac_addr,
ETH_ALEN);
return 0;
}
/* Ask PF to release VF's resources */
void
bnx2x_vf_close(struct bnx2x_softc *sc)
{
struct vf_release_tlv *query;
int vf_id = bnx2x_read_vf_id(sc);
int ret;
if (vf_id >= 0) {
query = &sc->vf2pf_mbox->query[0].release;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
sizeof(*query));
query->vf_id = vf_id;
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to release VF");
}
}
}
/* Let PF know the VF status blocks phys_addrs */
int
bnx2x_vf_init(struct bnx2x_softc *sc)
{
struct vf_init_tlv *query;
int i, ret;
query = &sc->vf2pf_mbox->query[0].init;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
sizeof(*query));
FOR_EACH_QUEUE(sc, i) {
query->sb_addr[i] = sc->fp[i].sb_dma.paddr;
}
query->stats_step = sizeof(struct per_queue_stats);
query->stats_addr = sc->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to init VF");
return ret;
}
PMD_DRV_LOG(DEBUG, "VF was initialized");
return 0;
}
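/* Tear down every queue, drop the MAC filter, then send CLOSE to the PF */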
void
bnx2x_vf_unload(struct bnx2x_softc *sc)
{
struct vf_close_tlv *query;
struct vf_q_op_tlv *query_op;
int i, vf_id, ret;
vf_id = bnx2x_read_vf_id(sc);
if (vf_id >= 0) {
FOR_EACH_QUEUE(sc, i) {
query_op = &sc->vf2pf_mbox->query[0].q_op;
bnx2x_init_first_tlv(sc, &query_op->first_tlv,
BNX2X_VF_TLV_TEARDOWN_Q,
sizeof(*query_op));
query_op->vf_qid = i;
BNX2X_TLV_APPEND(query_op, query_op->first_tlv.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret)
PMD_DRV_LOG(ERR,
"Bad reply for vf_q %d teardown", i);
}
bnx2x_vf_set_mac(sc, false);
query = &sc->vf2pf_mbox->query[0].close;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
sizeof(*query));
query->vf_id = vf_id;
BNX2X_TLV_APPEND(query, query->first_tlv.length,
BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret)
PMD_DRV_LOG(ERR,
"Bad reply from PF for close message");
}
}
static inline uint16_t
bnx2x_vf_q_flags(uint8_t leading)
{
uint16_t flags = leading ? BNX2X_VF_Q_FLAG_LEADING_RSS : 0;
flags |= BNX2X_VF_Q_FLAG_CACHE_ALIGN;
flags |= BNX2X_VF_Q_FLAG_STATS;
flags |= BNX2X_VF_Q_FLAG_VLAN;
return flags;
}
static void
bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
struct vf_rxq_params *rxq_init, uint16_t flags)
{
struct bnx2x_rx_queue *rxq;
rxq = sc->rx_queues[fp->index];
if (!rxq) {
PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index);
return;
}
rxq_init->rcq_addr = rxq->cq_ring_phys_addr;
rxq_init->rcq_np_addr = rxq->cq_ring_phys_addr + BNX2X_PAGE_SIZE;
rxq_init->rxq_addr = rxq->rx_ring_phys_addr;
rxq_init->vf_sb_id = fp->index;
rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
rxq_init->mtu = sc->mtu;
rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->flags = flags;
rxq_init->stat_id = -1;
rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
}
static void
bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
struct vf_txq_params *txq_init, uint16_t flags)
{
struct bnx2x_tx_queue *txq;
txq = sc->tx_queues[fp->index];
if (!txq) {
PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index);
return;
}
txq_init->txq_addr = txq->tx_ring_phys_addr;
txq_init->sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
txq_init->flags = flags;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->vf_sb_id = fp->index;
}
int
bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int leading)
{
struct vf_setup_q_tlv *query;
uint16_t flags = bnx2x_vf_q_flags(leading);
int ret;
query = &sc->vf2pf_mbox->query[0].setup_q;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
sizeof(*query));
query->vf_qid = fp->index;
query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID;
bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags);
bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags);
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
fp->index);
return -EINVAL;
}
return 0;
}
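/*
 * Set (or clear) the unicast MAC filter for the leading queue. If the
 * PF rejects the request because it pushed a different MAC through the
 * bulletin board, adopt the new address and retry.
 */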
int
bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
{
struct vf_set_q_filters_tlv *query;
struct vf_common_reply_tlv *reply;
query = &sc->vf2pf_mbox->query[0].set_q_filters;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
sizeof(*query));
query->vf_qid = sc->fp->index;
query->mac_filters_cnt = 1;
query->flags = BNX2X_VF_MAC_VLAN_CHANGED;
query->filters[0].flags = (set ? BNX2X_VF_Q_FILTER_SET_MAC : 0) |
BNX2X_VF_Q_FILTER_DEST_MAC_VALID;
bnx2x_check_bull(sc);
rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
reply = &sc->vf2pf_mbox->resp.common_reply;
while (BNX2X_VF_STATUS_FAILURE == reply->status &&
bnx2x_check_bull(sc)) {
/* A new mac was configured by PF for us */
rte_memcpy(sc->link_params.mac_addr, sc->pf2vf_bulletin->mac,
ETH_ALEN);
rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
ETH_ALEN);
bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
}
if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
reply->status);
return -EINVAL;
}
return 0;
}
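/* Send the RSS key, indirection table and flags to the PF in one TLV */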
int
bnx2x_vf_config_rss(struct bnx2x_softc *sc,
struct ecore_config_rss_params *params)
{
struct vf_rss_tlv *query;
int ret;
query = &sc->vf2pf_mbox->query[0].update_rss;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
sizeof(*query));
/* add list termination tlv */
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
query->rss_key_size = T_ETH_RSS_KEY;
rte_memcpy(query->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
query->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
query->rss_result_mask = params->rss_result_mask;
query->rss_flags = params->rss_flags;
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to send message to PF, rc %d", ret);
return ret;
}
return 0;
}
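/*
 * Translate the driver rx_mode into accept flags via
 * bnx2x_fill_accept_flags(); only the RX mask is forwarded to the PF,
 * the TX mask is computed but not sent.
 */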
int
bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
{
struct vf_set_q_filters_tlv *query;
unsigned long tx_mask;
int ret;
query = &sc->vf2pf_mbox->query[0].set_q_filters;
bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
sizeof(*query));
query->vf_qid = 0;
query->flags = BNX2X_VF_RX_MASK_CHANGED;
if (bnx2x_fill_accept_flags(sc, sc->rx_mode, &query->rx_mask, &tx_mask)) {
return -EINVAL;
}
BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ret = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (ret) {
PMD_DRV_LOG(ERR, "Failed to send message to PF, rc %d", ret);
return ret;
}
return 0;
}

View File

@ -0,0 +1,315 @@
/*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
*
* All rights reserved.
*/
#ifndef BNX2X_VFPF_H
#define BNX2X_VFPF_H
#include "ecore_sp.h"
#define VLAN_HLEN 4
struct vf_resource_query {
uint8_t num_rxqs;
uint8_t num_txqs;
uint8_t num_sbs;
uint8_t num_mac_filters;
uint8_t num_vlan_filters;
uint8_t num_mc_filters;
};
#define BNX2X_VF_STATUS_SUCCESS 1
#define BNX2X_VF_STATUS_FAILURE 2
#define BNX2X_VF_STATUS_NO_RESOURCES 4
#define BNX2X_VF_BULLETIN_TRIES 5
#define BNX2X_VF_Q_FLAG_CACHE_ALIGN 0x0008
#define BNX2X_VF_Q_FLAG_STATS 0x0010
#define BNX2X_VF_Q_FLAG_OV 0x0020
#define BNX2X_VF_Q_FLAG_VLAN 0x0040
#define BNX2X_VF_Q_FLAG_COS 0x0080
#define BNX2X_VF_Q_FLAG_HC 0x0100
#define BNX2X_VF_Q_FLAG_DHC 0x0200
#define BNX2X_VF_Q_FLAG_LEADING_RSS 0x0400
struct vf_first_tlv {
uint16_t type;
uint16_t length;
uint32_t reply_offset;
};
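/*
 * Every request begins with a vf_first_tlv and is terminated by a
 * channel_list_end_tlv; `length` doubles as the offset at which the
 * next tlv is appended (see BNX2X_TLV_APPEND).
 */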
/* tlv struct for all PF replies except acquire */
struct vf_common_reply_tlv {
uint16_t type;
uint16_t length;
uint8_t status;
uint8_t pad[3];
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
uint16_t type;
uint16_t length;
uint32_t pad;
};
/* Acquire */
struct vf_acquire_tlv {
struct vf_first_tlv first_tlv;
uint8_t vf_id;
uint8_t pad[3];
struct vf_resource_query res_query;
uint64_t bulletin_addr;
};
/* simple operation request on queue */
struct vf_q_op_tlv {
struct vf_first_tlv first_tlv;
uint8_t vf_qid;
uint8_t pad[3];
};
/* receive side scaling tlv */
struct vf_rss_tlv {
struct vf_first_tlv first_tlv;
uint32_t rss_flags;
uint8_t rss_result_mask;
uint8_t ind_table_size;
uint8_t rss_key_size;
uint8_t pad;
uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
uint32_t rss_key[T_ETH_RSS_KEY]; /* hash values */
};
struct vf_resc {
#define BNX2X_VF_MAX_QUEUES_PER_VF 16
#define BNX2X_VF_MAX_SBS_PER_VF 16
uint16_t hw_sbs[BNX2X_VF_MAX_SBS_PER_VF];
uint8_t hw_qid[BNX2X_VF_MAX_QUEUES_PER_VF];
uint8_t num_rxqs;
uint8_t num_txqs;
uint8_t num_sbs;
uint8_t num_mac_filters;
uint8_t num_vlan_filters;
uint8_t num_mc_filters;
uint8_t permanent_mac_addr[ETH_ALEN];
uint8_t current_mac_addr[ETH_ALEN];
uint16_t pf_link_speed;
uint32_t pf_link_supported;
};
/* tlv struct holding reply for acquire */
struct vf_acquire_resp_tlv {
uint16_t type;
uint16_t length;
uint8_t status;
uint8_t pad1[3];
uint32_t chip_num;
uint8_t pad2[4];
char fw_ver[32];
uint16_t db_size;
uint8_t pad3[2];
struct vf_resc resc;
};
/* Init VF */
struct vf_init_tlv {
struct vf_first_tlv first_tlv;
uint64_t sb_addr[BNX2X_VF_MAX_SBS_PER_VF];
uint64_t spq_addr;
uint64_t stats_addr;
uint16_t stats_step;
uint32_t flags;
uint32_t pad[2];
};
struct vf_rxq_params {
/* physical addresses */
uint64_t rcq_addr;
uint64_t rcq_np_addr;
uint64_t rxq_addr;
uint64_t pad1;
/* sb + hc info */
uint8_t vf_sb_id;
uint8_t sb_cq_index;
uint16_t hc_rate; /* desired interrupts per sec. */
/* rx buffer info */
uint16_t mtu;
uint16_t buf_sz;
uint16_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
uint8_t pad2[5];
uint8_t drop_flags;
uint8_t cache_line_log; /* BNX2X_VF_Q_FLAG_CACHE_ALIGN */
uint8_t pad3;
};
struct vf_txq_params {
/* physical addresses */
uint64_t txq_addr;
/* sb + hc info */
uint8_t vf_sb_id; /* index in hw_sbs[] */
uint8_t sb_index; /* Index in the SB */
uint16_t hc_rate; /* desired interrupts per sec. */
uint32_t flags; /* for BNX2X_VF_Q_FLAG_X flags */
uint16_t stat_id; /* valid if BNX2X_VF_Q_FLAG_STATS */
uint8_t traffic_type; /* see in setup_context() */
uint8_t pad;
};
/* Setup Queue */
struct vf_setup_q_tlv {
struct vf_first_tlv first_tlv;
struct vf_rxq_params rxq;
struct vf_txq_params txq;
uint8_t vf_qid; /* index in hw_qid[] */
uint8_t param_valid;
#define VF_RXQ_VALID 0x01
#define VF_TXQ_VALID 0x02
uint8_t pad[2];
};
/* Set Queue Filters */
struct vf_q_mac_vlan_filter {
uint32_t flags;
#define BNX2X_VF_Q_FILTER_DEST_MAC_VALID 0x01
#define BNX2X_VF_Q_FILTER_VLAN_TAG_VALID 0x02
#define BNX2X_VF_Q_FILTER_SET_MAC 0x100 /* set/clear */
uint8_t mac[ETH_ALEN];
uint16_t vlan_tag;
};
#define _UP_ETH_ALEN (6)
/* configure queue filters */
struct vf_set_q_filters_tlv {
struct vf_first_tlv first_tlv;
uint32_t flags;
#define BNX2X_VF_MAC_VLAN_CHANGED 0x01
#define BNX2X_VF_MULTICAST_CHANGED 0x02
#define BNX2X_VF_RX_MASK_CHANGED 0x04
uint8_t vf_qid; /* index in hw_qid[] */
uint8_t mac_filters_cnt;
uint8_t multicast_cnt;
uint8_t pad;
#define VF_MAX_MAC_FILTERS 16
#define VF_MAX_VLAN_FILTERS 16
#define VF_MAX_FILTERS (VF_MAX_MAC_FILTERS +\
VF_MAX_VLAN_FILTERS)
struct vf_q_mac_vlan_filter filters[VF_MAX_FILTERS];
#define VF_MAX_MULTICAST_PER_VF 32
uint8_t multicast[VF_MAX_MULTICAST_PER_VF][_UP_ETH_ALEN];
unsigned long rx_mask;
};
/* close VF (disable VF) */
struct vf_close_tlv {
struct vf_first_tlv first_tlv;
uint16_t vf_id; /* for debug */
uint8_t pad[2];
};
/* release the VF's acquired resources */
struct vf_release_tlv {
struct vf_first_tlv first_tlv;
uint16_t vf_id; /* for debug */
uint8_t pad[2];
};
union query_tlvs {
struct vf_first_tlv first_tlv;
struct vf_acquire_tlv acquire;
struct vf_init_tlv init;
struct vf_close_tlv close;
struct vf_q_op_tlv q_op;
struct vf_setup_q_tlv setup_q;
struct vf_set_q_filters_tlv set_q_filters;
struct vf_release_tlv release;
struct vf_rss_tlv update_rss;
struct channel_list_end_tlv list_end;
};
union resp_tlvs {
struct vf_common_reply_tlv common_reply;
struct vf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
};
/* struct allocated by VF driver, PF sends updates to VF via bulletin */
struct bnx2x_vf_bulletin {
uint32_t crc; /* crc of the structure, to ensure it is not
* read mid-update
*/
uint16_t version;
uint16_t length;
uint64_t valid_bitmap; /* bitmap indicating which fields
* hold valid values
*/
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
#define VLAN_VALID 1 /* when set, the vf should not access the
* vf channel
*/
#define CHANNEL_DOWN 2 /* vf channel is disabled. VFs are not
* to attempt to send messages on the
* channel after this bit is set
*/
uint8_t mac[ETH_ALEN];
uint8_t mac_pad[2];
uint16_t vlan;
uint8_t vlan_pad[6];
};
#define MAX_TLVS_IN_LIST 50
enum channel_tlvs {
BNX2X_VF_TLV_NONE, /* ends tlv sequence */
BNX2X_VF_TLV_ACQUIRE,
BNX2X_VF_TLV_INIT,
BNX2X_VF_TLV_SETUP_Q,
BNX2X_VF_TLV_SET_Q_FILTERS,
BNX2X_VF_TLV_ACTIVATE_Q,
BNX2X_VF_TLV_DEACTIVATE_Q,
BNX2X_VF_TLV_TEARDOWN_Q,
BNX2X_VF_TLV_CLOSE,
BNX2X_VF_TLV_RELEASE,
BNX2X_VF_TLV_UPDATE_RSS_OLD,
BNX2X_VF_TLV_PF_RELEASE_VF,
BNX2X_VF_TLV_LIST_END,
BNX2X_VF_TLV_FLR,
BNX2X_VF_TLV_PF_SET_MAC,
BNX2X_VF_TLV_PF_SET_VLAN,
BNX2X_VF_TLV_UPDATE_RSS,
BNX2X_VF_TLV_MAX
};
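/*
 * Shared mailbox page: the request tlv chain is built in query[] and
 * the PF writes its reply into resp, at the reply_offset advertised in
 * the first tlv.
 */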
struct bnx2x_vf_mbx_msg {
union query_tlvs query[BNX2X_VF_MAX_QUEUES_PER_VF];
union resp_tlvs resp;
};
void bnx2x_add_tlv(void *tlvs_list, uint16_t offset, uint16_t type, uint16_t length);
int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set);
int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params);
#endif /* BNX2X_VFPF_H */