net: Remove IXGBE driver, we're going with DPDK instead!

This commit is contained in:
Adam Belay 2017-11-09 13:48:02 -05:00
parent 5ac5fc0daa
commit edb5fdbe4c
35 changed files with 1 addition and 27866 deletions

File diff suppressed because it is too large


@@ -1,70 +0,0 @@
/*
* ethqueue.h - ethernet queue support
*/
#pragma once
#include <net/mbuf.h>
/*
* Receive Queue API
*/
struct eth_rx_queue {
int (*recv) (struct eth_rx_queue *rx, int nr, struct mbuf **pkts);
};
/**
* eth_rx_recv - receive packets on an RX queue
* @rx: the RX queue
* @nr: the length of the array, determining the max batch size
* @pkts: an array to store received packets
*
* Returns the number of packets received, a value less than or equal to @nr.
* A negative return value indicates an error occurred.
*/
static inline int
eth_rx_recv(struct eth_rx_queue *rx, int nr, struct mbuf **pkts)
{
return rx->recv(rx, nr, pkts);
}
/*
* Transmit Queue API
*/
struct eth_tx_queue {
int (*reclaim) (struct eth_tx_queue *tx);
int (*xmit) (struct eth_tx_queue *tx, int nr, struct mbuf **pkts);
};
/**
* eth_tx_reclaim - scans the queue and reclaims finished buffers
* @tx: the TX queue
*
* NOTE: scatter-gather mbufs can span multiple descriptors, so
* take that into account when interpreting the count provided by
* this function.
*
* Returns an available descriptor count.
*/
static inline int eth_tx_reclaim(struct eth_tx_queue *tx)
{
return tx->reclaim(tx);
}
/**
* eth_tx_xmit - transmits packets on a TX queue
* @tx: the TX queue
* @nr: the number of mbufs to transmit
* @mbufs: an array of mbufs to transmit
*
* Returns the number of mbufs transmitted.
*/
static inline int
eth_tx_xmit(struct eth_tx_queue *tx, int nr, struct mbuf **mbufs)
{
return tx->xmit(tx, nr, mbufs);
}
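
The two halves of this API were meant to be driven by a polling loop. A minimal sketch of such a loop follows; the RX_BATCH size, the include path, and the mbuf_free() helper are illustrative assumptions, not part of this interface:

#include <net/mbuf.h>
#include <net/ethqueue.h> /* assumed path for the header above */

#define RX_BATCH 32 /* illustrative batch size */

/* Poll @rx and echo every received packet back out of @tx. */
static void forward_loop(struct eth_rx_queue *rx, struct eth_tx_queue *tx)
{
	struct mbuf *pkts[RX_BATCH];
	int nr, sent;

	while (1) {
		/* reclaim descriptors for packets the NIC has finished sending */
		eth_tx_reclaim(tx);

		nr = eth_rx_recv(rx, RX_BATCH, pkts);
		if (nr <= 0)
			continue;

		sent = eth_tx_xmit(tx, nr, pkts);
		/* drop whatever the TX ring could not accept
		 * (mbuf_free() is an assumed helper) */
		while (sent < nr)
			mbuf_free(pkts[sent++]);
	}
}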


@@ -1,381 +0,0 @@
/*
* ethdev.c - ethernet device support
*/
#include <string.h>
#include <base/stddef.h>
#include <base/log.h>
#include <base/lock.h>
#include <base/pci.h>
#include <net/ethernet.h>
#include <net/ethdev.h>
/* TODO: more fine-grained locking */
static DEFINE_SPINLOCK(eth_dev_lock);
/**
* eth_dev_get_hw_mac - retrieves the default MAC address
* @dev: the ethernet device
* @mac_addr: pointer to store the mac
*/
void eth_dev_get_hw_mac(struct rte_eth_dev *dev, struct eth_addr *mac_addr)
{
memcpy(&mac_addr->addr[0], &dev->data->mac_addrs[0], ETH_ADDR_LEN);
}
/**
* eth_dev_set_hw_mac - sets the default MAC address
* @dev: the ethernet device
* @mac_addr: pointer to the MAC address to set
*/
void eth_dev_set_hw_mac(struct rte_eth_dev *dev, struct eth_addr *mac_addr)
{
dev->dev_ops->mac_addr_add(dev, mac_addr, 0, 0);
}
static int eth_dev_prepare_start(struct rte_eth_dev *dev)
{
int ret;
struct rte_eth_dev_info dev_info;
dev->dev_ops->dev_infos_get(dev, &dev_info);
dev->data->nb_rx_queues = 0;
dev->data->nb_tx_queues = 0;
dev->data->max_rx_queues =
min(dev_info.max_rx_queues, ETH_RSS_RETA_MAX_QUEUE);
dev->data->max_tx_queues = dev_info.max_tx_queues;
dev->data->rx_queues = malloc(sizeof(struct eth_rx_queue *) *
dev->data->max_rx_queues);
if (!dev->data->rx_queues)
return -ENOMEM;
dev->data->tx_queues = malloc(sizeof(struct eth_tx_queue *) *
dev->data->max_tx_queues);
if (!dev->data->tx_queues) {
ret = -ENOMEM;
goto err_tx_queues;
}
return 0;
err_tx_queues:
free(dev->data->rx_queues);
return ret;
}
/**
* eth_dev_start - starts an ethernet device
* @dev: the ethernet device
*
* Returns 0 if successful, otherwise failure.
*/
int eth_dev_start(struct rte_eth_dev *dev)
{
int ret;
struct eth_addr macaddr;
struct rte_eth_link link;
ret = eth_dev_prepare_start(dev);
if (ret)
return ret;
ret = dev->dev_ops->dev_start(dev);
if (ret)
return ret;
dev->dev_ops->promiscuous_disable(dev);
dev->dev_ops->allmulticast_enable(dev);
eth_dev_get_hw_mac(dev, &macaddr);
log_info("eth: started an ethernet device\n");
log_info("eth:\tMAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
macaddr.addr[0], macaddr.addr[1],
macaddr.addr[2], macaddr.addr[3],
macaddr.addr[4], macaddr.addr[5]);
dev->dev_ops->link_update(dev, 1);
link = dev->data->dev_link;
if (!link.link_status) {
log_warn("eth:\tlink appears to be down, check connection.\n");
} else {
log_info("eth:\tlink up - speed %u Mbps, %s\n",
(uint32_t)link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
}
return 0;
}
/**
* eth_dev_stop - stops an ethernet device
* @dev: the ethernet device
*/
void eth_dev_stop(struct rte_eth_dev *dev)
{
int i;
dev->dev_ops->dev_stop(dev);
for (i = 0; i < dev->data->nb_tx_queues; i++)
dev->dev_ops->tx_queue_release(dev->data->tx_queues[i]);
for (i = 0; i < dev->data->nb_rx_queues; i++)
dev->dev_ops->rx_queue_release(dev->data->rx_queues[i]);
dev->data->nb_rx_queues = 0;
dev->data->nb_tx_queues = 0;
free(dev->data->tx_queues);
free(dev->data->rx_queues);
}
/**
* eth_dev_get_rx_queue - get the next available rx queue
* @dev: the ethernet device
* @rx_queue: pointer to store a pointer to struct eth_rx_queue
* @nb_desc: the number of descriptor ring entries (must be power of 2)
* @a: the packet allocator to use for received buffers
*
* Returns 0 if successful, otherwise failure.
*/
int eth_dev_get_rx_queue(struct rte_eth_dev *dev,
struct eth_rx_queue **rx_queue, uint16_t nb_desc,
struct mbuf_allocator *a)
{
int rx_idx, ret;
if (!is_power_of_two(nb_desc))
return -EINVAL;
spin_lock(&eth_dev_lock);
rx_idx = dev->data->nb_rx_queues;
if (rx_idx >= dev->data->max_rx_queues) {
spin_unlock(&eth_dev_lock);
return -EMFILE;
}
ret = dev->dev_ops->rx_queue_setup(dev, rx_idx, -1, nb_desc, a);
if (ret) {
spin_unlock(&eth_dev_lock);
return ret;
}
dev->data->nb_rx_queues++;
spin_unlock(&eth_dev_lock);
*rx_queue = dev->data->rx_queues[rx_idx];
return 0;
}
/**
* eth_dev_get_tx_queue - get the next available tx queue
* @dev: the ethernet device
* @tx_queue: pointer to store a pointer to struct eth_tx_queue
* @nb_desc: the number of descriptor ring entries (must be power of 2)
*
* Returns 0 if successful, otherwise failure.
*/
int eth_dev_get_tx_queue(struct rte_eth_dev *dev,
struct eth_tx_queue **tx_queue, uint16_t nb_desc)
{
int tx_idx, ret;
if (!is_power_of_two(nb_desc))
return -EINVAL;
spin_lock(&eth_dev_lock);
tx_idx = dev->data->nb_tx_queues;
if (tx_idx >= dev->data->max_tx_queues) {
spin_unlock(&eth_dev_lock);
return -EMFILE;
}
ret = dev->dev_ops->tx_queue_setup(dev, tx_idx, -1, nb_desc);
if (ret) {
spin_unlock(&eth_dev_lock);
return ret;
}
dev->data->nb_tx_queues++;
spin_unlock(&eth_dev_lock);
*tx_queue = dev->data->tx_queues[tx_idx];
return 0;
}
/**
* eth_dev_alloc - allocates an ethernet device
* @private_len: the size of the private area
*
* Returns an ethernet device, or NULL if failure.
*/
struct rte_eth_dev *eth_dev_alloc(size_t private_len)
{
struct rte_eth_dev *dev;
dev = malloc(sizeof(struct rte_eth_dev));
if (!dev)
return NULL;
dev->pci_dev = NULL;
dev->dev_ops = NULL;
dev->data = malloc(sizeof(struct rte_eth_dev_data));
if (!dev->data) {
free(dev);
return NULL;
}
memset(dev->data, 0, sizeof(struct rte_eth_dev_data));
dev->data->dev_private = malloc(private_len);
if (!dev->data->dev_private) {
free(dev->data);
free(dev);
return NULL;
}
memset(dev->data->dev_private, 0, private_len);
return dev;
}
/**
* eth_dev_destroy - frees an ethernet device
* @dev: the ethernet device
*/
void eth_dev_destroy(struct rte_eth_dev *dev)
{
if (dev->dev_ops && dev->dev_ops->dev_close)
dev->dev_ops->dev_close(dev);
free(dev->data->dev_private);
free(dev->data);
free(dev);
}
/**
* eth_dev_configure - sets the device configuration
* @dev: the ethernet device to configure
* @conf: the configuration parameters to set
*
* Returns 0 if successful, otherwise failure.
*/
int eth_dev_configure(struct rte_eth_dev *dev, const struct rte_eth_conf *conf)
{
dev->data->dev_conf = *conf;
if (dev->dev_ops && dev->dev_ops->dev_configure)
return dev->dev_ops->dev_configure(dev);
return 0;
}
/*
* Intel 82599 Support (IXGBE)
* TODO: restructure these helpers to work with other ethernet chipsets.
* TODO: support more advanced configuration options.
*/
extern int ixgbe_init(struct pci_dev *pci_dev,
struct rte_eth_dev **ethp);
static uint8_t rss_key[40];
static const struct rte_eth_conf ixgbe_simple_conf = {
.rxmode = {
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
.hw_vlan_filter = 0, /**< VLAN filtering disabled */
.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
.hw_strip_crc = 1, /**< CRC stripped by hardware */
.mq_mode = ETH_MQ_RX_NONE, /*ETH_MQ_RX_RSS,*/
},
.rx_adv_conf = {
.rss_conf = {
.rss_hf = ETH_RSS_IPV4_TCP | ETH_RSS_IPV4_UDP,
.rss_key = rss_key,
},
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
},
};
/**
* ixgbe_create_simple - a simple helper to create an IXGBE ethernet device
* @addr: the PCI address of the device
* @ethp: a pointer to store the created ethernet device
* @rxp: a pointer to store the created RX queue
* @txp: a pointer to store the created TX queue
* @rx_desc_nr: the number of RX descriptor ring entries (must be a power of 2)
* @tx_desc_nr: the number of TX descriptor ring entries (must be a power of 2)
* @a: the RX mbuf allocator to use
*
* This helper creates an ethernet device with a single RX and TX queue, the
* default hardware MAC, and checksum offloads enabled.
*
* Returns 0 if successful, otherwise failure.
*/
int ixgbe_create_simple(const struct pci_addr *addr, struct rte_eth_dev **ethp,
struct eth_rx_queue **rxp, struct eth_tx_queue **txp,
uint16_t rx_desc_nr, uint16_t tx_desc_nr,
struct mbuf_allocator *a)
{
struct pci_dev *dev;
struct rte_eth_dev *eth;
int ret;
dev = pci_alloc_dev(addr);
if (!dev)
return -ENOMEM;
ret = ixgbe_init(dev, &eth);
if (ret)
goto fail_release_pci;
ret = eth_dev_configure(eth, &ixgbe_simple_conf);
if (ret)
goto fail_release_eth;
ret = eth_dev_get_rx_queue(eth, rxp, rx_desc_nr, a);
if (ret)
goto fail_release_eth;
ret = eth_dev_get_tx_queue(eth, txp, tx_desc_nr);
if (ret)
goto fail_release_eth;
ret = eth_dev_start(eth);
if (ret)
goto fail_release_eth;
*ethp = eth;
return 0;
fail_release_eth:
eth_dev_destroy(eth);
fail_release_pci:
pci_dev_put(dev);
return ret;
}
/**
* ixgbe_destroy - tears down and frees an IXGBE ethernet device
* @dev: the ethernet device to destroy
*/
void ixgbe_destroy(struct rte_eth_dev *dev)
{
eth_dev_stop(dev);
pci_dev_put(dev->pci_dev);
eth_dev_destroy(dev);
}
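
A usage sketch of the helper above, under the assumption that the caller already holds a struct pci_addr and an mbuf allocator; the 256-entry ring sizes are illustrative (any power of two would satisfy the documented constraint):

#include <base/pci.h>
#include <net/ethdev.h>

/* Bring up a single-queue IXGBE NIC, then tear it down. */
static int example_bringup(const struct pci_addr *addr, struct mbuf_allocator *a)
{
	struct rte_eth_dev *eth;
	struct eth_rx_queue *rx;
	struct eth_tx_queue *tx;
	int ret;

	/* 256-entry rings satisfy the power-of-two requirement */
	ret = ixgbe_create_simple(addr, &eth, &rx, &tx, 256, 256, a);
	if (ret)
		return ret;

	/* ... poll rx and tx here ... */

	ixgbe_destroy(eth);
	return 0;
}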


@@ -1,66 +0,0 @@
..
BSD LICENSE
Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Intel® IXGBE driver
===================
This directory contains the source code of the FreeBSD ixgbe driver, version
cid-10g-shared-code.2012.11.09, released by LAD. The lad/ sub-directory
contains the original source package.
This driver is valid for the product(s) listed below
* Intel® 10 Gigabit AF DA Dual Port Server Adapter
* Intel® 10 Gigabit AT Server Adapter
* Intel® 10 Gigabit AT2 Server Adapter
* Intel® 10 Gigabit CX4 Dual Port Server Adapter
* Intel® 10 Gigabit XF LR Server Adapter
* Intel® 10 Gigabit XF SR Dual Port Server Adapter
* Intel® 10 Gigabit XF SR Server Adapter
* Intel® 82598 10 Gigabit Ethernet Controller
* Intel® 82599 10 Gigabit Ethernet Controller
* Intel® Ethernet Controller X540-AT2
* Intel® Ethernet Server Adapter X520 Series
* Intel® Ethernet Server Adapter X520-T2
Updating driver
===============
The following modifications have been made to this code to integrate it with the
Intel® DPDK:
ixgbe_osdep.h
-------------
The OS dependency layer has been extensively modified to support the drivers in
the Intel® DPDK environment. It is expected that these files will not need to
be changed when updating the driver.

File diff suppressed because it is too large


@@ -1,52 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_82598_H_
#define _IXGBE_82598_H_
#ident "$Id: ixgbe_82598.h,v 1.3 2012/03/27 22:16:51 jtkirshe Exp $"
u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
#endif /* _IXGBE_82598_H_ */

File diff suppressed because it is too large


@@ -1,64 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_82599_H_
#define _IXGBE_82599_H_
#ident "$Id: ixgbe_82599.h,v 1.7 2012/10/03 07:10:29 jtkirshe Exp $"
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
#endif /* _IXGBE_82599_H_ */

File diff suppressed because it is too large


@@ -1,177 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_API_H_
#define _IXGBE_API_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_api.h,v 1.115 2012/08/23 23:30:15 jtkirshe Exp $"
s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
s32 ixgbe_start_hw(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 *phy_data);
s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
u16 phy_data);
s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *autoneg);
s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr func,
bool clear);
void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
u8 ver);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
u16 soft_id);
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input,
union ixgbe_atr_input *mask,
u16 soft_id,
u8 queue);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common);
bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 *data);
s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
#endif /* _IXGBE_API_H_ */

File diff suppressed because it is too large


@@ -1,155 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_COMMON_H_
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_common.h,v 1.133 2012/11/05 23:08:30 jtkirshe Exp $"
#define IXGBE_WRITE_REG64(hw, reg, value) \
do { \
	IXGBE_WRITE_REG(hw, reg, (u32)(value)); \
	IXGBE_WRITE_REG(hw, (reg) + 4, (u32)((value) >> 32)); \
} while (0)
struct ixgbe_pba {
u16 word[2];
u16 *pba_block;
};
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 max_pba_block_size,
struct ixgbe_pba *pba);
s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, struct ixgbe_pba *pba);
s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
u32 eeprom_buf_size, u16 *pba_block_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func, bool clear);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
u32 addr_count, ixgbe_mc_addr_itr func);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on);
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool *vfta_changed);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
int strategy);
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#endif /* _IXGBE_COMMON_H_ */


@@ -1,695 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
#ident "$Id: ixgbe_dcb.c,v 1.52 2012/05/24 02:09:56 jtkirshe Exp $"
/**
* ixgbe_dcb_calculate_tc_credits - Calculate IEEE traffic class credits
*
* Calculates the IEEE traffic class credits from the configured bandwidth
* percentages. Credits are the smallest unit programmable into the
* underlying hardware. The IEEE 802.1Qaz specification does not use
* bandwidth groups, so this is much simplified from the CEE case.
*/
s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
int max_frame_size)
{
int min_percent = 100;
int min_credit, multiplier;
int i;
min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
IXGBE_DCB_CREDIT_QUANTUM;
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
if (bw[i] < min_percent && bw[i])
min_percent = bw[i];
}
multiplier = (min_credit / min_percent) + 1;
/* Find out the hw credits for each TC */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
if (val < min_credit)
val = min_credit;
refill[i] = (u16)val;
max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
}
return 0;
}
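/*
 * Worked example for the math above (illustrative numbers): with
 * max_frame_size = 1518 and IXGBE_DCB_CREDIT_QUANTUM = 64, min_credit =
 * (759 + 63) / 64 = 12. For bw = {10, 20, 30, 40, 0, 0, 0, 0}, min_percent
 * is 10 and multiplier = (12 / 10) + 1 = 2, so after clamping to min_credit
 * refill = {20, 40, 60, 80, 12, 12, 12, 12}; with IXGBE_DCB_MAX_CREDIT = 400,
 * max = {40, 80, 120, 160, 12, 12, 12, 12}.
 */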
/**
* ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
* @hw: pointer to hardware structure
* @dcb_config: Struct containing DCB settings.
* @max_frame_size: maximum frame size in bytes
* @direction: Configuring either Tx or Rx.
*
* This function calculates the credits allocated to each traffic class.
* It should be called only after the rules are checked by
* ixgbe_dcb_check_config_cee().
*/
s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config,
u32 max_frame_size, u8 direction)
{
struct ixgbe_dcb_tc_path *p;
u32 min_multiplier = 0;
u16 min_percent = 100;
s32 ret_val = IXGBE_SUCCESS;
/* Initialization values default for Tx settings */
u32 min_credit = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u16 link_percentage = 0;
u8 bw_percent = 0;
u8 i;
if (dcb_config == NULL) {
ret_val = IXGBE_ERR_CONFIG;
goto out;
}
min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
IXGBE_DCB_CREDIT_QUANTUM;
/* Find smallest link percentage */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
link_percentage = p->bwg_percent;
link_percentage = (link_percentage * bw_percent) / 100;
if (link_percentage && link_percentage < min_percent)
min_percent = link_percentage;
}
/*
* The ratio between traffic classes will control the bandwidth
* percentages seen on the wire. To calculate this ratio we use
* a multiplier. It is required that the refill credits must be
* larger than the max frame size so here we find the smallest
* multiplier that will allow all bandwidth percentages to be
* greater than the max frame size.
*/
min_multiplier = (min_credit / min_percent) + 1;
/* Find out the link percentage for each TC first */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
link_percentage = p->bwg_percent;
/* Must be careful of integer division for very small nums */
link_percentage = (link_percentage * bw_percent) / 100;
if (p->bwg_percent > 0 && link_percentage == 0)
link_percentage = 1;
/* Save link_percentage for reference */
p->link_percent = (u8)link_percentage;
/* Calculate credit refill ratio using multiplier */
credit_refill = min(link_percentage * min_multiplier,
(u32)IXGBE_DCB_MAX_CREDIT_REFILL);
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
/*
* Adjustment based on rule checking, if the percentage
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
if (credit_max && (credit_max < min_credit))
credit_max = min_credit;
if (direction == IXGBE_DCB_TX_CONFIG) {
/*
* Adjustment based on rule checking, if the
* percentage of a TC is too small, the maximum
* credit may not be enough to send out a TSO
* packet in descriptor plane arbitration.
*/
if (credit_max && (credit_max <
IXGBE_DCB_MIN_TSO_CREDIT)
&& (hw->mac.type == ixgbe_mac_82598EB))
credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
dcb_config->tc_config[i].desc_credits_max =
(u16)credit_max;
}
p->data_credits_max = (u16)credit_max;
}
out:
return ret_val;
}
/**
* ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
* @cfg: dcb configuration to unpack into hardware consumable fields
* @map: user priority to traffic class map
* @pfc_up: u8 to store user priority PFC bitmask
*
* This unpacks the dcb configuration PFC info which is stored per
* traffic class into a 8bit user priority bitmask that can be
* consumed by hardware routines. The priority to tc map must be
* updated before calling this routine to use current up-to maps.
*/
void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
int up;
/*
* If the TC for this user priority has PFC enabled then set the
* matching bit in 'pfc_up' to reflect that PFC is enabled.
*/
for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
*pfc_up |= 1 << up;
}
}
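/*
 * Worked example (illustrative): with map = {0, 0, 0, 1, 1, 1, 2, 2} and PFC
 * enabled only on traffic class 1, the loop above sets bits 3 through 5, so
 * *pfc_up ends up as 0x38.
 */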
void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
int tc;
for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
refill[tc] = tc_config[tc].path[direction].data_credits_refill;
}
void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
int tc;
for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
max[tc] = tc_config[tc].desc_credits_max;
}
void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
int tc;
for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
bwgid[tc] = tc_config[tc].path[direction].bwg_id;
}
void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
u8 *tsa)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
int tc;
for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
tsa[tc] = tc_config[tc].path[direction].tsa;
}
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
u8 prio_mask = 1 << up;
u8 tc = cfg->num_tcs.pg_tcs;
/* If tc is 0 then DCB is likely not enabled or supported */
if (!tc)
goto out;
/*
* Test from maximum TC to 1 and report the first match we find. If
* we find no match we can assume that the TC is 0 since the TC must
* be set for all user priorities
*/
for (tc--; tc; tc--) {
if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
break;
}
out:
return tc;
}
void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
u8 *map)
{
u8 up;
for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
}
/**
* ixgbe_dcb_check_config_cee - Check DCB configuration rules
* @dcb_config: Pointer to DCB config structure
*
* This function checks DCB rules for DCB settings.
* The following rules are checked:
* 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
* 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
* Group must total 100.
* 3. A Traffic Class should not be set to both Link Strict Priority
* and Group Strict Priority.
* 4. Link strict Bandwidth Groups can only have link strict traffic classes
* with zero bandwidth.
*/
s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
{
struct ixgbe_dcb_tc_path *p;
s32 ret_val = IXGBE_SUCCESS;
u8 i, j, bw = 0, bw_id;
u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
memset(bw_sum, 0, sizeof(bw_sum));
memset(link_strict, 0, sizeof(link_strict));
/* First Tx, then Rx */
for (i = 0; i < 2; i++) {
/* Check each traffic class for rule violation */
for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
p = &dcb_config->tc_config[j].path[i];
bw = p->bwg_percent;
bw_id = p->bwg_id;
if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
if (p->tsa == ixgbe_dcb_tsa_strict) {
link_strict[i][bw_id] = true;
/* Link strict should have zero bandwidth */
if (bw) {
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
} else if (!bw) {
/*
* Traffic classes without link strict
* should have non-zero bandwidth.
*/
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
bw_sum[i][bw_id] += bw;
}
bw = 0;
/* Check each bandwidth group for rule violation */
for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
bw += dcb_config->bw_percentage[i][j];
/*
* Sum of bandwidth percentages of all traffic classes
* within a Bandwidth Group must total 100 except for
* link strict group (zero bandwidth).
*/
if (link_strict[i][j]) {
if (bw_sum[i][j]) {
/*
* Link strict group should have zero
* bandwidth.
*/
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
} else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
bw_sum[i][j] != 0) {
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
}
if (bw != IXGBE_DCB_BW_PERCENT) {
ret_val = IXGBE_ERR_CONFIG;
goto err_config;
}
}
err_config:
DEBUGOUT2("DCB error code %d while checking %s settings.\n",
ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
return ret_val;
}
/**
* ixgbe_dcb_get_tc_stats - Returns status of each traffic class
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of elements in bwg_array.
*
* This function returns the status data for each of the Traffic Classes in use.
*/
s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
u8 tc_count)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of elements in bwg_array.
*
* This function returns the CBFC status data for each of the Traffic Classes.
*/
s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
u8 tc_count)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 };
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
tsa, map);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwgid, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
bwgid, tsa);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
bwgid, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
bwgid, tsa,
map);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_config_pfc_cee - Config priority flow control
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
u8 pfc_en;
u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_config_tc_stats - Config traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers; all queues belonging to the same
* traffic class use a single set of queue statistics counters.
*/
s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_tc_stats_82598(hw);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
break;
default:
break;
}
return ret;
}
/**
* ixgbe_dcb_hw_config_cee - Config and enable DCB
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = IXGBE_NOT_IMPLEMENTED;
u8 pfc_en;
u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
/* Unpack CEE standard containers */
ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
ixgbe_dcb_unpack_max_cee(dcb_config, max);
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
refill, max, bwgid, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ixgbe_dcb_config_82599(hw, dcb_config);
ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
refill, max, bwgid,
tsa, map);
ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
break;
default:
break;
}
if (!ret && dcb_config->pfc_mode_enable) {
ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
}
return ret;
}
/* Helper routines to abstract HW specifics from DCB netlink ops */
s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
int ret = IXGBE_ERR_PARAM;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
break;
default:
break;
}
return ret;
}
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
u8 *bwg_id, u8 *tsa, u8 *map)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
tsa);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
break;
default:
break;
}
return 0;
}


@@ -1,176 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_DCB_H_
#define _IXGBE_DCB_H_
#ident "$Id: ixgbe_dcb.h,v 1.39 2012/04/17 00:07:40 jtkirshe Exp $"
#include "ixgbe_type.h"
/* DCB defines */
/* DCB credit calculation defines */
#define IXGBE_DCB_CREDIT_QUANTUM 64
#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
/* 513 for 32KB TSO packet */
#define IXGBE_DCB_MIN_TSO_CREDIT \
((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
/* DCB configuration defines */
#define IXGBE_DCB_MAX_USER_PRIORITY 8
#define IXGBE_DCB_MAX_BW_GROUP 8
#define IXGBE_DCB_BW_PERCENT 100
#define IXGBE_DCB_TX_CONFIG 0
#define IXGBE_DCB_RX_CONFIG 1
/* DCB capability defines */
#define IXGBE_DCB_PG_SUPPORT 0x00000001
#define IXGBE_DCB_PFC_SUPPORT 0x00000002
#define IXGBE_DCB_BCN_SUPPORT 0x00000004
#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008
#define IXGBE_DCB_GSP_SUPPORT 0x00000010
struct ixgbe_dcb_support {
u32 capabilities; /* DCB capabilities */
/* Each bit represents a number of TCs configurable in the hw.
* If 8 traffic classes can be configured, the value is 0x80. */
u8 traffic_classes;
u8 pfc_traffic_classes;
};
enum ixgbe_dcb_tsa {
ixgbe_dcb_tsa_ets = 0,
ixgbe_dcb_tsa_group_strict_cee,
ixgbe_dcb_tsa_strict
};
/* Traffic class bandwidth allocation per direction */
struct ixgbe_dcb_tc_path {
u8 bwg_id; /* Bandwidth Group (BWG) ID */
u8 bwg_percent; /* % of BWG's bandwidth */
u8 link_percent; /* % of link bandwidth */
u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
u16 data_credits_refill; /* Credit refill amount in 64B granularity */
u16 data_credits_max; /* Max credits for a configured packet buffer
* in 64B granularity.*/
enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
};
enum ixgbe_dcb_pfc {
ixgbe_dcb_pfc_disabled = 0,
ixgbe_dcb_pfc_enabled,
ixgbe_dcb_pfc_enabled_txonly,
ixgbe_dcb_pfc_enabled_rxonly
};
/* Traffic class configuration */
struct ixgbe_dcb_tc_config {
struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
u16 desc_credits_max; /* For Tx Descriptor arbitration */
u8 tc; /* Traffic class (TC) */
};
enum ixgbe_dcb_pba {
/* PBA[0-7] each use 64KB FIFO */
ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
};
struct ixgbe_dcb_num_tcs {
u8 pg_tcs;
u8 pfc_tcs;
};
struct ixgbe_dcb_config {
struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
struct ixgbe_dcb_support support;
struct ixgbe_dcb_num_tcs num_tcs;
u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
bool round_robin_enable;
enum ixgbe_dcb_pba rx_pba_cfg;
u32 dcb_cfg_version; /* Not used...OS-specific? */
u32 link_speed; /* For bandwidth allocation validation purpose */
bool vt_mode;
};
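/*
 * Hypothetical example, not part of the driver: a minimal two-TC Tx
 * configuration that splits link bandwidth 50/50 between two bandwidth
 * groups, with user priorities 0-3 on TC0 and 4-7 on TC1 (assumes
 * <string.h> for memset).
 */
static inline void example_two_tc_split(struct ixgbe_dcb_config *cfg)
{
	struct ixgbe_dcb_tc_path *p;

	memset(cfg, 0, sizeof(*cfg));
	cfg->num_tcs.pg_tcs = 2;

	p = &cfg->tc_config[0].path[IXGBE_DCB_TX_CONFIG];
	p->bwg_id = 0;
	p->bwg_percent = 100;		/* TC0 owns all of BWG 0 */
	p->up_to_tc_bitmap = 0x0f;	/* user priorities 0-3 */
	p->tsa = ixgbe_dcb_tsa_ets;

	p = &cfg->tc_config[1].path[IXGBE_DCB_TX_CONFIG];
	p->bwg_id = 1;
	p->bwg_percent = 100;		/* TC1 owns all of BWG 1 */
	p->up_to_tc_bitmap = 0xf0;	/* user priorities 4-7 */
	p->tsa = ixgbe_dcb_tsa_ets;

	cfg->bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 50;
	cfg->bw_percentage[IXGBE_DCB_TX_CONFIG][1] = 50;
}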
/* DCB driver APIs */
/* DCB rule checking */
s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
struct ixgbe_dcb_config *, u32, u8);
/* DCB PFC */
s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB stats */
s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
struct ixgbe_dcb_config *);
s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
struct ixgbe_dcb_config *);
s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
struct ixgbe_dcb_config *);
/* DCB unpack routines */
void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
#endif /* _IXGBE_DCB_H_ */


@@ -1,359 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82598.h"
#ident "$Id: ixgbe_dcb_82598.c,v 1.29 2012/03/30 06:45:33 jtkirshe Exp $"
/**
* ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of traffic classes to gather statistics for.
*
* This function returns the status data for each of the Traffic Classes in use.
*/
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *stats,
u8 tc_count)
{
int tc;
DEBUGFUNC("dcb_get_tc_stats");
if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
return IXGBE_ERR_PARAM;
/* Statistics pertaining to each traffic class */
for (tc = 0; tc < tc_count; tc++) {
/* Transmitted Packets */
stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
/* Transmitted Bytes */
stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
/* Received Packets */
stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
/* Received Bytes */
stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
#if 0
/* Can we get rid of these?? Consequently, getting rid
* of the tc_stats structure.
*/
tc_stats_array[up]->in_overflow_discards = 0;
tc_stats_array[up]->out_overflow_discards = 0;
#endif
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of traffic classes to gather statistics for.
*
* This function returns the CBFC status data for each of the Traffic Classes.
*/
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *stats,
u8 tc_count)
{
int tc;
DEBUGFUNC("dcb_get_pfc_stats");
if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
return IXGBE_ERR_PARAM;
for (tc = 0; tc < tc_count; tc++) {
/* Priority XOFF Transmitted */
stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
/* Priority XOFF Received */
stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @tsa: transmission selection algorithm per traffic class
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *tsa)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
/* Enable Arbiter */
reg &= ~IXGBE_RMCS_ARBDIS;
/* Enable Receive Recycle within the BWG */
reg |= IXGBE_RMCS_RRM;
/* Enable Deficit Fixed Priority arbitration*/
reg |= IXGBE_RMCS_DFP;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
}
reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
reg |= IXGBE_RDRXCTL_RDMTS_1_2;
reg |= IXGBE_RDRXCTL_MPBEN;
reg |= IXGBE_RDRXCTL_MCEN;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* Make sure there are enough descriptors before arbitration */
reg &= ~IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *tsa)
{
u32 reg, max_credits;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
reg |= IXGBE_TDTQ2TCCR_GSP;
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *tsa)
{
u32 reg;
u8 i;
reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
/* Enable Data Plane Arbiter */
reg &= ~IXGBE_PDPMCS_ARBDIS;
/* Enable DFP and Transmit Recycle Mode */
reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
reg |= IXGBE_TDPT2TCCR_GSP;
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
}
/* Enable Tx packet buffer division */
reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
reg |= IXGBE_DTXCTL_ENDBUBD;
IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_pfc_82598 - Config priority flow control
* @hw: pointer to hardware structure
* @pfc_en: bitmask of user priorities with PFC enabled
*
* Configure Priority Flow Control for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 fcrtl, reg;
u8 i;
/* Enable Transmit Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
reg &= ~IXGBE_RMCS_TFCE_802_3X;
reg |= IXGBE_RMCS_TFCE_PRIORITY;
IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
if (pfc_en)
reg |= IXGBE_FCTRL_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
}
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
}
/* Configure pause time */
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return IXGBE_SUCCESS;
}
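/*
 * Worked example of the pause-time packing above (illustrative value).
 * Each FCTTV register holds two 16-bit timers, one per TC, so with
 * pause_time = 0x0680:
 *
 *   reg = 0x0680 | (0x0680 << 16) = 0x06800680
 *
 * written to FCTTV(0..3) to cover TCs 0-7 two at a time, with a refresh
 * threshold of 0x0680 / 2 = 0x0340 in FCRTV.
 */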
/**
* ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
u8 j = 0;
/* Receive Queues stats setting - 8 queues per statistics reg */
for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
reg |= ((0x1010101) * j);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
}
/* Transmit Queues stats setting - 4 queues per statistics reg */
for (i = 0; i < 8; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
reg |= ((0x1010101) * i);
IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_hw_config_82598 - Config and enable DCB
* @hw: pointer to hardware structure
* @link_speed: current link speed (not used by this function)
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id,
u8 *tsa)
{
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
tsa);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
tsa);
ixgbe_dcb_config_tc_stats_82598(hw);
return IXGBE_SUCCESS;
}


@@ -1,100 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_DCB_82598_H_
#define _IXGBE_DCB_82598_H_
#ident "$Id: ixgbe_dcb_82598.h,v 1.12 2012/03/26 22:28:19 jtkirshe Exp $"
/* DCB register definitions */
#define IXGBE_DPMCS_MTSOS_SHIFT 16
#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin,
* 1 DFP - Deficit Fixed Priority */
#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
* buffers enable */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
* (RSS) enable */
#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
#define IXGBE_TDTQ2TCCR_GSP 0x40000000
#define IXGBE_TDTQ2TCCR_LSP 0x80000000
#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
#define IXGBE_TDPT2TCCR_GSP 0x40000000
#define IXGBE_TDPT2TCCR_LSP 0x80000000
#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin,
* 1 DFP - Deficit Fixed Priority */
#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
/* DCB driver APIs */
/* DCB PFC */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
/* DCB stats */
s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
struct ixgbe_hw_stats *, u8);
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
struct ixgbe_hw_stats *, u8);
/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
u8 *, u8 *);
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
u8 *, u8 *);
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
/* DCB initialization */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
#endif /* _IXGBE_DCB_82598_H_ */


@@ -1,585 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
#ident "$Id: ixgbe_dcb_82599.c,v 1.67 2012/03/30 06:45:33 jtkirshe Exp $"
/**
* ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of traffic classes to gather statistics for.
*
* This function returns the status data for each of the Traffic Classes in use.
*/
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *stats,
u8 tc_count)
{
int tc;
DEBUGFUNC("dcb_get_tc_stats");
if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
return IXGBE_ERR_PARAM;
/* Statistics pertaining to each traffic class */
for (tc = 0; tc < tc_count; tc++) {
/* Transmitted Packets */
stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
/* Transmitted Bytes (read low first to prevent missed carry) */
stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
stats->qbtc[tc] +=
(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
/* Received Packets */
stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
/* Received Bytes (read low first to prevent missed carry) */
stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
stats->qbrc[tc] +=
(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
/* Received Dropped Packet */
stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
}
return IXGBE_SUCCESS;
}
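/*
 * A generic sketch of the low-then-high read pattern above (hypothetical
 * helper; assumes the hardware latches the high half of the counter when
 * the low half is read, which is why reading low first cannot miss a
 * carry between the two 32-bit accesses):
 */
static inline u64 example_read_counter64(struct ixgbe_hw *hw,
					 u32 lo_reg, u32 hi_reg)
{
	u64 v = IXGBE_READ_REG(hw, lo_reg);	/* latches the high half */

	v |= (u64)IXGBE_READ_REG(hw, hi_reg) << 32;
	return v;
}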
/**
* ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
* @hw: pointer to hardware structure
* @stats: pointer to statistics structure
* @tc_count: Number of traffic classes to gather statistics for.
*
* This function returns the CBFC status data for each of the Traffic Classes.
*/
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *stats,
u8 tc_count)
{
int tc;
DEBUGFUNC("dcb_get_pfc_stats");
if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
return IXGBE_ERR_PARAM;
for (tc = 0; tc < tc_count; tc++) {
/* Priority XOFF Transmitted */
stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
/* Priority XOFF Received */
stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
* @map: user priority to traffic class map
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id, u8 *tsa,
u8 *map)
{
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
u8 i = 0;
/*
* Disable the arbiter before changing parameters
* (always enable recycle mode; WSP)
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
/*
* map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
* bits set for the UPs that need to be mapped to that TC.
* e.g. if priorities 6 and 7 are to be mapped to a TC then the
* up_to_tc_bitmap value for that TC will be 11000000 in binary.
*/
reg = 0;
for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
credit_refill = refill[i];
credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_RTRPT4C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
}
/*
* Configure Rx packet plane (recycle mode; WSP) and
* enable arbiter
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
return IXGBE_SUCCESS;
}
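/*
 * Worked example of the RTRPT4C packing above (illustrative values).
 * With refill = 128 quanta, max = 512 quanta, BWG 3 and strict priority:
 *
 *   reg = 128 | (512 << IXGBE_RTRPT4C_MCL_SHIFT)
 *             | (3 << IXGBE_RTRPT4C_BWG_SHIFT) | IXGBE_RTRPT4C_LSP
 *       = 0x00000080 | 0x00200000 | 0x00000600 | 0x80000000
 *       = 0x80200680
 */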
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id, u8 *tsa)
{
u32 reg, max_credits;
u8 i;
/* Clear the per-Tx queue credits; we use per-TC instead */
for (i = 0; i < 128; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
}
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
max_credits = max[i];
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
reg |= refill[i];
reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
reg |= IXGBE_RTTDT2C_GSP;
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_RTTDT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
}
/*
* Configure Tx descriptor plane (recycle mode; WSP) and
* enable arbiter
*/
reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
* @map: user priority to traffic class map
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
u16 *max, u8 *bwg_id, u8 *tsa,
u8 *map)
{
u32 reg;
u8 i;
/*
* Disable the arbiter before changing parameters
* (always enable recycle mode; SP; arb delay)
*/
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
IXGBE_RTTPCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
/*
* map all UPs to TCs. up_to_tc_bitmap for each TC has the corresponding
* bits set for the UPs that need to be mapped to that TC.
* e.g. if priorities 6 and 7 are to be mapped to a TC then the
* up_to_tc_bitmap value for that TC will be 11000000 in binary.
*/
reg = 0;
for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
/* Configure traffic class credits and priority */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = refill[i];
reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
reg |= IXGBE_RTTPT2C_GSP;
if (tsa[i] == ixgbe_dcb_tsa_strict)
reg |= IXGBE_RTTPT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
}
/*
* Configure Tx packet plane (recycle mode; SP; arb delay) and
* enable arbiter
*/
reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
(IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
return IXGBE_SUCCESS;
}
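/*
 * Worked example of the UP-to-TC map packing above (illustrative mapping).
 * With map[] = {0, 0, 0, 0, 1, 1, 2, 3} and a 3-bit field per user
 * priority:
 *
 *   reg = (1 << 12) | (1 << 15) | (2 << 18) | (3 << 21) = 0x00689000
 *
 * so priorities 0-3 land on TC0, 4-5 on TC1, 6 on TC2 and 7 on TC3.
 */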
/**
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
* @pfc_en: enabled pfc bitmask
* @map: priority to tc assignments indexed by priority
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
u32 i, j, fcrtl, reg;
u8 max_tc = 0;
/* Enable Transmit Priority Flow Control */
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
/* Enable Receive Priority Flow Control */
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg |= IXGBE_MFLCN_DPF;
/*
* X540 supports per TC Rx priority flow control. So
* clear all TCs and only enable those that should be
* enabled.
*/
reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
if (hw->mac.type == ixgbe_mac_X540)
reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
if (pfc_en)
reg |= IXGBE_MFLCN_RPFCE;
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
if (map[i] > max_tc)
max_tc = map[i];
}
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
int enabled = 0;
for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
if ((map[j] == i) && (pfc_en & (1 << j))) {
enabled = 1;
break;
}
}
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
}
/* Configure pause time (2 TCs per register) */
reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
/* Configure flow control refresh threshold value */
IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
return IXGBE_SUCCESS;
}
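/*
 * Illustrative threshold math for the PFC watermarks above. The << 10
 * converts the fc.high_water/low_water values to bytes (i.e. the fields
 * are presumably kept in KB units). A high water mark of 32 gives
 *
 *   FCRTH = (32 << 10) | IXGBE_FCRTH_FCEN
 *
 * so XOFF frames start flowing once the packet buffer fill for that TC
 * crosses 32768 bytes.
 */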
/**
* ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
* @hw: pointer to hardware structure
*
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u32 reg = 0;
u8 i = 0;
u8 tc_count = 8;
bool vt_mode = false;
if (dcb_config != NULL) {
tc_count = dcb_config->num_tcs.pg_tcs;
vt_mode = dcb_config->vt_mode;
}
if (!((tc_count == 8 && vt_mode == false) || tc_count == 4))
return IXGBE_ERR_PARAM;
if (tc_count == 8 && vt_mode == false) {
/*
* Receive Queues stats setting
* 32 RQSMR registers, each configuring 4 queues.
*
* Set all 16 queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
*/
for (i = 0; i < 32; i++) {
reg = 0x01010101 * (i / 4);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
}
/*
* Transmit Queues stats setting
* 32 TQSM registers, each controlling 4 queues.
*
* Set all queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
* Tx queues are allocated non-uniformly to TCs:
* 32, 32, 16, 16, 8, 8, 8, 8.
*/
for (i = 0; i < 32; i++) {
if (i < 8)
reg = 0x00000000;
else if (i < 16)
reg = 0x01010101;
else if (i < 20)
reg = 0x02020202;
else if (i < 24)
reg = 0x03030303;
else if (i < 26)
reg = 0x04040404;
else if (i < 28)
reg = 0x05050505;
else if (i < 30)
reg = 0x06060606;
else
reg = 0x07070707;
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
}
} else if (tc_count == 4 && vt_mode == false) {
/*
* Receive Queues stats setting
* 32 RQSMR registers, each configuring 4 queues.
*
* Set all 16 queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
*/
for (i = 0; i < 32; i++) {
if (i % 8 > 3)
/* In 4 TC mode, odd 16-queue ranges are
* not used.
*/
continue;
reg = 0x01010101 * (i / 8);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
}
/*
* Transmit Queues stats setting
* 32 TQSM registers, each controlling 4 queues.
*
* Set all queues of each TC to the same stat
* with TC 'n' going to stat 'n'.
* Tx queues are allocated non-uniformly to TCs:
* 64, 32, 16, 16.
*/
for (i = 0; i < 32; i++) {
if (i < 16)
reg = 0x00000000;
else if (i < 24)
reg = 0x01010101;
else if (i < 28)
reg = 0x02020202;
else
reg = 0x03030303;
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
}
} else if (tc_count == 4 && vt_mode == true) {
/*
* Receive Queues stats setting
* 32 RQSMR registers, each configuring 4 queues.
*
* Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
* pool. Set all 32 queues of each TC across pools to the same
* stat with TC 'n' going to stat 'n'.
*/
for (i = 0; i < 32; i++)
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
/*
* Transmit Queues stats setting
* 32 TQSM registers, each controlling 4 queues.
*
* Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
* pool. Set all 32 queues of each TC across pools to the same
* stat with TC 'n' going to stat 'n'.
*/
for (i = 0; i < 32; i++)
IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
}
return IXGBE_SUCCESS;
}
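/*
 * Worked example of the 8-TC statistics mapping above. Queue 37 belongs
 * to TC2 (queues 32-47, 16 queues per TC). Its mapping byte lives in
 * RQSMR(37 / 4) = RQSMR(9), and the loop writes
 *
 *   RQSMR(9) = 0x01010101 * (9 / 4) = 0x02020202
 *
 * so all four queues covered by that register, queue 37 included, feed
 * stat set 2.
 */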
/**
* ixgbe_dcb_config_82599 - Configure general DCB parameters
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure general DCB parameters.
*/
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u32 reg;
u32 q;
/* Disable the Tx desc arbiter so that MTQC can be changed */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
if (dcb_config->num_tcs.pg_tcs == 8) {
/* Enable DCB for Rx with 8 TCs */
switch (reg & IXGBE_MRQC_MRQE_MASK) {
case 0:
case IXGBE_MRQC_RT4TCEN:
/* RSS disabled cases */
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_RT8TCEN;
break;
case IXGBE_MRQC_RSSEN:
case IXGBE_MRQC_RTRSS4TCEN:
/* RSS enabled cases */
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_RTRSS8TCEN;
break;
default:
/*
* Unsupported value; assume stale data and
* overwrite with the no-RSS setting
*/
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_RT8TCEN;
}
}
if (dcb_config->num_tcs.pg_tcs == 4) {
/* We support both VT-on and VT-off with 4 TCs. */
if (dcb_config->vt_mode)
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_VMDQRT4TCEN;
else
reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
IXGBE_MRQC_RTRSS4TCEN;
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
/* Enable DCB for Tx with 8 TCs */
if (dcb_config->num_tcs.pg_tcs == 8)
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else {
/* We support both VT-on and VT-off with 4 TCs. */
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
if (dcb_config->vt_mode)
reg |= IXGBE_MTQC_VT_ENA;
}
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
/* Disable drop for all queues */
for (q = 0; q < 128; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
/* Enable Security TX Buffer IFG for DCB */
reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
reg |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
return IXGBE_SUCCESS;
}
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
* @link_speed: current link speed (not used by this function)
* @refill: refill credits per traffic class
* @max: max credits per traffic class
* @bwg_id: bandwidth group index per traffic class
* @tsa: transmission selection algorithm per traffic class
* @map: user priority to traffic class map
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
u8 *map)
{
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
map);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
tsa, map);
return IXGBE_SUCCESS;
}


@@ -1,153 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_DCB_82599_H_
#define _IXGBE_DCB_82599_H_
#ident "$Id: ixgbe_dcb_82599.h,v 1.33 2012/03/26 22:28:19 jtkirshe Exp $"
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
* 1 WSP - Weighted Strict Priority
*/
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
* 1 WRR - Weighted Round Robin
*/
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
* clear!
*/
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
#define IXGBE_RTRUP2TC_UP_SHIFT 3
/* Transmit UP2TC mapping */
#define IXGBE_RTTUP2TC_UP_SHIFT 3
#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */
#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
* buffers enable
*/
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
* (RSS) enable
*/
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
#define IXGBE_RTRPCS_RAC 0x00000004
#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */
/* RTTDT2C Bit Masks */
#define IXGBE_RTTDT2C_MCL_SHIFT 12
#define IXGBE_RTTDT2C_BWG_SHIFT 9
#define IXGBE_RTTDT2C_GSP 0x40000000
#define IXGBE_RTTDT2C_LSP 0x80000000
#define IXGBE_RTTPT2C_MCL_SHIFT 12
#define IXGBE_RTTPT2C_BWG_SHIFT 9
#define IXGBE_RTTPT2C_GSP 0x40000000
#define IXGBE_RTTPT2C_LSP 0x80000000
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
* 1 SP - Strict Priority
*/
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
/* SECTXMINIFG DCB */
#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer SEC IFG */
/* BCN register definitions */
#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
#define IXGBE_RTTBCNRC_RS_ENA 0x80000000
#define IXGBE_RTTBCNCR_MNG_CMTGI 0x00000001
#define IXGBE_RTTBCNCR_MGN_BCNA_MODE 0x00000002
#define IXGBE_RTTBCNCR_RSV7_11_SHIFT 5
#define IXGBE_RTTBCNCR_G 0x00000400
#define IXGBE_RTTBCNCR_I 0x00000800
#define IXGBE_RTTBCNCR_H 0x00001000
#define IXGBE_RTTBCNCR_VER_SHIFT 14
#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT 16
#define IXGBE_RTTBCNACL_SMAC_L_SHIFT 16
#define IXGBE_RTTBCNTG_BCNA_MODE 0x80000000
#define IXGBE_RTTBCNRTT_TS_SHIFT 3
#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT 16
#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL 0x00000002
#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT 2
#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT 16
#define IXGBE_RTTBCNRD_DRIFT_ENA 0x80000000
/* DCB driver APIs */
/* DCB PFC */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
/* DCB stats */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
struct ixgbe_dcb_config *);
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
struct ixgbe_hw_stats *, u8);
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
struct ixgbe_hw_stats *, u8);
/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
u8 *, u8 *);
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
u8 *, u8 *, u8 *);
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
u8 *, u8 *);
/* DCB initialization */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
struct ixgbe_dcb_config *);
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
u8 *, u8 *);
#endif /* _IXGBE_DCB_82599_H_ */

File diff suppressed because it is too large


@@ -1,280 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_dcb_82598.h"
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
/*
* Defines that were not part of ixgbe_type.h as they are not used by the
* FreeBSD driver.
*/
#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */
#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */
#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */
#define IXGBE_RXDADV_ERR_CKSUM_BIT 30
#define IXGBE_RXDADV_ERR_CKSUM_MSK 3
#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */
#define IXGBE_NB_STAT_MAPPING_REGS 32
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
#define IXGBE_MAX_RX_QUEUE_NUM 128
#ifndef NBBY
#define NBBY 8 /* number of bits in a byte */
#endif
#define IXGBE_HWSTRIP_BITMAP_SIZE (IXGBE_MAX_RX_QUEUE_NUM / (sizeof(uint32_t) * NBBY))
/* Loopback operation modes */
/* 82599 specific loopback operation types */
#define IXGBE_LPBK_82599_NONE 0x0 /* Default value. Loopback is disabled. */
#define IXGBE_LPBK_82599_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */
/*
* Information about the fdir mode.
*/
struct ixgbe_hw_fdir_info {
uint16_t collision;
uint16_t free;
uint16_t maxhash;
uint8_t maxlen;
uint64_t add;
uint64_t remove;
uint64_t f_add;
uint64_t f_remove;
};
/* structure for interrupt relative data */
struct ixgbe_interrupt {
uint32_t flags;
uint32_t mask;
};
struct ixgbe_stat_mapping_registers {
uint32_t tqsm[IXGBE_NB_STAT_MAPPING_REGS];
uint32_t rqsmr[IXGBE_NB_STAT_MAPPING_REGS];
};
struct ixgbe_vfta {
uint32_t vfta[IXGBE_VFTA_SIZE];
};
struct ixgbe_hwstrip {
uint32_t bitmap[IXGBE_HWSTRIP_BITMAP_SIZE];
};
/*
* VF data which is used by the PF host only
*/
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_MR_RULE_ENTRIES 4 /* number of mirroring rules supported */
#define IXGBE_MAX_UTA 128
struct ixgbe_uta_info {
uint8_t uc_filter_type;
uint16_t uta_in_use;
uint32_t uta_shadow[IXGBE_MAX_UTA];
};
struct ixgbe_mirror_info {
struct rte_eth_vmdq_mirror_conf mr_conf[ETH_VMDQ_NUM_MIRROR_RULE];
/**< store PF mirror rules configuration */
};
struct ixgbe_vf_info {
uint8_t vf_mac_addresses[ETH_ADDR_LEN];
uint16_t vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
uint16_t default_vf_vlan_id;
uint16_t vlans_enabled;
bool clear_to_send;
uint16_t tx_rate;
uint16_t vlan_count;
uint8_t spoofchk_enabled;
};
/*
* Structure to store private data for each driver instance (for each port).
*/
struct ixgbe_adapter {
struct ixgbe_hw hw;
struct ixgbe_hw_stats stats;
struct ixgbe_hw_fdir_info fdir;
struct ixgbe_interrupt intr;
struct ixgbe_stat_mapping_registers stat_mappings;
struct ixgbe_vfta shadow_vfta;
struct ixgbe_hwstrip hwstrip;
struct ixgbe_dcb_config dcb_config;
struct ixgbe_mirror_info mr_data;
struct ixgbe_vf_info *vfdata;
struct ixgbe_uta_info uta_info;
};
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stats)
#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
(&((struct ixgbe_adapter *)adapter)->intr)
#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
(&((struct ixgbe_adapter *)adapter)->fdir)
#define IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stat_mappings)
#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
(&((struct ixgbe_adapter *)adapter)->shadow_vfta)
#define IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(adapter) \
(&((struct ixgbe_adapter *)adapter)->hwstrip)
#define IXGBE_DEV_PRIVATE_TO_DCB_CFG(adapter) \
(&((struct ixgbe_adapter *)adapter)->dcb_config)
#define IXGBE_DEV_PRIVATE_TO_P_VFDATA(adapter) \
(&((struct ixgbe_adapter *)adapter)->vfdata)
#define IXGBE_DEV_PRIVATE_TO_PFDATA(adapter) \
(&((struct ixgbe_adapter *)adapter)->mr_data)
#define IXGBE_DEV_PRIVATE_TO_UTA(adapter) \
(&((struct ixgbe_adapter *)adapter)->uta_info)
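/*
 * Hypothetical usage sketch, not part of the driver: how the accessor
 * macros above are meant to be used from a dev op, given the private
 * area handed out by the ethdev layer:
 */
static inline void example_ack_link_update(struct rte_eth_dev *dev)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
}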
/*
* RX/TX function prototypes
*/
void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
void ixgbe_dev_rx_queue_release(struct eth_rx_queue *rxq);
void ixgbe_dev_tx_queue_release(struct eth_tx_queue *txq);
int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, int rx_queue_id,
int numa_node, uint16_t nb_rx_desc, struct mbuf_allocator *a);
int ixgbe_dev_rx_queue_init(struct rte_eth_dev *dev, int rx_queue_id);
int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, int tx_queue_id,
int numa_node, uint16_t nb_tx_desc);
int ixgbe_dev_tx_queue_init(struct rte_eth_dev *dev, int tx_queue_id);
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
uint16_t ixgbe_recv_pkts(void *rx_queue, struct mbuf **rx_pkts,
uint16_t nb_pkts);
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct mbuf **rx_pkts,
uint16_t nb_pkts);
#endif
uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,
struct mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_xmit_pkts(void *tx_queue, struct mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct mbuf **tx_pkts,
uint16_t nb_pkts);
/*
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
int ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint8_t queue);
int ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint8_t queue);
int ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter);
void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
struct rte_eth_fdir *fdir);
int ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
uint8_t queue, uint8_t drop);
int ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
uint8_t queue, uint8_t drop);
int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id);
int ixgbe_fdir_set_masks(struct rte_eth_dev *dev,
struct rte_fdir_masks *fdir_masks);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
/*
* misc function prototypes
*/
void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
#endif /* _IXGBE_ETHDEV_H_ */


@@ -1,867 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_ethdev.h"
/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
#define FDIRCTRL_PBALLOC_MASK 0x03
/* For calculating memory required for FDIR filters */
#define PBALLOC_SIZE_SHIFT 15
/* Number of bits used to mask bucket hash for different pballoc sizes */
#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */
#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */
#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */
#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */
#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */
#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */
/**
* This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c.
* It adds extra configuration of fdirctrl that is common for all filter types.
*
* Initialize Flow Director control registers
* @hw: pointer to hardware structure
* @fdirctrl: value to write to flow director control register
**/
static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
int i;
/* Prime the keys for hashing */
IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
/*
* Continue setup of fdirctrl register bits:
* Set the maximum length per hash bucket to 0xA filters
* Send interrupt when 64 filters are left
*/
fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
(4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
/*
* Poll init-done after we write the register. Estimated times:
* 10G: PBALLOC = 11b, timing is 60us
* 1G: PBALLOC = 11b, timing is 600us
* 100M: PBALLOC = 11b, timing is 6ms
*
* Multiply these timings by 4 if under full Rx load
*
* So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
* 1 msec per poll time. If we're at line rate and drop to 100M, then
* this might not finish in our poll time, but we can live with that
* for now.
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL)
log_warn("ixgbe: Flow Director poll time exceeded!\n");
}
/*
* Set appropriate bits in fdirctrl for: variable reporting levels, moving
* flexbytes matching field, and drop queue (only for perfect matching mode).
*/
static int
configure_fdir_flags(struct rte_fdir_conf *conf, uint32_t *fdirctrl)
{
*fdirctrl = 0;
switch (conf->pballoc) {
case RTE_FDIR_PBALLOC_64K:
/* 8k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
break;
case RTE_FDIR_PBALLOC_128K:
/* 16k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
break;
case RTE_FDIR_PBALLOC_256K:
/* 32k - 1 signature filters */
*fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
break;
default:
/* bad value */
log_err("ixgbe: Invalid fdir_conf->pballoc value");
return -EINVAL;
}
/* status flags: write hash & swindex in the rx descriptor */
switch (conf->status) {
case RTE_FDIR_NO_REPORT_STATUS:
/* do nothing, default mode */
break;
case RTE_FDIR_REPORT_STATUS:
/* report status when the packet matches a fdir rule */
*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
break;
case RTE_FDIR_REPORT_STATUS_ALWAYS:
/* always report status */
*fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
break;
default:
/* bad value */
log_err("ixgbe: Invalid fdir_conf->status value");
return -EINVAL;
}
*fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT);
if (conf->mode == RTE_FDIR_MODE_PERFECT) {
*fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
*fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
}
return 0;
}
int
ixgbe_fdir_configure(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int err;
uint32_t fdirctrl, pbsize;
int i;
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
if (err)
return err;
/*
* Before enabling Flow Director, the Rx Packet Buffer size
* must be reduced. The new value is the current size minus
* flow director memory usage size.
*/
pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
(IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
/*
* The defaults in the HW for RX PB 1-7 are not zero and so should be
* initialized to zero for non-DCB mode; otherwise the actual total RX PB
* would be bigger than programmed and the filter space would run into
* the PB 0 region.
*/
for (i = 1; i < 8; i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
fdir_enable_82599(hw, fdirctrl);
return 0;
}
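/*
 * Worked example of the packet-buffer carve-out above, assuming the
 * PBALLOC field encodes 64K/128K/256K as 1/2/3 (which the shift
 * arithmetic implies). For RTE_FDIR_PBALLOC_128K:
 *
 *   pbsize = 1 << (PBALLOC_SIZE_SHIFT + 2) = 1 << 17 = 128KB
 *
 * and RXPBSIZE(0) shrinks by that amount to make room for the filter
 * tables.
 */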
/*
* The below function is taken from the FreeBSD IXGBE drivers release
* 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
* before returning, as the signature hash can use 16 bits.
*
* The newer driver has optimised functions for calculating bucket and
* signature hashes. However they don't support IPv6 type packets for signature
* filters so are not used here.
*
* Note that the bkt_hash field in the ixgbe_atr_input structure is also never
* set.
*
* Compute the hashes for SW ATR
* @stream: input bitstream to compute the hash on
* @key: 32-bit hash key
**/
static u32
ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
u32 key)
{
/*
* The algorithm is as follows:
* Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
* where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
* and A[n] x B[n] is bitwise AND between same length strings
*
* K[n] is 16 bits, defined as:
* for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
* for n modulo 32 < 15, K[n] =
* K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
*
* S[n] is 16 bits, defined as:
* for n >= 15, S[n] = S[n:n - 15]
* for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
*
* To simplify for programming, the algorithm is implemented
* in software this way:
*
* key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
*
* for (i = 0; i < 352; i+=32)
* hi_hash_dword[31:0] ^= Stream[(i+31):i];
*
* lo_hash_dword[15:0] ^= Stream[15:0];
* lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
* lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
*
* hi_hash_dword[31:0] ^= Stream[351:320];
*
* if(key[0])
* hash[15:0] ^= Stream[15:0];
*
* for (i = 0; i < 16; i++) {
* if (key[i])
* hash[15:0] ^= lo_hash_dword[(i+15):i];
* if (key[i + 16])
* hash[15:0] ^= hi_hash_dword[(i+15):i];
* }
*
*/
__be32 common_hash_dword = 0;
u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
u32 hash_result = 0;
u8 i;
/* record the flow_vm_vlan bits as they are a key part to the hash */
flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
/* generate common hash dword */
for (i = 10; i; i -= 2)
common_hash_dword ^= atr_input->dword_stream[i] ^
atr_input->dword_stream[i - 1];
hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
/* low dword is word swapped version of common */
lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
/* apply flow ID/VM pool/VLAN ID bits to hash words */
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
if (key & 0x0001) hash_result ^= lo_hash_dword;
if (key & 0x00010000) hash_result ^= hi_hash_dword;
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
* delay this because bit 0 of the stream should not be processed
* so we do not add the vlan until after bit 0 was processed
*/
lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
/* process the remaining 30 bits in the key 2 bits at a time */
for (i = 15; i; i--) {
if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
}
return hash_result;
}
/*
* Calculate the hash value needed for signature-match filters. In the FreeBSD
* driver, this is done by the optimised function
* ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
* doesn't support calculating a hash for an IPv6 filter.
*/
static uint32_t
atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
enum rte_fdir_pballoc_type pballoc)
{
uint32_t bucket_hash, sig_hash;
if (pballoc == RTE_FDIR_PBALLOC_256K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_256KB_HASH_MASK;
else if (pballoc == RTE_FDIR_PBALLOC_128K)
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_128KB_HASH_MASK;
else
bucket_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
SIG_BUCKET_64KB_HASH_MASK;
sig_hash = ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_SIGNATURE_HASH_KEY);
return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
}
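/*
 * Illustrative sketch, not part of the original file: computing the
 * combined FDIRHASH value for a hypothetical TCPv4 flow. Field values are
 * arbitrary, and hton16() is assumed to come from base/byteorder.h (the
 * input fields are expected in network byte order).
 */
#if 0 /* example only */
static uint32_t example_sig_hash(void)
{
union ixgbe_atr_input input;
memset(&input, 0, sizeof(input));
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
input.formatted.src_port = hton16(12345);
input.formatted.dst_port = hton16(80);
/* signature hash lands in bits 31:16, bucket hash in bits 15:0 */
return atr_compute_sig_hash_82599(&input, RTE_FDIR_PBALLOC_64K);
}
#endif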
/**
* This function is based on ixgbe_atr_add_signature_filter_82599() in
* ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
* setting extra fields in the FDIRCMD register, and removes the code that was
* verifying the flow_type field. According to the documentation, a flow type
* of 00 (i.e. not TCP, UDP, or SCTP) is not supported; in practice, however,
* it appears to work correctly.
*
* Adds a signature hash filter
* @hw: pointer to hardware structure
* @input: unique input dword
* @queue: queue index to direct traffic to
* @fdircmd: any extra flags to set in fdircmd register
* @fdirhash: pre-calculated hash value for the filter
**/
static void
fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input, u8 queue, u32 fdircmd,
u32 fdirhash)
{
u64 fdirhashcmd;
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
/*
* The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
* is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
*/
fdirhashcmd = (u64)fdircmd << 32;
fdirhashcmd |= fdirhash;
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
log_debug("ixgbe: Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
}
/*
* Convert DPDK rte_fdir_filter struct to ixgbe_atr_input union that is used
* by the IXGBE driver code.
*/
static int
fdir_filter_to_atr_input(struct rte_fdir_filter *fdir_filter,
union ixgbe_atr_input *input)
{
if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP ||
fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) &&
(fdir_filter->port_src || fdir_filter->port_dst)) {
log_err("ixgbe: Invalid fdir_filter");
return -EINVAL;
}
memset(input, 0, sizeof(*input));
input->formatted.vlan_id = fdir_filter->vlan_id;
input->formatted.src_port = fdir_filter->port_src;
input->formatted.dst_port = fdir_filter->port_dst;
input->formatted.flex_bytes = fdir_filter->flex_bytes;
switch (fdir_filter->l4type) {
case RTE_FDIR_L4TYPE_TCP:
input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case RTE_FDIR_L4TYPE_UDP:
input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case RTE_FDIR_L4TYPE_SCTP:
input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
case RTE_FDIR_L4TYPE_NONE:
input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
default:
log_err("ixgbe: Error on l4type input");
return -EINVAL;
}
if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) {
input->formatted.flow_type |= IXGBE_ATR_L4TYPE_IPV6_MASK;
input->formatted.src_ip[0] = fdir_filter->ip_src.ipv6_addr[0];
input->formatted.src_ip[1] = fdir_filter->ip_src.ipv6_addr[1];
input->formatted.src_ip[2] = fdir_filter->ip_src.ipv6_addr[2];
input->formatted.src_ip[3] = fdir_filter->ip_src.ipv6_addr[3];
input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv6_addr[0];
input->formatted.dst_ip[1] = fdir_filter->ip_dst.ipv6_addr[1];
input->formatted.dst_ip[2] = fdir_filter->ip_dst.ipv6_addr[2];
input->formatted.dst_ip[3] = fdir_filter->ip_dst.ipv6_addr[3];
} else {
input->formatted.src_ip[0] = fdir_filter->ip_src.ipv4_addr;
input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv4_addr;
}
return 0;
}
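/*
 * Illustrative sketch, not part of the original file: building a
 * rte_fdir_filter for an IPv4/UDP flow and converting it with the helper
 * above. Addresses and ports are arbitrary; hton16()/hton32() are assumed
 * from base/byteorder.h, since the filter fields are expected in network
 * order (the write-out functions below byte-swap them with IXGBE_NTOHS()).
 */
#if 0 /* example only */
static int example_build_input(union ixgbe_atr_input *input)
{
struct rte_fdir_filter f;
memset(&f, 0, sizeof(f));
f.iptype = RTE_FDIR_IPTYPE_IPV4;
f.l4type = RTE_FDIR_L4TYPE_UDP;
f.ip_src.ipv4_addr = hton32(0x0a000001); /* 10.0.0.1 */
f.ip_dst.ipv4_addr = hton32(0x0a000002); /* 10.0.0.2 */
f.port_src = hton16(5000);
f.port_dst = hton16(6000);
return fdir_filter_to_atr_input(&f, input);
}
#endif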
/*
* Adds or updates a signature filter.
*
* dev: ethernet device to add filter to
* fdir_filter: filter details
* queue: queue index to direct traffic to
* update: 0 to add a new filter, otherwise update existing.
*/
static int
fdir_add_update_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint8_t queue, int update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
uint32_t fdirhash;
union ixgbe_atr_input input;
int err;
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = fdir_filter_to_atr_input(fdir_filter, &input);
if (err)
return err;
fdirhash = atr_compute_sig_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags,
fdirhash);
return 0;
}
int
ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint8_t queue)
{
return fdir_add_update_signature_filter(dev, fdir_filter, queue, 0);
}
int
ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint8_t queue)
{
return fdir_add_update_signature_filter(dev, fdir_filter, queue, 1);
}
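/*
 * Illustrative usage, not part of the original file: steering a flow to
 * RX queue 3 with a signature filter, then re-pointing it at queue 5.
 * Assumes @filter was populated as in the conversion example above and
 * that the device was configured in signature-filter mode.
 */
#if 0 /* example only */
static int example_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *filter)
{
int err = ixgbe_fdir_add_signature_filter(dev, filter, 3);
if (err)
return err;
return ixgbe_fdir_update_signature_filter(dev, filter, 5);
}
#endif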
/*
* This is based on ixgbe_fdir_erase_perfect_filter_82599() in
* ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so
* that it can be used for removing signature and perfect filters.
*/
static s32
fdir_erase_filter_82599(struct ixgbe_hw *hw,
__notused union ixgbe_atr_input *input, uint32_t fdirhash)
{
u32 fdircmd = 0;
u32 retry_count;
s32 err = 0;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
/* flush hash to HW */
IXGBE_WRITE_FLUSH(hw);
/* Query if filter is present */
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
for (retry_count = 10; retry_count; retry_count--) {
/* allow 10us for query to process */
usec_delay(10);
/* verify query completed successfully */
fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
break;
}
if (!retry_count) {
log_err("ixgbe: Timeout querying for flow director filter");
err = -EIO;
}
/* if filter exists in hardware then remove it */
if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
}
return err;
}
int
ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
union ixgbe_atr_input input;
int err;
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = fdir_filter_to_atr_input(fdir_filter, &input);
if (err)
return err;
return fdir_erase_filter_82599(hw, &input,
atr_compute_sig_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc));
}
/**
* Reverse the bits in FDIR registers that store 2 x 16 bit masks.
*
* @hi_dword: Bits 31:16 mask to be bit swapped.
* @lo_dword: Bits 15:0 mask to be bit swapped.
*
* Flow director uses several registers to store 2 x 16 bit masks with the
* bits reversed such as FDIRTCPM, FDIRUDPM and FDIRIP6M. The LS bit of the
* mask affects the MS bit/byte of the target. This function reverses the
* bits in these masks.
**/
static uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
u32 mask = hi_dword << 16;
mask |= lo_dword;
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}
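/*
 * Illustrative check, not part of the original file: each 16-bit half is
 * bit-reversed in place, so bit 0 of @lo_dword maps to bit 15 of the
 * result and bit 0 of @hi_dword maps to bit 31. ASSERT() comes from
 * ixgbe_os.h.
 */
#if 0 /* example only */
static void example_reverse_masks(void)
{
ASSERT(reverse_fdir_bitmasks(0x0001, 0x0001) == 0x80008000);
ASSERT(reverse_fdir_bitmasks(0xFFFF, 0x0000) == 0xFFFF0000);
}
#endif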
/*
 * This macro exists in ixgbe/ixgbe_82599.c; there it reverses the bytes,
 * and the surrounding code reverses them again, so the two swaps cancel.
 * Here it is therefore defined as a plain register write.
 */
#define IXGBE_WRITE_REG_BE32 IXGBE_WRITE_REG
/*
* This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
fdir_set_input_mask_82599(struct ixgbe_hw *hw,
struct rte_fdir_masks *input_mask)
{
/* mask VM pool since it is currently not supported */
u32 fdirm = IXGBE_FDIRM_POOL;
u32 fdirtcpm; /* TCP source and destination port masks. */
u32 fdiripv6m; /* IPv6 source and destination masks. */
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
if (input_mask->only_ip_flow) {
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
if (input_mask->dst_port_mask || input_mask->src_port_mask) {
log_err("ixgbe: Error on src/dst port mask\n");
return -EINVAL;
}
}
if (!input_mask->comp_ipv6_dst)
/* mask DIPV6 */
fdirm |= IXGBE_FDIRM_DIPv6;
if (!input_mask->vlan_id)
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
if (!input_mask->vlan_prio)
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
if (!input_mask->flexbytes)
/* Mask Flex Bytes */
fdirm |= IXGBE_FDIRM_FLEX;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(input_mask->dst_port_mask,
input_mask->src_port_mask);
/* write both the same so that UDP and TCP use the same mask */
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
if (!input_mask->set_ipv6_mask) {
/* Store source and destination IPv4 masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
IXGBE_NTOHL(~input_mask->src_ipv4_mask));
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
IXGBE_NTOHL(~input_mask->dst_ipv4_mask));
} else {
/* Store source and destination IPv6 masks (bit reversed) */
fdiripv6m = reverse_fdir_bitmasks(input_mask->dst_ipv6_mask,
input_mask->src_ipv6_mask);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
}
return IXGBE_SUCCESS;
}
int
ixgbe_fdir_set_masks(struct rte_eth_dev *dev, struct rte_fdir_masks *fdir_masks)
{
struct ixgbe_hw *hw;
int err;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = ixgbe_reinit_fdir_tables_82599(hw);
if (err) {
log_err("ixgbe: reinit of fdir tables failed");
return -EIO;
}
return fdir_set_input_mask_82599(hw, fdir_masks);
}
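/*
 * Illustrative sketch, not part of the original file: exact-matching on
 * the full source/destination IPv4 addresses and ports while ignoring
 * VLAN and flex bytes. Mask field semantics follow the comments in
 * fdir_set_input_mask_82599() above (zero means "mask out").
 */
#if 0 /* example only */
static int example_set_masks(struct rte_eth_dev *dev)
{
struct rte_fdir_masks m;
memset(&m, 0, sizeof(m));
m.src_ipv4_mask = 0xFFFFFFFF; /* exact-match both addresses */
m.dst_ipv4_mask = 0xFFFFFFFF;
m.src_port_mask = 0xFFFF; /* exact-match both ports */
m.dst_port_mask = 0xFFFF;
return ixgbe_fdir_set_masks(dev, &m);
}
#endif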
static uint32_t
atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
enum rte_fdir_pballoc_type pballoc)
{
if (pballoc == RTE_FDIR_PBALLOC_256K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_256KB_HASH_MASK;
else if (pballoc == RTE_FDIR_PBALLOC_128K)
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_128KB_HASH_MASK;
else
return ixgbe_atr_compute_hash_82599(input,
IXGBE_ATR_BUCKET_HASH_KEY) &
PERFECT_BUCKET_64KB_HASH_MASK;
}
/*
* This is based on ixgbe_fdir_write_perfect_filter_82599() in
* ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
* added, and IPv6 support also added. The hash value is also pre-calculated
* as the pballoc value is needed to do it.
*/
static void
fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
uint16_t soft_id, uint8_t queue, uint32_t fdircmd,
uint32_t fdirhash)
{
u32 fdirport, fdirvlan;
/* record the source address (big-endian) */
if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[3]);
} else {
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
}
/* record the first 32 bits of the destination address (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian) */
fdirport = IXGBE_NTOHS(input->formatted.dst_port);
fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
fdirport |= IXGBE_NTOHS(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
/* record vlan (little-endian) and flex_bytes (big-endian) */
fdirvlan = input->formatted.flex_bytes;
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
/* configure FDIRHASH register */
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
/*
* flush all previous writes to make certain registers are
* programmed prior to issuing the command
*/
IXGBE_WRITE_FLUSH(hw);
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
}
/*
* Adds or updates a perfect filter.
*
* dev: ethernet device to add filter to
* fdir_filter: filter details
* soft_id: software index for the filters
* queue: queue index to direct traffic to
* drop: non-zero if packets should be sent to the drop queue
* update: 0 to add a new filter, otherwise update existing.
*/
static int
fdir_add_update_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
uint8_t queue, int drop, int update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
uint32_t fdirhash;
union ixgbe_atr_input input;
int err;
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = fdir_filter_to_atr_input(fdir_filter, &input);
if (err)
return err;
if (drop) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
fdir_write_perfect_filter_82599(hw, &input, soft_id, queue,
fdircmd_flags, fdirhash);
return 0;
}
int
ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
uint8_t queue, uint8_t drop)
{
return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
drop, 0);
}
int
ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
uint8_t queue, uint8_t drop)
{
return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
drop, 1);
}
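/*
 * Illustrative usage, not part of the original file: installing a perfect
 * filter under software index 7 that drops matching packets, then
 * re-pointing it at queue 2. Assumes @filter was built as in the
 * signature example and the device is in perfect-filter mode.
 */
#if 0 /* example only */
static int example_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *filter)
{
int err = ixgbe_fdir_add_perfect_filter(dev, filter, 7, 0, 1);
if (err)
return err;
return ixgbe_fdir_update_perfect_filter(dev, filter, 7, 2, 0);
}
#endif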
int
ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
struct rte_fdir_filter *fdir_filter,
uint16_t soft_id)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
union ixgbe_atr_input input;
uint32_t fdirhash;
int err;
if (hw->mac.type != ixgbe_mac_82599EB)
return -ENOSYS;
err = fdir_filter_to_atr_input(fdir_filter, &input);
if (err)
return err;
/* configure FDIRHASH register */
fdirhash = atr_compute_perfect_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
return fdir_erase_filter_82599(hw, &input, fdirhash);
}
void
ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir *fdir)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg;
if (hw->mac.type != ixgbe_mac_82599EB)
return;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
IXGBE_FDIRFREE_COLL_SHIFT);
info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
IXGBE_FDIRFREE_FREE_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
IXGBE_FDIRLEN_MAXHASH_SHIFT);
info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
IXGBE_FDIRLEN_MAXLEN_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
IXGBE_FDIRUSTAT_REMOVE_SHIFT;
info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
IXGBE_FDIRUSTAT_ADD_SHIFT;
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
IXGBE_FDIRFSTAT_FADD_SHIFT;
/* Copy the new information in the fdir parameter */
fdir->collision = info->collision;
fdir->free = info->free;
fdir->maxhash = info->maxhash;
fdir->maxlen = info->maxlen;
fdir->remove = info->remove;
fdir->add = info->add;
fdir->f_remove = info->f_remove;
fdir->f_add = info->f_add;
}
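/*
 * Illustrative sketch, not part of the original file: sampling flow
 * director occupancy and add/remove counters. The add/remove fields keep
 * accumulating across calls because ixgbe_fdir_info_get() folds in
 * clear-on-read statistics registers.
 */
#if 0 /* example only */
static void example_dump_fdir_stats(struct rte_eth_dev *dev)
{
struct rte_eth_fdir stats;
ixgbe_fdir_info_get(dev, &stats);
log_debug("fdir: free=%u collisions=%u added=%llu removed=%llu",
stats.free, stats.collision,
(unsigned long long)stats.add,
(unsigned long long)stats.remove);
}
#endif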

View File

@ -1,74 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IXGBE_LOGS_H_
#define _IXGBE_LOGS_H_
#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
#define PMD_INIT_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#else
#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
#define PMD_INIT_FUNC_TRACE() do { } while(0)
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
#define PMD_TX_FREE_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
#define PMD_DRV_LOG(level, fmt, args...) \
RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
#endif
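/*
 * Illustrative usage, not part of the original header: with
 * RTE_LIBRTE_IXGBE_DEBUG_DRIVER defined,
 *
 *	PMD_DRV_LOG(DEBUG, "link is %s", up ? "up" : "down");
 *
 * expands to an RTE_LOG() call prefixed with the calling function's name;
 * without it, the macro compiles away entirely.
 */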
#endif /* _IXGBE_LOGS_H_ */

View File

@ -1,745 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
/**
* ixgbe_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_read_mbx");
/* limit read to size of mailbox */
if (size > mbx->size)
size = mbx->size;
if (mbx->ops.read)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
return ret_val;
}
/**
* ixgbe_write_mbx - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer
**/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_write_mbx");
if (size > mbx->size)
ret_val = IXGBE_ERR_MBX;
else if (mbx->ops.write)
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_msg - checks to see if someone sent us mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_msg");
if (mbx->ops.check_for_msg)
ret_val = mbx->ops.check_for_msg(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_ack - checks to see if someone sent us ACK
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_ack");
if (mbx->ops.check_for_ack)
ret_val = mbx->ops.check_for_ack(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_check_for_rst - checks to see if other side has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_rst");
if (mbx->ops.check_for_rst)
ret_val = mbx->ops.check_for_rst(hw, mbx_id);
return ret_val;
}
/**
* ixgbe_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification
**/
STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
DEBUGFUNC("ixgbe_poll_for_msg");
if (!countdown || !mbx->ops.check_for_msg)
goto out;
while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
/**
* ixgbe_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
DEBUGFUNC("ixgbe_poll_for_ack");
if (!countdown || !mbx->ops.check_for_ack)
goto out;
while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
out:
return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
/**
* ixgbe_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_read_posted_mbx");
if (!mbx->ops.read)
goto out;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
/* if ack received read message, otherwise we timed out */
if (!ret_val)
ret_val = mbx->ops.read(hw, msg, size, mbx_id);
out:
return ret_val;
}
/**
* ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_write_posted_mbx");
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
goto out;
/* send msg */
ret_val = mbx->ops.write(hw, msg, size, mbx_id);
/* if msg sent wait until we receive an ack */
if (!ret_val)
ret_val = ixgbe_poll_for_ack(hw, mbx_id);
out:
return ret_val;
}
/**
* ixgbe_init_mbx_ops_generic - Initialize MB function pointers
* @hw: pointer to the HW structure
*
* Sets up the mailbox read and write message function pointers
**/
void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
mbx->ops.read_posted = ixgbe_read_posted_mbx;
mbx->ops.write_posted = ixgbe_write_posted_mbx;
}
/**
* ixgbe_read_v2p_mailbox - read v2p mailbox
* @hw: pointer to the HW structure
*
* This function is used to read the v2p mailbox without losing the read to
* clear status bits.
**/
STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
{
u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
v2p_mailbox |= hw->mbx.v2p_mailbox;
hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
return v2p_mailbox;
}
/**
* ixgbe_check_for_bit_vf - Determine if a status bit was set
* @hw: pointer to the HW structure
* @mask: bitmask for bits to be tested and cleared
*
* This function is used to check for the read to clear bits within
* the V2P mailbox.
**/
STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
s32 ret_val = IXGBE_ERR_MBX;
if (v2p_mailbox & mask)
ret_val = IXGBE_SUCCESS;
hw->mbx.v2p_mailbox &= ~mask;
return ret_val;
}
/**
* ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the PF has set the Status bit or else ERR_MBX
**/
STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_msg_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
**/
STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* ixgbe_check_for_rst_vf - checks to see if the PF has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns true if the PF has set the reset done bit or else false
**/
STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_rst_vf");
if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
* @hw: pointer to the HW structure
*
* return SUCCESS if we obtained the mailbox lock
**/
STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
/* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
/* reserve mailbox for vf use */
if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
ret_val = IXGBE_SUCCESS;
return ret_val;
}
/**
* ixgbe_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer
**/
STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_write_mbx_vf");
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
ixgbe_check_for_msg_vf(hw, 0);
ixgbe_check_for_ack_vf(hw, 0);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
/* update stats */
hw->mbx.stats.msgs_tx++;
/* Drop VFU and interrupt the PF to tell it a message has been sent */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
out_no_write:
return ret_val;
}
/**
* ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully read message from buffer
**/
STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
s32 ret_val = IXGBE_SUCCESS;
u16 i;
DEBUGFUNC("ixgbe_read_mbx_vf");
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
goto out_no_read;
/* copy the message from the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
/* Acknowledge receipt and release mailbox, then we're done */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
}
/**
* ixgbe_init_mbx_params_vf - set initial values for vf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for vf mailbox
*/
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications */
mbx->timeout = 0;
mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
mbx->ops.read = ixgbe_read_mbx_vf;
mbx->ops.write = ixgbe_write_mbx_vf;
mbx->ops.read_posted = ixgbe_read_posted_mbx;
mbx->ops.write_posted = ixgbe_write_posted_mbx;
mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
}
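/*
 * Illustrative sketch, not part of the original file: a VF negotiating
 * mailbox API version 1.1 with the PF via the posted helpers. Assumes the
 * VF reset path has already programmed mbx->timeout (it starts at 0 above,
 * which makes ixgbe_write_posted_mbx() bail out). Message codes come from
 * ixgbe_mbx.h; the single-shot retry policy is an assumption.
 */
#if 0 /* example only */
static s32 example_vf_negotiate_api(struct ixgbe_hw *hw)
{
u32 msg[3] = { IXGBE_VF_API_NEGOTIATE, ixgbe_mbox_api_11, 0 };
s32 err;
err = ixgbe_write_posted_mbx(hw, msg, 3, 0);
if (!err)
err = ixgbe_read_posted_mbx(hw, msg, 3, 0);
if (err)
return err;
/* the PF ORs IXGBE_VT_MSGTYPE_ACK into an accepted request */
return (msg[0] & IXGBE_VT_MSGTYPE_ACK) ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
}
#endif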
STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
s32 ret_val = IXGBE_ERR_MBX;
if (mbvficr & mask) {
ret_val = IXGBE_SUCCESS;
IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
}
return ret_val;
}
/**
* ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
DEBUGFUNC("ixgbe_check_for_msg_pf");
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
index)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.reqs++;
}
return ret_val;
}
/**
* ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
s32 index = IXGBE_MBVFICR_INDEX(vf_number);
u32 vf_bit = vf_number % 16;
DEBUGFUNC("ixgbe_check_for_ack_pf");
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
index)) {
ret_val = IXGBE_SUCCESS;
hw->mbx.stats.acks++;
}
return ret_val;
}
/**
* ixgbe_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
u32 reg_offset = (vf_number < 32) ? 0 : 1;
u32 vf_shift = vf_number % 32;
u32 vflre = 0;
s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_check_for_rst_pf");
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
break;
case ixgbe_mac_X540:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
if (vflre & (1 << vf_shift)) {
ret_val = IXGBE_SUCCESS;
IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
hw->mbx.stats.rsts++;
}
return ret_val;
}
/**
* ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
s32 ret_val = IXGBE_ERR_MBX;
u32 p2v_mailbox;
DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
/* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
/* reserve mailbox for vf use */
p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
ret_val = IXGBE_SUCCESS;
return ret_val;
}
/**
* ixgbe_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_write_mbx_pf");
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_write;
/* flush msg and acks as we are overwriting the message buffer */
ixgbe_check_for_msg_pf(hw, vf_number);
ixgbe_check_for_ack_pf(hw, vf_number);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
/* Interrupt VF to tell it a message has been sent and release buffer*/
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
/* update stats */
hw->mbx.stats.msgs_tx++;
out_no_write:
return ret_val;
}
/**
* ixgbe_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 vf_number)
{
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_read_mbx_pf");
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
if (ret_val)
goto out_no_read;
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
/* Acknowledge the message and release buffer */
IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
/* update stats */
hw->mbx.stats.msgs_rx++;
out_no_read:
return ret_val;
}
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
*/
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X540)
return;
mbx->timeout = 0;
mbx->usec_delay = 0;
mbx->size = IXGBE_VFMAILBOX_SIZE;
mbx->ops.read = ixgbe_read_mbx_pf;
mbx->ops.write = ixgbe_write_mbx_pf;
mbx->ops.read_posted = ixgbe_read_posted_mbx;
mbx->ops.write_posted = ixgbe_write_posted_mbx;
mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
mbx->stats.reqs = 0;
mbx->stats.acks = 0;
mbx->stats.rsts = 0;
}
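/*
 * Illustrative sketch, not part of the original file: the PF polling a
 * single VF's mailbox and draining a pending request. Dispatch on the
 * low 16 bits of msg[0] is left as a stub; message codes are defined in
 * ixgbe_mbx.h.
 */
#if 0 /* example only */
static void example_pf_poll_vf(struct ixgbe_hw *hw, u16 vf)
{
u32 msg[IXGBE_VFMAILBOX_SIZE];
if (ixgbe_check_for_msg(hw, vf) != IXGBE_SUCCESS)
return;
if (ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf) != IXGBE_SUCCESS)
return;
switch (msg[0] & 0xFFFF) {
case IXGBE_VF_RESET:
/* handle the VF reset request, then ACK via ixgbe_write_mbx() */
break;
default:
break;
}
}
#endif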

View File

@ -1,150 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
#include "ixgbe_type.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
/* Define mailbox register bits */
#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
* Message ACK's are the value or'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
* this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
* this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
* clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
/*
* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* mailbox API, version 1.1 VF requests */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
#define IXGBE_VF_MC_TYPE_WORD 3
#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
/* mailbox API, version 2.0 VF requests */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */
#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */
#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */
#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */
#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */
#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */
/* mailbox API, version 2.0 PF requests */
#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* _IXGBE_MBX_H_ */

View File

@ -1,127 +0,0 @@
/******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/
#ifndef _IXGBE_OS_H_
#define _IXGBE_OS_H_
#include <stdarg.h>
#include <stdint.h>
#include <string.h>
#include <base/stddef.h>
#include <base/byteorder.h>
#include <base/time.h>
#include <base/atomic.h>
#include <base/log.h>
#include <base/lock.h>
#include <base/assert.h>
#include <net/ethdev.h>
#include <net/ethqueue.h>
#include "ixgbe_logs.h"
#define ASSERT(x) assert(x)
#define DELAY(x) delay_us(x)
#define usec_delay(x) DELAY(x)
#define msec_delay(x) DELAY(1000*(x))
#define DEBUGFUNC(F) DEBUGOUT(F);
#define DEBUGOUT(S, args...) PMD_DRV_LOG(DEBUG, S, ##args)
#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args)
#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args)
#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args)
#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args)
#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args)
#define FALSE 0
#define TRUE 1
#define EWARN(hw, S, args...) DEBUGOUT1(S, ##args)
/* Bunch of defines for shared code bogosity */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
#define STATIC static
#define IXGBE_NTOHL(_i) ntoh32(_i)
#define IXGBE_NTOHS(_i) ntoh16(_i)
#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i)
#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpu(_i)
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
static inline uint32_t ixgbe_read_addr(volatile void *addr)
{
return IXGBE_PCI_REG(addr);
}
#define IXGBE_PCI_REG_WRITE(reg, value) do { \
IXGBE_PCI_REG((reg)) = (value); \
} while(0)
#define IXGBE_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
/* Not implemented !! */
#define IXGBE_READ_PCIE_WORD(hw, reg) 0
#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
#define IXGBE_READ_REG(hw, reg) \
ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
#define IXGBE_WRITE_REG(hw, reg, value) \
IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
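/*
 * Illustrative usage, not part of the original header: a read-modify-write
 * through the MMIO accessors above, followed by a flush (which is just a
 * read of IXGBE_STATUS). IXGBE_CTRL stands in for any register offset from
 * ixgbe_type.h.
 */
#if 0 /* example only */
static void example_reg_rmw(struct ixgbe_hw *hw, u32 bits)
{
u32 v = IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, v | bits);
IXGBE_WRITE_FLUSH(hw); /* post the write before continuing */
}
#endif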
#endif /* _IXGBE_OS_H_ */

View File

@ -1,533 +0,0 @@
/*-
* BSD LICENSE
*
* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "ixgbe_common.h"
#include "ixgbe_ethdev.h"
#define IXGBE_MAX_VFTA (128)
static inline
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
unsigned char vf_mac_addr[ETH_ADDR_LEN];
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
uint16_t vfn;
for (vfn = 0; vfn < vf_num; vfn++) {
//FIXME: eth_random_addr(vf_mac_addr); random MAC generation not yet ported
/* keep the (currently uninitialized) address as the VF default */
memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
ETH_ADDR_LEN);
}
return 0;
}
static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
intr->mask |= IXGBE_EICR_MAILBOX;
return 0;
}
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num = hw->num_vfs;
uint8_t nb_queue;
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
if (!vf_num)
return;
*vfinfo = malloc(sizeof(struct ixgbe_vf_info) * vf_num);
if (*vfinfo == NULL)
panic("Cannot allocate memory for private VF data\n");
memset(*vfinfo, 0, sizeof(struct ixgbe_vf_info) * vf_num);
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
nb_queue = 2;
RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
} else if (vf_num >= ETH_16_POOLS) {
nb_queue = 4;
RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
} else {
nb_queue = 8;
RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
}
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
/* init_mailbox_params */
hw->mbx.ops.init_params(hw);
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
return;
}
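/*
 * Illustrative example, not part of the original file: with num_vfs = 16
 * the 32-pool layout is selected above, so each pool owns 4 queues, the
 * PF takes pool index 16 as its default VMDq pool, and the PF's queues
 * start at offset 16 * 4 = 64.
 */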
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
uint32_t vtctl, fcrth;
uint32_t vfre_slot, vfre_offset;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num = hw->num_vfs;
uint32_t gpie, gcr_ext;
uint32_t vlanctrl;
int i;
if (!vf_num)
return -1;
/* enable VMDq and set the default pool for PF */
vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
<< IXGBE_VT_CTL_POOL_SHIFT;
vtctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
vfre_offset = vf_num & VFRE_MASK;
vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
/* Enable pools reserved to PF only */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
/* PFDMA Tx General Switch Control Enables VMDQ loopback */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* clear VMDq map to permanent rar 0 */
hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
/* clear VMDq map to scan rar 127 */
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);
/* set VMDq map to default PF pool */
hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
/*
* SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
*/
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
gpie |= IXGBE_GPIE_MSIX_MODE;
switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
case ETH_64_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
gpie |= IXGBE_GPIE_VTMODE_64;
break;
case ETH_32_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
gpie |= IXGBE_GPIE_VTMODE_32;
break;
case ETH_16_POOLS:
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
gpie |= IXGBE_GPIE_VTMODE_16;
break;
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/*
* enable vlan filtering and allow all vlan tags through
*/
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
/* VFTA - enable all vlan filters */
for (i = 0; i < IXGBE_MAX_VFTA; i++) {
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
}
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
/* set flow control threshold to max to avoid tx switch hang */
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
}
return 0;
}
static void
set_rx_mode(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *dev_data = dev->data;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
uint16_t vfn = hw->num_vfs;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
fctrl |= IXGBE_FCTRL_BAM;
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
if (dev_data->promiscuous) {
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
} else {
if (dev_data->all_multicast) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
} else {
vmolr |= IXGBE_VMOLR_ROMPE;
}
}
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
}
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
ixgbe_vlan_hw_strip_enable_all(dev);
else
ixgbe_vlan_hw_strip_disable_all(dev);
}
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
/* reset multicast table array for vf */
vfinfo[vf].num_vf_mc_hashes = 0;
/* reset rx mode */
set_rx_mode(dev);
hw->mac.ops.clear_rar(hw, rar_entry);
}
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
/* enable transmit and receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
ixgbe_vf_reset_event(dev, vf);
}
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
ixgbe_vf_reset_msg(dev, vf);
hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
memcpy(new_mac, vf_mac, ETH_ADDR_LEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
*/
msgbuf[3] = hw->mac.mc_filter_type;
ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
return 0;
}
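/*
* For illustration (hypothetical VF MAC of 00:1b:21:aa:bb:cc), the
* IXGBE_VF_PERMADDR_MSG_LEN-word reply assembled above looks like:
*
*   msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK
*   msgbuf[1..2] = the six MAC bytes, packed from byte 0 of word 1
*   msgbuf[3] = hw->mac.mc_filter_type
*
* The VF side of this exchange is ixgbe_reset_hw_vf(), which unpacks
* perm_addr and mc_filter_type from the same word positions.
*/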
static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
struct eth_addr *addr = (struct eth_addr *) new_mac;
if (!eth_addr_is_zero(addr) && !eth_addr_is_multicast(addr)) {
memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
return -1;
}
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __notused uint32_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
uint16_t *hash_list = (uint16_t *)&msgbuf[1];
uint32_t mta_idx;
uint32_t mta_shift;
const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
uint32_t reg_val;
int i;
/* only so many hash values supported */
nb_entries = min(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
/* store the mc entries */
vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
for (i = 0; i < nb_entries; i++) {
vfinfo->vf_mc_hashes[i] = hash_list[i];
}
for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
& IXGBE_MTA_INDEX_MASK;
mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
reg_val |= (1 << mta_shift);
IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
}
return 0;
}
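/*
* Worked example of the MTA indexing above (hypothetical hash value):
* for a 12-bit hash of 0x4A5, mta_idx = (0x4A5 >> 5) & 0x7F = 0x25 and
* mta_shift = 0x4A5 & 0x1F = 0x05, so bit 5 of MTA register 0x25 is
* set. The 128 32-bit MTA registers together cover all 4096 possible
* hash values.
*/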
static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
int add, vid;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
if (add)
vfinfo[vf].vlan_count++;
else if (vfinfo[vf].vlan_count)
vfinfo[vf].vlan_count--;
return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
}
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __notused uint32_t vf, uint32_t *msgbuf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t new_mtu = msgbuf[1];
uint32_t max_frs;
int max_frame = new_mtu + ETH_HDR_LEN + ETH_CRC_LEN;
/* Only X540 supports jumbo frames in IOV mode */
if (hw->mac.type != ixgbe_mac_X540)
return -1;
if ((max_frame < ETH_MIN_LEN) || (max_frame > ETH_MAX_LEN_JUMBO))
return -1;
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < new_mtu) {
max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
return 0;
}
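/*
* Example of the frame-size check above (hypothetical MTU): a request
* of new_mtu = 9000 gives max_frame = 9000 + ETH_HDR_LEN + ETH_CRC_LEN
* bytes, which must land in [ETH_MIN_LEN, ETH_MAX_LEN_JUMBO] before
* MAXFRS is raised; a request that already fits under the current
* MAXFRS value leaves the register untouched.
*/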
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
int32_t retval;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval) {
log_err("ixgbe: Error mbx recv msg from VF %d\n", vf);
return retval;
}
/* do nothing if the message has already been processed */
if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
return retval;
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
return ixgbe_vf_reset(dev, vf, msgbuf);
}
/* check & process VF to PF mailbox message */
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_MULTICAST:
retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_LPE:
retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_VLAN:
retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
break;
default:
log_debug("ixgbe: Unhandled Msg %8.8x\n", (unsigned) msgbuf[0]);
retval = IXGBE_ERR_MBX;
break;
}
/* respond to the VF according to the message processing result */
if (retval)
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
else
msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
return retval;
}
static inline void
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
ixgbe_write_mbx(hw, &msg, 1, vf);
}
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
uint16_t vf;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
for (vf = 0; vf < hw->num_vfs; vf++) {
/* check & process vf function level reset */
if (!ixgbe_check_for_rst(hw, vf))
ixgbe_vf_reset_event(eth_dev, vf);
/* check & process vf mailbox messages */
if (!ixgbe_check_for_msg(hw, vf))
ixgbe_rcv_msg_from_vf(eth_dev, vf);
/* check & process acks from vf */
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(eth_dev, vf);
}
}

File diff suppressed because it is too large Load Diff

View File

@ -1,144 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_PHY_H_
#define _IXGBE_PHY_H_
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
#define IXGBE_SFF_IDENTIFIER_SFP 0x3
#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
#define IXGBE_TAF_ASM_PAUSE 0x800
/* Bit-shift macros */
#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24
#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16
#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8
/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100
/* I2C SDA and SCL timing parameters for standard mode */
#define IXGBE_I2C_T_HD_STA 4
#define IXGBE_I2C_T_LOW 5
#define IXGBE_I2C_T_HIGH 4
#define IXGBE_I2C_T_SU_STA 5
#define IXGBE_I2C_T_HD_DATA 5
#define IXGBE_I2C_T_SU_DATA 1
#define IXGBE_I2C_T_RISE 1
#define IXGBE_I2C_T_FALL 1
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
#ident "$Id: ixgbe_phy.h,v 1.48 2012/01/04 01:49:02 jtkirshe Exp $"
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
#endif /* _IXGBE_PHY_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,628 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_api.h"
#include "ixgbe_type.h"
#include "ixgbe_vf.h"
#ident "$Id: ixgbe_vf.c,v 1.58 2012/08/09 20:24:53 cmwyborn Exp $"
#ifndef IXGBE_VFWRITE_REG
#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
#endif
#ifndef IXGBE_VFREAD_REG
#define IXGBE_VFREAD_REG IXGBE_READ_REG
#endif
/**
* ixgbe_init_ops_vf - Initialize the pointers for vf
* @hw: pointer to hardware structure
*
* This will assign function pointers, adapter-specific functions can
* override the assignment of generic function pointers by assigning
* their own adapter-specific function pointers.
* Does not touch the hardware.
**/
s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
{
/* MAC */
hw->mac.ops.init_hw = ixgbe_init_hw_vf;
hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
hw->mac.ops.start_hw = ixgbe_start_hw_vf;
/* Cannot clear stats on VF */
hw->mac.ops.clear_hw_cntrs = NULL;
hw->mac.ops.get_media_type = NULL;
hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
hw->mac.ops.get_bus_info = NULL;
/* Link */
hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
hw->mac.ops.get_link_capabilities = NULL;
/* RAR, Multicast, VLAN */
hw->mac.ops.set_rar = ixgbe_set_rar_vf;
hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
hw->mac.ops.init_rx_addrs = NULL;
hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
hw->mac.ops.enable_mc = NULL;
hw->mac.ops.disable_mc = NULL;
hw->mac.ops.clear_vfta = NULL;
hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
hw->mac.max_tx_queues = 1;
hw->mac.max_rx_queues = 1;
hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
return IXGBE_SUCCESS;
}
/**
* ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware by filling the bus info structure and media type, clears
* all on chip counters, initializes receive address registers, multicast
* table, VLAN filter table, calls routine to set up link and flow control
* settings, and leaves transmit and receive units disabled and uninitialized
**/
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
{
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
return IXGBE_SUCCESS;
}
/**
* ixgbe_init_hw_vf - virtual function hardware initialization
* @hw: pointer to hardware structure
*
* Initialize the hardware by resetting the hardware and then starting
* the hardware
**/
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
{
s32 status = hw->mac.ops.start_hw(hw);
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
return status;
}
/**
* ixgbe_reset_hw_vf - Performs hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks and
* clears all interrupts.
**/
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
DEBUGFUNC("ixgbevf_reset_hw_vf");
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
DEBUGOUT("Issuing a function level reset to MAC\n");
ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
msec_delay(50);
/* we cannot reset while the RSTI / RSTD bits are asserted */
while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
timeout--;
usec_delay(5);
}
if (timeout) {
/* mailbox timeout can now become active */
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
mbx->ops.write_posted(hw, msgbuf, 1, 0);
msec_delay(10);
/*
* set our "perm_addr" based on info provided by PF
* also set up the mc_filter_type which is piggybacked
* on the mac address in word 3
*/
ret_val = mbx->ops.read_posted(hw, msgbuf,
IXGBE_VF_PERMADDR_MSG_LEN, 0);
if (!ret_val) {
if (msgbuf[0] == (IXGBE_VF_RESET |
IXGBE_VT_MSGTYPE_ACK)) {
memcpy(hw->mac.perm_addr, addr,
IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.mc_filter_type =
msgbuf[IXGBE_VF_MC_TYPE_WORD];
} else {
ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
}
}
}
return ret_val;
}
/**
* ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
* Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
{
u32 reg_val;
u16 i;
/*
* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
hw->adapter_stopped = true;
/* Clear interrupt mask to stop interrupts from being generated */
IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
/* Clear any pending interrupts, flush previous writes */
IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
/* Disable the transmit unit. Each queue must be disabled. */
for (i = 0; i < hw->mac.max_tx_queues; i++)
IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
/* Disable the receive unit by stopping each queue */
for (i = 0; i < hw->mac.max_rx_queues; i++) {
reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
reg_val &= ~IXGBE_RXDCTL_ENABLE;
IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
}
/* flush all queue disables */
IXGBE_WRITE_FLUSH(hw);
msec_delay(2);
return IXGBE_SUCCESS;
}
/**
* ixgbe_mta_vector - Determines bit-vector in multicast table to set
* @hw: pointer to hardware structure
* @mc_addr: the multicast address
*
* Extracts the 12 bits from a multicast address that determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits from
* incoming rx multicast addresses to determine the bit-vector to check in
* the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type.
**/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
u32 vector = 0;
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
break;
case 1: /* use bits [46:35] of the address */
vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
break;
case 2: /* use bits [45:34] of the address */
vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
break;
case 3: /* use bits [43:32] of the address */
vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
break;
default: /* Invalid mc_filter_type */
DEBUGOUT("MC filter type param set incorrectly\n");
break;
}
/* vector can only be 12-bits or boundary will be exceeded */
vector &= 0xFFF;
return vector;
}
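/*
* Worked example (hypothetical address): with mc_filter_type == 0 and
* the multicast MAC 01:00:5e:00:00:fb, mc_addr[4] = 0x00 and
* mc_addr[5] = 0xfb, so vector = ((0x00 >> 4) | (0xfb << 4)) & 0xFFF =
* 0xFB0. The PF later sets the matching MTA bit when this value is
* delivered via IXGBE_VF_SET_MULTICAST.
*/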
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 retmsg[IXGBE_VFMAILBOX_SIZE];
s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
if (!retval)
mbx->ops.read_posted(hw, retmsg, size, 0);
}
/**
* ixgbe_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @vmdq: VMDq "set" or "pool" index
* @enable_addr: set flag that address is active
**/
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
return ret_val;
}
/**
* ixgbe_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
* @next: caller supplied function to return next address in list
*
* Updates the Multicast Table Array.
**/
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next,
bool clear)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 vector;
u32 cnt, i;
u32 vmdq;
DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
/* Each entry in the list uses 1 16 bit word. We have 30
* 16 bit words available in our HW msg buffer (minus 1 for the
* msg type). That's 30 hash values if we pack 'em right. If
* there are more than 30 MC addresses to add then punt the
* extras for now and then add code to handle more than 30 later.
* It would be unusual for a server to request that many multi-cast
* addresses except for in large enterprise network environments.
*/
DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
msgbuf[0] = IXGBE_VF_SET_MULTICAST;
msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
for (i = 0; i < cnt; i++) {
vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
DEBUGOUT1("Hash value = 0x%03X\n", vector);
vector_list[i] = (u16)vector;
}
return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
}
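/*
* Example of the packing above: the 16-dword mailbox leaves 15 dwords
* (30 16-bit slots) after msgbuf[0], so three addresses produce
* msgbuf[0] = IXGBE_VF_SET_MULTICAST | (3 << IXGBE_VT_MSGINFO_SHIFT)
* followed by vector_list[0..2]; entries beyond 30 are silently
* dropped, as noted above.
*/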
/**
* ixgbe_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
**/
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 ret_val;
msgbuf[0] = IXGBE_VF_SET_VLAN;
msgbuf[1] = vlan;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
return IXGBE_SUCCESS;
return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
}
/**
* ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
* @hw: pointer to hardware structure
*
* Returns the number of transmit queues for the given adapter.
**/
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
{
return IXGBE_VF_MAX_TX_QUEUES;
}
/**
* ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
* @hw: pointer to hardware structure
*
* Returns the number of receive queues for the given adapter.
**/
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
{
return IXGBE_VF_MAX_RX_QUEUES;
}
/**
* ixgbe_get_mac_addr_vf - Read device MAC address
* @hw: pointer to the HW structure
**/
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
int i;
for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
mac_addr[i] = hw->mac.perm_addr[i];
return IXGBE_SUCCESS;
}
s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf));
/*
* If index is one then this is the start of a new list and needs
* indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list.
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr)
memcpy(msg_addr, addr, 6);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
if (!ret_val)
if (msgbuf[0] == (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
ret_val = IXGBE_ERR_OUT_OF_MEM;
return ret_val;
}
/**
* ixgbe_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
return IXGBE_SUCCESS;
}
/**
* ixgbe_check_mac_link_vf - Get link/speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @link_up: true if link is up, false otherwise
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete)
{
u32 links_reg;
if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
*link_up = false;
*speed = 0;
return -1;
}
links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
if (links_reg & IXGBE_LINKS_UP)
*link_up = true;
else
*link_up = false;
switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
break;
}
return IXGBE_SUCCESS;
}
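/*
* Minimal usage sketch (assumes a hw initialized with the VF mac ops
* installed by ixgbe_init_ops_vf):
*
*   ixgbe_link_speed speed;
*   bool link_up;
*   if (hw->mac.ops.check_link(hw, &speed, &link_up, false) ==
*       IXGBE_SUCCESS && link_up)
*       DEBUGOUT("VF link is up\n");
*/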
/**
* ixgbevf_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
}
/**
* ixgbevf_negotiate_api_version - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
/* Negotiate the mailbox API version */
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
hw->api_version = api;
return 0;
}
err = IXGBE_ERR_INVALID_ARGUMENT;
}
return err;
}
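/*
* Typical negotiation pattern (a sketch; the fallback policy is the
* caller's choice): request the newest mailbox API first and fall back
* to the 1.0 API, which ixgbe_reset_hw_vf() assumes by default.
*
*   if (ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11))
*       hw->api_version = ixgbe_mbox_api_10;
*/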
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
int err;
u32 msg[5];
/* do nothing if API doesn't support ixgbevf_get_queues */
switch (hw->api_version) {
case ixgbe_mbox_api_11:
break;
default:
return 0;
}
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUES;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
if (!err)
err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/*
* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_MBX;
/* record and validate values from message */
hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
if (hw->mac.max_tx_queues == 0 ||
hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
if (hw->mac.max_rx_queues == 0 ||
hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
/* in case of unknown state assume we cannot tag frames */
if (*num_tcs > hw->mac.max_rx_queues)
*num_tcs = 1;
*default_tc = msg[IXGBE_VF_DEF_QUEUE];
/* default to queue 0 on out-of-bounds queue number */
if (*default_tc >= hw->mac.max_tx_queues)
*default_tc = 0;
}
return err;
}

View File

@ -1,138 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef __IXGBE_VF_H__
#define __IXGBE_VF_H__
#ident "$Id: ixgbe_vf.h,v 1.34 2012/08/09 17:04:18 cmwyborn Exp $"
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
/* DCB define */
#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
#define IXGBE_VFCTRL 0x00000
#define IXGBE_VFSTATUS 0x00008
#define IXGBE_VFLINKS 0x00010
#define IXGBE_VFFRTIMER 0x00048
#define IXGBE_VFRXMEMWRAP 0x03190
#define IXGBE_VTEICR 0x00100
#define IXGBE_VTEICS 0x00104
#define IXGBE_VTEIMS 0x00108
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
/* define IXGBE_VFPBACL still says TBD in EAS */
#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
#define IXGBE_VFGORC_MSB 0x01024
#define IXGBE_VFGOTC_LSB 0x02020
#define IXGBE_VFGOTC_MSB 0x02024
#define IXGBE_VFMPRC 0x01034
struct ixgbevf_hw_stats {
u64 base_vfgprc;
u64 base_vfgptc;
u64 base_vfgorc;
u64 base_vfgotc;
u64 base_vfmprc;
u64 last_vfgprc;
u64 last_vfgptc;
u64 last_vfgorc;
u64 last_vfgotc;
u64 last_vfmprc;
u64 vfgprc;
u64 vfgptc;
u64 vfgorc;
u64 vfgotc;
u64 vfmprc;
u64 saved_reset_vfgprc;
u64 saved_reset_vfgptc;
u64 saved_reset_vfgorc;
u64 saved_reset_vfgotc;
u64 saved_reset_vfmprc;
};
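/*
* The base, last, and saved_reset fields exist because the hardware
* counters are free-running and narrow (VFGPRC/VFGPTC/VFMPRC are 32
* bits; the octet counts are split across the _LSB/_MSB registers), so
* a driver typically accumulates wrap-safe deltas. A sketch, not the
* only possible scheme:
*
*   u32 cur = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
*   stats->vfgprc += (u32)(cur - (u32)stats->last_vfgprc);
*   stats->last_vfgprc = cur;
*/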
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool autoneg_wait_to_complete);
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */

View File

@ -1,975 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
STATIC s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
/**
* ixgbe_init_ops_X540 - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
* Initialize the function pointers and assign the MAC type for X540.
* Does not touch the hardware.
**/
s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
DEBUGFUNC("ixgbe_init_ops_X540");
ret_val = ixgbe_init_phy_ops_generic(hw);
ret_val = ixgbe_init_ops_generic(hw);
/* EEPROM */
eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
eeprom->ops.read = &ixgbe_read_eerd_X540;
eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
eeprom->ops.write = &ixgbe_write_eewr_X540;
eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
/* PHY */
phy->ops.init = &ixgbe_init_phy_ops_generic;
phy->ops.reset = NULL;
/* MAC */
mac->ops.reset_hw = &ixgbe_reset_hw_X540;
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
mac->ops.get_media_type = &ixgbe_get_media_type_X540;
mac->ops.get_supported_physical_layer =
&ixgbe_get_supported_physical_layer_X540;
mac->ops.read_analog_reg8 = NULL;
mac->ops.write_analog_reg8 = NULL;
mac->ops.start_hw = &ixgbe_start_hw_X540;
mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
mac->rar_highwater = 1;
mac->ops.set_vfta = &ixgbe_set_vfta_generic;
mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
/* Link */
mac->ops.get_link_capabilities =
&ixgbe_get_copper_link_capabilities_generic;
mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
mac->ops.check_link = &ixgbe_check_mac_link_generic;
mac->mcft_size = 128;
mac->vft_size = 128;
mac->num_rar_entries = 128;
mac->rx_pb_size = 384;
mac->max_tx_queues = 128;
mac->max_rx_queues = 128;
mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
/*
* FWSM register
* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
IXGBE_FWSM_MODE_MASK) ? true : false;
hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
/* LEDs */
mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
/* Manageability interface */
mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
return ret_val;
}
/**
* ixgbe_get_link_capabilities_X540 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg)
{
ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg);
return IXGBE_SUCCESS;
}
/**
* ixgbe_get_media_type_X540 - Get media type
* @hw: pointer to hardware structure
*
* Returns the media type (fiber, copper, backplane)
**/
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
return ixgbe_media_type_copper;
}
/**
* ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
* @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
ixgbe_link_speed speed, bool autoneg,
bool autoneg_wait_to_complete)
{
DEBUGFUNC("ixgbe_setup_mac_link_X540");
return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
autoneg_wait_to_complete);
}
/**
* ixgbe_reset_hw_X540 - Perform hardware reset
* @hw: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, and performs a reset.
**/
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
DEBUGFUNC("ixgbe_reset_hw_X540");
/* Call adapter stop to disable tx/rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
if (status != IXGBE_SUCCESS)
goto reset_hw_out;
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
mac_reset_top:
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
usec_delay(1);
ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
if (!(ctrl & IXGBE_CTRL_RST_MASK))
break;
}
if (ctrl & IXGBE_CTRL_RST_MASK) {
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
msec_delay(100);
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
goto mac_reset_top;
}
/* Set the Rx packet buffer size. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
/* Store the permanent mac address */
hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
/*
* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
&hw->mac.wwpn_prefix);
reset_hw_out:
return status;
}
/**
* ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function
* and the gen2 start_hw function.
* Then performs revision-specific operations, if any.
**/
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_start_hw_X540");
ret_val = ixgbe_start_hw_generic(hw);
if (ret_val != IXGBE_SUCCESS)
goto out;
ret_val = ixgbe_start_hw_gen2(hw);
out:
return ret_val;
}
/**
* ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
* @hw: pointer to hardware structure
*
* Determines physical layer capabilities of the current configuration.
**/
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
return physical_layer;
}
/**
* ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
* @hw: pointer to hardware structure
*
* Initializes the EEPROM parameters ixgbe_eeprom_info within the
* ixgbe_hw struct in order to set up EEPROM access.
**/
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
u32 eec;
u16 eeprom_size;
DEBUGFUNC("ixgbe_init_eeprom_params_X540");
if (eeprom->type == ixgbe_eeprom_uninitialized) {
eeprom->semaphore_delay = 10;
eeprom->type = ixgbe_flash;
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
IXGBE_EEPROM_WORD_SIZE_SHIFT);
DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
}
return IXGBE_SUCCESS;
}
/**
* ixgbe_read_eerd_X540 - Read EEPROM word using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the EERD register.
**/
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_read_eerd_X540");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS)
status = ixgbe_read_eerd_generic(hw, offset, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
* Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS)
status = ixgbe_read_eerd_buffer_generic(hw, offset,
words, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word to write to the EEPROM
*
* Write a 16 bit word to the EEPROM using the EEWR register.
**/
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_write_eewr_X540");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS)
status = ixgbe_write_eewr_generic(hw, offset, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @words: number of words
* @data: word(s) to write to the EEPROM
*
* Writes 16 bit word(s) to the EEPROM using the EEWR register.
**/
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
u16 offset, u16 words, u16 *data)
{
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS)
status = ixgbe_write_eewr_buffer_generic(hw, offset,
words, data);
else
status = IXGBE_ERR_SWFW_SYNC;
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
*
* This function does not use synchronization for EERD and EEWR. It can
* be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
*
* @hw: pointer to hardware structure
**/
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
u16 i;
u16 j;
u16 checksum = 0;
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
/*
* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores here. Instead use
* ixgbe_read_eerd_generic
*/
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
/* Include 0x0-0x3E in the checksum (the checksum word itself is skipped) */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) {
DEBUGOUT("EEPROM read failed\n");
break;
}
checksum += word;
}
/*
* Include all data from pointers 0x3, 0x6-0xE. This excludes the
* FW, PHY module, and PCIe Expansion/Option ROM pointers.
*/
for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) {
DEBUGOUT("EEPROM read failed\n");
break;
}
/* Skip pointer section if the pointer is invalid. */
if (pointer == 0xFFFF || pointer == 0 ||
pointer >= hw->eeprom.word_size)
continue;
if (ixgbe_read_eerd_generic(hw, pointer, &length) !=
IXGBE_SUCCESS) {
DEBUGOUT("EEPROM read failed\n");
break;
}
/* Skip pointer section if length is invalid. */
if (length == 0xFFFF || length == 0 ||
(pointer + length) >= hw->eeprom.word_size)
continue;
for (j = pointer+1; j <= pointer+length; j++) {
if (ixgbe_read_eerd_generic(hw, j, &word) !=
IXGBE_SUCCESS) {
DEBUGOUT("EEPROM read failed\n");
break;
}
checksum += word;
}
}
checksum = (u16)IXGBE_EEPROM_SUM - checksum;
return checksum;
}
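/*
* The value returned above preserves the usual EEPROM invariant: the
* covered words plus the stored checksum word sum to IXGBE_EEPROM_SUM.
* For example (hypothetical contents), if the covered words sum to
* 0x1234, the word stored at offset IXGBE_EEPROM_CHECKSUM is
* IXGBE_EEPROM_SUM - 0x1234.
*/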
/**
* ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
* @hw: pointer to hardware structure
* @checksum_val: calculated checksum
*
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
u16 *checksum_val)
{
s32 status;
u16 checksum;
u16 read_checksum = 0;
DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
/*
* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("EEPROM read failed\n");
goto out;
}
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.read because we do not want to take
* the synchronization semaphores twice here.
*/
ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
&read_checksum);
/*
* Verify read checksum from EEPROM is the same as
* calculated checksum
*/
if (read_checksum != checksum)
status = IXGBE_ERR_EEPROM_CHECKSUM;
/* If the user cares, return the calculated checksum */
if (checksum_val)
*checksum_val = checksum;
} else {
status = IXGBE_ERR_SWFW_SYNC;
}
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
out:
return status;
}
/**
* ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
* @hw: pointer to hardware structure
*
* After writing the EEPROM to shadow RAM using the EEWR register, software
* calculates the checksum, updates the EEPROM, and instructs the hardware
* to update the flash.
**/
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
s32 status;
u16 checksum;
DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
/*
* Read the first word from the EEPROM. If this times out or fails, do
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
status = hw->eeprom.ops.read(hw, 0, &checksum);
if (status != IXGBE_SUCCESS)
DEBUGOUT("EEPROM read failed\n");
if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
IXGBE_SUCCESS) {
checksum = hw->eeprom.ops.calc_checksum(hw);
/*
* Do not use hw->eeprom.ops.write because we do not want to
* take the synchronization semaphores twice here.
*/
status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
checksum);
if (status == IXGBE_SUCCESS)
status = ixgbe_update_flash_X540(hw);
else
status = IXGBE_ERR_SWFW_SYNC;
}
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
* ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
* @hw: pointer to hardware structure
*
* Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
* EEPROM from shadow RAM to the flash device.
**/
STATIC s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
u32 flup;
s32 status = IXGBE_ERR_EEPROM;
DEBUGFUNC("ixgbe_update_flash_X540");
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_ERR_EEPROM) {
DEBUGOUT("Flash update time out\n");
goto out;
}
flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_SUCCESS)
DEBUGOUT("Flash update complete\n");
else
DEBUGOUT("Flash update time out\n");
if (hw->revision_id == 0) {
flup = IXGBE_READ_REG(hw, IXGBE_EEC);
if (flup & IXGBE_EEC_SEC1VAL) {
flup |= IXGBE_EEC_FLUP;
IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
}
status = ixgbe_poll_flash_update_done_X540(hw);
if (status == IXGBE_SUCCESS)
DEBUGOUT("Flash update complete\n");
else
DEBUGOUT("Flash update time out\n");
}
out:
return status;
}
/**
* ixgbe_poll_flash_update_done_X540 - Poll flash update status
* @hw: pointer to hardware structure
*
* Polls the FLUDONE (bit 26) of the EEC Register to determine when the
* flash update is done.
**/
STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
u32 i;
u32 reg;
s32 status = IXGBE_ERR_EEPROM;
DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_EEC);
if (reg & IXGBE_EEC_FLUDONE) {
status = IXGBE_SUCCESS;
break;
}
usec_delay(5);
}
return status;
}
/**
* ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
* Acquires the SWFW semaphore through the SW_FW_SYNC register for
* the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 5;
u32 hwmask = 0;
u32 timeout = 200;
u32 i;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
if (swmask == IXGBE_GSSR_EEP_SM)
hwmask = IXGBE_GSSR_FLASH_SM;
/* SW only mask doesn't have FW bit pair */
if (swmask == IXGBE_GSSR_SW_MNG_SM)
fwmask = 0;
for (i = 0; i < timeout; i++) {
/*
* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_swfw_sync_semaphore(hw)) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
goto out;
} else {
/*
* Firmware currently using resource (fwmask), hardware
* currently using resource (hwmask), or other software
* thread currently using resource (swmask)
*/
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
}
}
/* Failed to get SW only semaphore */
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
/* If the resource is not released by the FW/HW, the SW can assume that
* the FW/HW has malfunctioned. In that case the SW should set the SW bit(s)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (swfw_sync & (fwmask | hwmask)) {
if (ixgbe_get_swfw_sync_semaphore(hw)) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto out;
}
swfw_sync |= swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
}
out:
return ret_val;
}
/**
* ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
* Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, NVM, Flash)
**/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
{
u32 swfw_sync;
u32 swmask = mask;
DEBUGFUNC("ixgbe_release_swfw_sync_X540");
ixgbe_get_swfw_sync_semaphore(hw);
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swfw_sync &= ~swmask;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
msec_delay(5);
}
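/*
* Usage sketch (editorial addition): every access to a resource shared
* with firmware must be bracketed by the acquire/release pair above.
* example_locked_eeprom_read() is a hypothetical helper showing the
* pattern around a raw EERD read.
*/
static s32 example_locked_eeprom_read(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
s32 status;
if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS)
return IXGBE_ERR_SWFW_SYNC;
status = ixgbe_read_eerd_generic(hw, offset, data);
ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
return status;
}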
/**
* ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
* @hw: pointer to hardware structure
*
* Sets the hardware semaphores so SW/FW can gain control of shared resources
**/
STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM;
u32 timeout = 2000;
u32 i;
u32 swsm;
DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
* If the SMBI bit is 0 when we read it, then the bit will be
* set and we have the semaphore
*/
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
if (!(swsm & IXGBE_SWSM_SMBI)) {
status = IXGBE_SUCCESS;
break;
}
usec_delay(50);
}
/* Now get the semaphore between SW/FW through the REGSMP bit */
if (status == IXGBE_SUCCESS) {
for (i = 0; i < timeout; i++) {
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
if (!(swsm & IXGBE_SWFW_REGSMP))
break;
usec_delay(50);
}
/*
* Release semaphores and return error if SW NVM semaphore
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
DEBUGOUT("REGSMP Software NVM semaphore not "
"granted.\n");
ixgbe_release_swfw_sync_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
DEBUGOUT("Software semaphore SMBI between device drivers "
"not granted.\n");
}
return status;
}
/**
* ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
* @hw: pointer to hardware structure
*
* This function clears hardware semaphore bits.
**/
STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
u32 swsm;
DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
swsm &= ~IXGBE_SWSM_SMBI;
IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
swsm &= ~IXGBE_SWFW_REGSMP;
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
IXGBE_WRITE_FLUSH(hw);
}
/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
*
* Devices that implement the version 2 interface:
* X540
**/
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
ixgbe_link_speed speed;
bool link_up;
DEBUGFUNC("ixgbe_blink_led_start_X540");
/*
* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
* This will be reversed when we stop the blinking.
*/
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
}
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
ledctl_reg |= IXGBE_LED_BLINK(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
IXGBE_WRITE_FLUSH(hw);
return IXGBE_SUCCESS;
}
/**
* ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
* @hw: pointer to hardware structure
* @index: led number to stop blinking
*
* Devices that implement the version 2 interface:
* X540
**/
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
DEBUGFUNC("ixgbe_blink_led_stop_X540");
/* Restore the LED to its default value. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
ledctl_reg &= ~IXGBE_LED_BLINK(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
/* Unforce link and speed in the MAC. */
macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
IXGBE_WRITE_FLUSH(hw);
return IXGBE_SUCCESS;
}
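/*
* Usage sketch (editorial addition): a typical "identify this port"
* sequence blinks LED 0 for a few seconds, then restores it. The
* msec_delay() primitive is the same one used by the driver above;
* the helper name is hypothetical.
*/
static void example_identify_port(struct ixgbe_hw *hw)
{
if (ixgbe_blink_led_start_X540(hw, 0) != IXGBE_SUCCESS)
return;
msec_delay(3000);
ixgbe_blink_led_stop_X540(hw, 0);
}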

View File

@ -1,65 +0,0 @@
/*******************************************************************************
Copyright (c) 2001-2012, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#ifndef _IXGBE_X540_H_
#define _IXGBE_X540_H_
#include "ixgbe_type.h"
#ident "$Id: ixgbe_x540.h,v 1.6 2012/08/09 20:43:58 cmwyborn Exp $"
s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool autoneg, bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words,
u16 *data);
s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
#endif /* _IXGBE_X540_H_ */
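/*
* Integration sketch (editorial addition; the real wiring lives in an
* init routine such as ixgbe_init_ops_X540(), which this diff does not
* show). X540 support is enabled by pointing the generic function
* tables at the routines declared above, roughly:
*
* hw->eeprom.ops.update_checksum = ixgbe_update_eeprom_checksum_X540;
* hw->eeprom.ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540;
* hw->mac.ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
* hw->mac.ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
*/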

View File

@ -57,7 +57,7 @@ static void *pthread_entry(void *data)
* runtime_init - starts the runtime
* @main_fn: the first function to run as a thread
* @arg: an argument to @main_fn
* @cores: the number of threads to use
* @threads: the number of threads to use
*
* Does not return if successful; otherwise, returns < 0 on error.
*/