net/txgbe: add PF module configuration for SRIOV

Add PF module configuration for SRIOV.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
This commit is contained in:
Jiawen Wu 2020-10-19 16:53:57 +08:00 committed by Ferruh Yigit
parent fa7c130df4
commit 770a352363
7 changed files with 699 additions and 0 deletions

View File

@ -17,6 +17,7 @@ Unicast MAC filter = Y
Multicast MAC filter = Y
SR-IOV = Y
VLAN filter = Y
Rate limitation = Y
CRC offload = P
VLAN offload = P
QinQ offload = P

View File

@ -966,6 +966,92 @@ s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr)
return 0;
}
/**
 * txgbe_clear_vmdq - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar; BIT_MASK32 clears
 *	  every pool association at once
 *
 * Returns 0 on success, TXGBE_ERR_INVALID_ARGUMENT if @rar is out of range.
 **/
s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("txgbe_clear_vmdq");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT("RAR index %d is out of range.\n", rar);
		return TXGBE_ERR_INVALID_ARGUMENT;
	}

	/* select the rar, then read both halves of its pool bitmap */
	wr32(hw, TXGBE_ETHADDRIDX, rar);
	mpsar_lo = rd32(hw, TXGBE_ETHADDRASSL);
	mpsar_hi = rd32(hw, TXGBE_ETHADDRASSH);

	/* nothing to do on surprise removal or when no pool is mapped */
	if (TXGBE_REMOVED(hw->hw_addr))
		goto done;

	if (!mpsar_lo && !mpsar_hi)
		goto done;

	if (vmdq == BIT_MASK32) {
		/* wipe every pool association for this rar */
		if (mpsar_lo) {
			wr32(hw, TXGBE_ETHADDRASSL, 0);
			mpsar_lo = 0;
		}
		if (mpsar_hi) {
			wr32(hw, TXGBE_ETHADDRASSH, 0);
			mpsar_hi = 0;
		}
	} else if (vmdq < 32) {
		/* use an unsigned constant: vmdq may be 31 and shifting a
		 * signed 1 into the sign bit is undefined behavior
		 */
		mpsar_lo &= ~(1u << vmdq);
		wr32(hw, TXGBE_ETHADDRASSL, mpsar_lo);
	} else {
		mpsar_hi &= ~(1u << (vmdq - 32));
		wr32(hw, TXGBE_ETHADDRASSH, mpsar_hi);
	}

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 &&
	    rar != 0 && rar != hw->mac.san_mac_rar_index)
		hw->mac.clear_rar(hw, rar);
done:
	return 0;
}
/**
 * txgbe_set_vmdq - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index (0..63)
 *
 * Returns 0 on success, TXGBE_ERR_INVALID_ARGUMENT if @rar is out of range.
 **/
s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar;
	u32 rar_entries = hw->mac.num_rar_entries;

	DEBUGFUNC("txgbe_set_vmdq");

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		DEBUGOUT("RAR index %d is out of range.\n", rar);
		return TXGBE_ERR_INVALID_ARGUMENT;
	}

	/* select the rar, then set the pool bit in the low or high half of
	 * the pool bitmap; 1u avoids signed-shift UB when the bit index
	 * reaches 31 (vmdq == 31 or vmdq == 63)
	 */
	wr32(hw, TXGBE_ETHADDRIDX, rar);
	if (vmdq < 32) {
		mpsar = rd32(hw, TXGBE_ETHADDRASSL);
		mpsar |= 1u << vmdq;
		wr32(hw, TXGBE_ETHADDRASSL, mpsar);
	} else {
		mpsar = rd32(hw, TXGBE_ETHADDRASSH);
		mpsar |= 1u << (vmdq - 32);
		wr32(hw, TXGBE_ETHADDRASSH, mpsar);
	}
	return 0;
}
/**
* txgbe_init_uta_tables - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
@ -983,6 +1069,214 @@ s32 txgbe_init_uta_tables(struct txgbe_hw *hw)
return 0;
}
/**
 * txgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		 vlanid not found
 *
 * return the VLVF index where this VLAN id should be placed, or
 * TXGBE_ERR_NO_SPACE when it is absent and no empty slot may be used
 *
 **/
s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case: slot 0 always holds VLAN 0 */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 * (seeding with the nonzero error code keeps the loop below from
	 * ever recording an empty slot)
	 */
	first_empty_slot = vlvf_bypass ? TXGBE_ERR_NO_SPACE : 0;

	/* add VLAN enable bit for comparison */
	vlan |= TXGBE_PSRVLAN_EA;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way.
	 *
	 * pre-decrement loop covering (TXGBE_NUM_POOL - 1) .. 1; slot 0 is
	 * reserved (VLAN 0) and intentionally skipped
	 */
	for (regindex = TXGBE_NUM_POOL; --regindex;) {
		/* select the VLVF entry and read back its VLAN+enable word */
		wr32(hw, TXGBE_PSRVLANIDX, regindex);
		bits = rd32(hw, TXGBE_PSRVLAN);
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	/* If we are here then we didn't find the VLAN. Return first empty
	 * slot we found during our search, else error.
	 */
	if (!first_empty_slot)
		DEBUGOUT("No space in VLVF.\n");

	return first_empty_slot ? first_empty_slot : TXGBE_ERR_NO_SPACE;
}
/**
 * txgbe_set_vfta - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter (0..4095)
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB (0..63)
 * @vlan_on: boolean flag to turn on/off VLAN
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 * Returns 0 on success, TXGBE_ERR_PARAM on bad arguments, or the error
 * from txgbe_set_vlvf() when a VLVF slot could not be used.
 **/
s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan, u32 vind,
		   bool vlan_on, bool vlvf_bypass)
{
	u32 regidx, vfta_delta, vfta;
	s32 err;

	DEBUGFUNC("txgbe_set_vfta");

	if (vlan > 4095 || vind > 63)
		return TXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 * 1u avoids signed-shift UB when (vlan % 32) == 31.
	 */
	regidx = vlan / 32;
	vfta_delta = 1u << (vlan % 32);
	vfta = rd32(hw, TXGBE_VLANTBL(regidx));

	/*
	 * vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update the vfta using an XOR:
	 * the bit stays in the mask only if it actually needs to change.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * Call txgbe_set_vlvf to set VLVFB and VLVF; it may zero vfta_delta
	 * when other pools still reference this VLAN.
	 */
	err = txgbe_set_vlvf(hw, vlan, vind, vlan_on, &vfta_delta,
			     vfta, vlvf_bypass);
	if (err != 0) {
		/* with vlvf_bypass the VFTA update proceeds even when no
		 * VLVF slot was available
		 */
		if (vlvf_bypass)
			goto vfta_update;
		return err;
	}

vfta_update:
	/* Update VFTA now that we are ready for traffic */
	if (vfta_delta)
		wr32(hw, TXGBE_VLANTBL(regidx), vfta);

	return 0;
}
/**
 * txgbe_set_vlvf - Set VLAN Pool Filter
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in PSRVLANPLM
 * @vlan_on: boolean flag to turn on/off VLAN in PSRVLAN
 * @vfta_delta: pointer to the difference between the current value
 *		of the VLAN filter table (VLANTBL) and the desired value;
 *		zeroed here when other pools still use this VLAN
 * @vfta: the desired value of the VFTA
 * @vlvf_bypass: boolean flag indicating updating default pool is okay
 *
 * Turn on/off specified bit in VLVF table.
 **/
s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
		   bool vlan_on, u32 *vfta_delta, u32 vfta,
		   bool vlvf_bypass)
{
	u32 bits;
	u32 portctl;
	s32 vlvf_index;

	DEBUGFUNC("txgbe_set_vlvf");

	if (vlan > 4095 || vind > 63)
		return TXGBE_ERR_PARAM;

	/* If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in PSRVLAN
	 *     set the vind bit in the matching PSRVLANPLM
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	portctl = rd32(hw, TXGBE_PORTCTL);
	if (!(portctl & TXGBE_PORTCTL_NUMVT_MASK))
		return 0;

	vlvf_index = txgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0)
		return vlvf_index;

	/* select the VLVF entry, then fetch the pool-bitmap half that
	 * holds pool @vind (PSRVLANPLM(0) covers pools 0-31, (1) 32-63)
	 */
	wr32(hw, TXGBE_PSRVLANIDX, vlvf_index);
	bits = rd32(hw, TXGBE_PSRVLANPLM(vind / 32));

	/* set the pool bit (1u: vind % 32 may be 31) */
	bits |= 1u << (vind % 32);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= 1u << (vind % 32);

	/* the entry is unused only if OUR half is now empty AND the OTHER
	 * bitmap half is empty in hardware; re-reading our own half would
	 * still show the stale bit we have not written back yet, so the
	 * disable path below could never trigger
	 */
	if (!bits &&
	    !rd32(hw, TXGBE_PSRVLANPLM(vind / 32 ^ 1))) {
		/* Clear PSRVLANPLM first, then disable PSRVLAN. Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (*vfta_delta)
			wr32(hw, TXGBE_VLANTBL(vlan / 32), vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(hw, TXGBE_PSRVLAN, 0);
		wr32(hw, TXGBE_PSRVLANPLM(vind / 32), 0);

		return 0;
	}

	/* If there are still bits set in the PSRVLANPLM registers
	 * for the VLAN ID indicated we need to see if the
	 * caller is requesting that we clear the PSRVLANPLM entry bit.
	 * If the caller has requested that we clear the PSRVLANPLM
	 * entry bit but there are still pools/VFs using this VLAN
	 * ID entry then ignore the request.  We're not worried
	 * about the case where we're turning the PSRVLANPLM VLAN ID
	 * entry bit on, only when requested to turn it off as
	 * there may be multiple pools and/or VFs using the
	 * VLAN ID entry.  In that case we cannot clear the
	 * PSRVLANPLM bit until all pools/VFs using that VLAN ID have also
	 * been cleared.  This will be indicated by "bits" being
	 * zero.
	 */
	*vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN ID if not already enabled */
	wr32(hw, TXGBE_PSRVLANPLM(vind / 32), bits);
	wr32(hw, TXGBE_PSRVLAN, TXGBE_PSRVLAN_EA | vlan);

	return 0;
}
/**
* txgbe_clear_vfta - Clear VLAN filter table
* @hw: pointer to hardware structure
@ -1173,6 +1467,49 @@ wwn_prefix_err:
return 0;
}
/**
 * txgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for MAC anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
 *
 **/
void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf)
{
	int reg_idx = vf >> 3;	/* eight VF flags per register */
	int bit_idx = vf % 8;
	u32 regval;

	/* read-modify-write the per-VF MAC anti-spoofing flag */
	regval = rd32(hw, TXGBE_POOLTXASMAC(reg_idx));
	if (enable)
		regval |= (1 << bit_idx);
	else
		regval &= ~(1 << bit_idx);
	wr32(hw, TXGBE_POOLTXASMAC(reg_idx), regval);
}
/**
 * txgbe_set_ethertype_anti_spoofing - Configure Ethertype anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for Ethertype anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
 *
 **/
void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw,
		bool enable, int vf)
{
	int reg_idx = vf >> 3;	/* eight VF flags per register */
	int bit_idx = vf % 8;
	u32 regval;

	/* read-modify-write the per-VF ethertype anti-spoofing flag */
	regval = rd32(hw, TXGBE_POOLTXASET(reg_idx));
	if (enable)
		regval |= (1 << bit_idx);
	else
		regval &= ~(1 << bit_idx);
	wr32(hw, TXGBE_POOLTXASET(reg_idx), regval);
}
/**
* txgbe_get_device_caps - Get additional device capabilities
* @hw: pointer to hardware structure
@ -1747,14 +2084,22 @@ s32 txgbe_init_ops_pf(struct txgbe_hw *hw)
mac->autoc_read = txgbe_autoc_read;
mac->autoc_write = txgbe_autoc_write;
/* RAR, Multicast, VLAN */
mac->set_rar = txgbe_set_rar;
mac->clear_rar = txgbe_clear_rar;
mac->init_rx_addrs = txgbe_init_rx_addrs;
mac->enable_rx = txgbe_enable_rx;
mac->disable_rx = txgbe_disable_rx;
mac->set_vmdq = txgbe_set_vmdq;
mac->clear_vmdq = txgbe_clear_vmdq;
mac->set_vfta = txgbe_set_vfta;
mac->set_vlvf = txgbe_set_vlvf;
mac->clear_vfta = txgbe_clear_vfta;
mac->init_uta_tables = txgbe_init_uta_tables;
mac->setup_sfp = txgbe_setup_sfp_modules;
mac->set_mac_anti_spoofing = txgbe_set_mac_anti_spoofing;
mac->set_ethertype_anti_spoofing = txgbe_set_ethertype_anti_spoofing;
/* Link */
mac->get_link_capabilities = txgbe_get_link_capabilities_raptor;
mac->check_link = txgbe_check_mac_link;

View File

@ -35,8 +35,17 @@ void txgbe_release_swfw_sync(struct txgbe_hw *hw, u32 mask);
s32 txgbe_get_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
s32 txgbe_set_san_mac_addr(struct txgbe_hw *hw, u8 *san_mac_addr);
s32 txgbe_set_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
s32 txgbe_clear_vmdq(struct txgbe_hw *hw, u32 rar, u32 vmdq);
s32 txgbe_init_uta_tables(struct txgbe_hw *hw);
s32 txgbe_set_vfta(struct txgbe_hw *hw, u32 vlan,
u32 vind, bool vlan_on, bool vlvf_bypass);
s32 txgbe_set_vlvf(struct txgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, u32 *vfta_delta, u32 vfta,
bool vlvf_bypass);
s32 txgbe_clear_vfta(struct txgbe_hw *hw);
s32 txgbe_find_vlvf_slot(struct txgbe_hw *hw, u32 vlan, bool vlvf_bypass);
s32 txgbe_check_mac_link(struct txgbe_hw *hw,
u32 *speed,
bool *link_up, bool link_up_wait_to_complete);
@ -44,6 +53,9 @@ s32 txgbe_check_mac_link(struct txgbe_hw *hw,
s32 txgbe_get_wwn_prefix(struct txgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
void txgbe_set_mac_anti_spoofing(struct txgbe_hw *hw, bool enable, int vf);
void txgbe_set_ethertype_anti_spoofing(struct txgbe_hw *hw,
bool enable, int vf);
s32 txgbe_get_device_caps(struct txgbe_hw *hw, u16 *device_caps);
void txgbe_clear_tx_pending(struct txgbe_hw *hw);

View File

@ -5,6 +5,7 @@
#ifndef _TXGBE_TYPE_H_
#define _TXGBE_TYPE_H_
#define TXGBE_DCB_TC_MAX TXGBE_MAX_UP
#define TXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define TXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */

View File

@ -1151,6 +1151,83 @@ txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
intr->mask_misc |= TXGBE_ICRMISC_GPIO;
}
/*
 * Program a TX rate limit (same units as link.link_speed) on the queues of
 * VF @vf selected by bit mask @q_msk, rejecting settings that would push
 * the sum of all stored VF rates past the current link speed.
 * Returns 0 on success, -EINVAL on invalid arguments or over-subscription,
 * or the error from rte_eth_link_get_nowait().
 */
int
txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
			uint16_t tx_rate, uint64_t q_msk)
{
	struct txgbe_hw *hw;
	struct txgbe_vf_info *vfinfo;
	struct rte_eth_link link;
	uint8_t  nb_q_per_pool;
	uint32_t queue_stride;
	uint32_t queue_idx, idx = 0, vf_idx;
	uint32_t queue_end;
	uint16_t total_rate = 0;
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
	if (ret < 0)
		return ret;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	/* a queue can never be limited above the link speed */
	if (tx_rate > link.link_speed)
		return -EINVAL;

	/* empty queue mask: nothing to program */
	if (q_msk == 0)
		return 0;

	hw = TXGBE_DEV_HW(dev);
	vfinfo = *(TXGBE_DEV_VFDATA(dev));
	nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
	/* queues are distributed to pools in fixed strides */
	queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
	queue_idx = vf * queue_stride;
	queue_end = queue_idx + nb_q_per_pool - 1;
	if (queue_end >= hw->mac.max_tx_queues)
		return -EINVAL;

	if (vfinfo) {
		/* sum the rates already granted to every OTHER VF */
		for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
			if (vf_idx == vf)
				continue;
			for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
				idx++)
				total_rate += vfinfo[vf_idx].tx_rate[idx];
		}
	} else {
		return -EINVAL;
	}

	/* Store tx_rate for this vf. */
	for (idx = 0; idx < nb_q_per_pool; idx++) {
		if (((uint64_t)0x1 << idx) & q_msk) {
			if (vfinfo[vf].tx_rate[idx] != tx_rate)
				vfinfo[vf].tx_rate[idx] = tx_rate;
			total_rate += tx_rate;
		}
	}

	if (total_rate > dev->data->dev_link.link_speed) {
		/* Reset stored TX rate of the VF if it causes exceed
		 * link speed.
		 */
		memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
		return -EINVAL;
	}

	/* Set ARBTXRATE of each queue/pool for vf X; q_msk is shifted
	 * right so bit 0 always tracks the current queue
	 */
	for (; queue_idx <= queue_end; queue_idx++) {
		if (0x1 & q_msk)
			txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
		q_msk = q_msk >> 1;
	}

	return 0;
}
/*
* Configure device link speed and setup link.
* It returns 0 on success.
@ -1160,6 +1237,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
{
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
@ -1169,6 +1247,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
uint32_t allowed_speeds = 0;
int mask = 0;
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
PMD_INIT_FUNC_TRACE();
@ -1205,6 +1284,9 @@ txgbe_dev_start(struct rte_eth_dev *dev)
hw->mac.start_hw(hw);
hw->mac.get_link_status = true;
/* configure PF module if SRIOV enabled */
txgbe_pf_host_configure(dev);
txgbe_dev_phy_intr_setup(dev);
/* check and configure queue intr-vector mapping */
@ -1248,6 +1330,16 @@ txgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
if (vfinfo[vf].tx_rate[idx] != 0)
txgbe_set_vf_rate_limit(dev, vf,
vfinfo[vf].tx_rate[idx],
1 << idx);
}
err = txgbe_dev_rxtx_start(dev);
if (err < 0) {
PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@ -1369,8 +1461,10 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
if (hw->adapter_stopped)
return 0;
@ -1389,6 +1483,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
/* stop adapter */
txgbe_stop_hw(hw);
for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
vfinfo[vf].clear_to_send = false;
if (hw->phy.media_type == txgbe_media_type_copper) {
/* Turn off the copper */
hw->phy.set_phy_power(hw, false);
@ -2799,6 +2896,37 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
| TXGBE_ITR_WRDSA);
}
/*
 * Program the hardware TX rate limiter (ARBTXRATE) for one queue.
 * A tx_rate of zero disables the limiter for that queue.
 * Returns 0 on success, -EINVAL when queue_idx is out of range.
 */
int
txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
			uint16_t queue_idx, uint16_t tx_rate)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	uint32_t rate_reg = 0;

	if (queue_idx >= hw->mac.max_tx_queues)
		return -EINVAL;

	/* zero leaves the register cleared (limiter off) */
	if (tx_rate != 0)
		rate_reg = TXGBE_ARBTXRATE_MAX(tx_rate) |
			   TXGBE_ARBTXRATE_MIN(tx_rate / 2);

	/*
	 * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(hw, TXGBE_ARBTXMMW, 0x14);

	/* Set ARBTXRATE of queue X */
	wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
	wr32(hw, TXGBE_ARBTXRATE, rate_reg);
	txgbe_flush(hw);

	return 0;
}
static u8 *
txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@ -2863,6 +2991,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.mac_addr_set = txgbe_set_default_mac_addr,
.uc_hash_table_set = txgbe_uc_hash_table_set,
.uc_all_hash_table_set = txgbe_uc_all_hash_table_set,
.set_queue_rate_limit = txgbe_set_queue_rate_limit,
.set_mc_addr_list = txgbe_dev_set_mc_addr_list,
.rxq_info_get = txgbe_rxq_info_get,
.txq_info_get = txgbe_txq_info_get,

View File

@ -34,6 +34,8 @@
#define TXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
#define TXGBE_MAX_QUEUE_NUM_PER_VF 8
#define TXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
@ -97,6 +99,7 @@ struct txgbe_vf_info {
uint16_t vf_mc_hashes[TXGBE_MAX_VF_MC_ENTRIES];
uint16_t num_vf_mc_hashes;
bool clear_to_send;
uint16_t tx_rate[TXGBE_MAX_QUEUE_NUM_PER_VF];
uint16_t vlan_count;
uint8_t api_version;
uint16_t switch_domain_id;
@ -104,6 +107,26 @@ struct txgbe_vf_info {
uint16_t mac_count;
};
/* One software-tracked ethertype filter entry (mirrors an ETFLT slot). */
struct txgbe_ethertype_filter {
	uint16_t ethertype;	/* ethertype value this entry matches */
	uint32_t etqf;		/* TXGBE_ETFLT_* flag bits for the entry */
	uint32_t etqs;		/* queue/action word; NOTE(review): exact
				 * semantics not visible here — verify
				 */
	/**
	 * If this filter is added by configuration,
	 * it should not be removed.
	 */
	bool conf;
};
/*
 * Structure to store filters' info.
 */
struct txgbe_filter_info {
	/* Bit mask for every used ethertype filter:
	 * bit i set => ethertype_filters[i] is in use
	 */
	uint8_t ethertype_mask;
	/* store used ethertype filters*/
	struct txgbe_ethertype_filter ethertype_filters[TXGBE_ETF_ID_MAX];
};
/*
* Structure to store private data for each driver instance (for each port).
*/
@ -117,6 +140,7 @@ struct txgbe_adapter {
struct txgbe_mirror_info mr_data;
struct txgbe_vf_info *vfdata;
struct txgbe_uta_info uta_info;
struct txgbe_filter_info filter;
bool rx_bulk_alloc_allowed;
};
@ -150,6 +174,9 @@ struct txgbe_adapter {
#define TXGBE_DEV_UTA_INFO(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)
#define TXGBE_DEV_FILTER(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->filter)
/*
* RX/TX function prototypes
*/
@ -227,6 +254,50 @@ void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev);
void txgbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
/* Return the slot index holding @ethertype, or -1 when no in-use slot
 * matches.
 */
static inline int
txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
			uint16_t ethertype)
{
	int slot;

	for (slot = 0; slot < TXGBE_ETF_ID_MAX; slot++) {
		/* skip slots that are not marked in use */
		if (!(filter_info->ethertype_mask & (1 << slot)))
			continue;
		if (filter_info->ethertype_filters[slot].ethertype ==
		    ethertype)
			return slot;
	}
	return -1;
}
/* Copy @ethertype_filter into the first free slot, marking it used.
 * Return the claimed slot index, or -1 when the table is full.
 */
static inline int
txgbe_ethertype_filter_insert(struct txgbe_filter_info *filter_info,
			struct txgbe_ethertype_filter *ethertype_filter)
{
	int slot;

	for (slot = 0; slot < TXGBE_ETF_ID_MAX; slot++) {
		if (filter_info->ethertype_mask & (1 << slot))
			continue;
		/* claim the slot and copy the whole filter entry */
		filter_info->ethertype_mask |= 1 << slot;
		filter_info->ethertype_filters[slot] = *ethertype_filter;
		return slot;
	}
	return -1;
}
#define TXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define TXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */

View File

@ -25,6 +25,7 @@
#include "txgbe_ethdev.h"
#include "rte_pmd_txgbe.h"
#define TXGBE_MAX_VFTA (128)
#define TXGBE_VF_MSG_SIZE_DEFAULT 1
#define TXGBE_VF_GET_QUEUE_MSG_SIZE 5
@ -144,6 +145,145 @@ void txgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
*vfinfo = NULL;
}
/*
 * Install an ethertype filter entry for flow-control frames and enable
 * ethertype anti-spoofing on every VF, so VFs cannot transmit forged
 * pause frames.
 */
static void
txgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
	uint16_t vf_num;
	int i;
	struct txgbe_ethertype_filter ethertype_filter;

	if (!hw->mac.set_ethertype_anti_spoofing) {
		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
		return;
	}

	/* nothing to do if a flow-control filter is already installed */
	i = txgbe_ethertype_filter_lookup(filter_info,
					  TXGBE_ETHERTYPE_FLOW_CTRL);
	if (i >= 0) {
		PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
		return;
	}

	ethertype_filter.ethertype = TXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqf = TXGBE_ETFLT_ENA |
				TXGBE_ETFLT_TXAS |
				TXGBE_ETHERTYPE_FLOW_CTRL;
	ethertype_filter.etqs = 0;
	/* conf = TRUE marks the entry as non-removable */
	ethertype_filter.conf = TRUE;
	i = txgbe_ethertype_filter_insert(filter_info,
					  &ethertype_filter);
	if (i < 0) {
		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
		return;
	}

	/* program the hardware filter slot that was reserved above */
	wr32(hw, TXGBE_ETFLT(i),
	     (TXGBE_ETFLT_ENA |
	      TXGBE_ETFLT_TXAS |
	      TXGBE_ETHERTYPE_FLOW_CTRL));

	vf_num = dev_num_vf(eth_dev);
	for (i = 0; i < vf_num; i++)
		hw->mac.set_ethertype_anti_spoofing(hw, true, i);
}
/*
 * Configure the PF side of SRIOV: VMDq default pool, per-pool RX/TX
 * enables, VT mode, VLAN filtering, MAC/ethertype anti-spoofing and
 * flow-control thresholds.
 * Returns -1 when no VFs are configured, 0 on success.
 */
int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
	uint32_t vtctl, fcrth;
	uint32_t vfre_slot, vfre_offset;
	uint16_t vf_num;
	const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
	const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
	struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
	uint32_t gpie;
	uint32_t gcr_ext;
	uint32_t vlanctrl;
	int i;

	vf_num = dev_num_vf(eth_dev);
	if (vf_num == 0)
		return -1;

	/* enable VMDq and set the default pool for PF */
	vtctl = rd32(hw, TXGBE_POOLCTL);
	vtctl &= ~TXGBE_POOLCTL_DEFPL_MASK;
	vtctl |= TXGBE_POOLCTL_DEFPL(RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
	vtctl |= TXGBE_POOLCTL_RPLEN;
	wr32(hw, TXGBE_POOLCTL, vtctl);

	/* split vf_num into (register slot, bit offset) for the 32-bit
	 * per-pool enable registers
	 */
	vfre_offset = vf_num & VFRE_MASK;
	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

	/* Enable pools reserved to PF only */
	wr32(hw, TXGBE_POOLRXENA(vfre_slot), (~0U) << vfre_offset);
	wr32(hw, TXGBE_POOLRXENA(vfre_slot ^ 1), vfre_slot - 1);
	wr32(hw, TXGBE_POOLTXENA(vfre_slot), (~0U) << vfre_offset);
	wr32(hw, TXGBE_POOLTXENA(vfre_slot ^ 1), vfre_slot - 1);

	wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);

	/* clear VMDq map to permanent rar 0 */
	hw->mac.clear_vmdq(hw, 0, BIT_MASK32);

	/* clear VMDq map to scan rar 127 */
	wr32(hw, TXGBE_ETHADDRIDX, hw->mac.num_rar_entries);
	wr32(hw, TXGBE_ETHADDRASSL, 0);
	wr32(hw, TXGBE_ETHADDRASSH, 0);

	/* set VMDq map to default PF pool */
	hw->mac.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

	/*
	 * SW must set PORTCTL.VT_Mode the same as GPIE.VT_Mode
	 */
	gpie = rd32(hw, TXGBE_GPIE);
	gpie |= TXGBE_GPIE_MSIX;
	gcr_ext = rd32(hw, TXGBE_PORTCTL);
	gcr_ext &= ~TXGBE_PORTCTL_NUMVT_MASK;

	/* number of virtual pools follows the active SRIOV layout */
	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
	case ETH_64_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_64;
		break;
	case ETH_32_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_32;
		break;
	case ETH_16_POOLS:
		gcr_ext |= TXGBE_PORTCTL_NUMVT_16;
		break;
	}

	wr32(hw, TXGBE_PORTCTL, gcr_ext);
	wr32(hw, TXGBE_GPIE, gpie);

	/*
	 * enable vlan filtering and allow all vlan tags through
	 */
	vlanctrl = rd32(hw, TXGBE_VLANCTL);
	vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
	wr32(hw, TXGBE_VLANCTL, vlanctrl);

	/* enable all vlan filters */
	for (i = 0; i < TXGBE_MAX_VFTA; i++)
		wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);

	/* Enable MAC Anti-Spoofing */
	hw->mac.set_mac_anti_spoofing(hw, FALSE, vf_num);

	/* set flow control threshold to max to avoid tx switch hang */
	for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
		wr32(hw, TXGBE_FCWTRLO(i), 0);
		fcrth = rd32(hw, TXGBE_PBRXSIZE(i)) - 32;
		wr32(hw, TXGBE_FCWTRHI(i), fcrth);
	}

	txgbe_add_tx_flow_control_drop_filter(eth_dev);

	return 0;
}
static void
txgbe_set_rx_mode(struct rte_eth_dev *eth_dev)
{