ixgbe: clean up code style

Run the ixgbe driver through checkpatch and fix the issues highlighted.
Fix line spacing and some bad indentation, and in a couple of cases
use the short-circuit return (already there) to lessen indentation.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Wenzhuo Lu <wenzhuo.lu@intel.com>

Applied with four additional fixes for issues highlighted by checkpatch
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Authored by Stephen Hemminger on 2016-04-07 16:45:28 -07:00; committed by Bruce Richardson
parent 84c9b5a9fe
commit f83f50394b
9 changed files with 362 additions and 342 deletions
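As a quick illustration of the kind of fix this commit makes, here is a
minimal, self-contained C sketch; the names (init_hw, dev_id, the 0x10f8
value) are hypothetical, not taken from the driver. It mirrors the first
hunk below: checkpatch warns about assignments embedded in an if
condition, so the assignment is split onto its own line, and sizeof is
written without a space before the parenthesis.

    #include <stdio.h>

    /* stand-in for a hardware init routine; always succeeds here */
    static int init_hw(int *dev_id)
    {
            *dev_id = 0x10f8;
            return 0;
    }

    int main(void)
    {
            int dev_id, rc;

            /* before: if ((rc = init_hw(&dev_id)) == 0 && dev_id == 0x10f8) */
            rc = init_hw(&dev_id);  /* after: assignment on its own line */
            if (rc == 0 && dev_id == 0x10f8)
                    printf("init ok, sizeof(dev_id)=%zu\n", sizeof(dev_id));

            return rc;
    }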


@ -297,8 +297,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
{
int rc;
if ((rc = ixgbe_init_hw(hw)) == 0 &&
hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
rc = ixgbe_init_hw(hw);
if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
hw->mac.ops.setup_link =
&ixgbe_setup_mac_link_multispeed_fixed_fiber;
@ -306,8 +306,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw)
hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type;
hw->mac.ops.disable_tx_laser = NULL;
hw->mac.ops.enable_tx_laser = NULL;
hw->mac.ops.flap_tx_laser = NULL;
hw->mac.ops.enable_tx_laser = NULL;
hw->mac.ops.flap_tx_laser = NULL;
}
return rc;


@ -82,7 +82,7 @@ ixgbe_bypass_set_time(struct ixgbe_adapter *adapter)
BYPASS_CTL1_VALID_M |
BYPASS_CTL1_OFFTRST_M;
value = (sec & BYPASS_CTL1_TIME_M) |
BYPASS_CTL1_VALID |
BYPASS_CTL1_VALID |
BYPASS_CTL1_OFFTRST;
FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set);
@ -275,8 +275,8 @@ s32
ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout)
{
struct ixgbe_hw *hw;
u32 status;
u32 mask;
u32 status;
u32 mask;
s32 ret_val;
struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev);


@ -37,10 +37,10 @@
#ifdef RTE_NIC_BYPASS
struct ixgbe_bypass_mac_ops {
s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status);
bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg);
s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value);
s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status);
bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg);
s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action);
s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value);
};
struct ixgbe_bypass_info {


@ -136,7 +136,7 @@ enum ixgbe_state_t {
#define BYPASS_LOG_EVENT_SHIFT 28
#define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */
#define IXGBE_DEV_TO_ADPATER(dev) \
((struct ixgbe_adapter*)(dev->data->dev_private))
((struct ixgbe_adapter *)(dev->data->dev_private))
/* extractions from ixgbe_phy.h */
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2


@ -232,7 +232,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@ -264,14 +264,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
ether_addr* mac_addr,uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
uint64_t pool_mask,uint8_t vlan_on);
uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@ -397,21 +397,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
last = latest; \
}
#define IXGBE_SET_HWSTRIP(h, q) do{\
uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
#define IXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
#define IXGBE_GET_HWSTRIP(h, q, r) do{\
uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
#define IXGBE_GET_HWSTRIP(h, q, r) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
@ -901,8 +901,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
}
else {
} else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
@ -911,7 +910,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
}
static void
ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
@ -929,7 +928,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
}
static void
ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
@ -952,7 +951,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
@ -1016,7 +1015,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@ -1039,17 +1038,18 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process */
* Tx queue may not initialized by primary process
*/
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
"Using default TX function.");
"Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@ -1086,7 +1086,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
ixgbe_dcb_init(hw,dcb_config);
ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
@ -1127,11 +1127,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
"with your hardware.");
"LOM. Please be aware there may be issues associated "
"with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
"please contact your Intel or hardware representative "
"who provided you with this hardware.");
"please contact your Intel or hardware representative "
"who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
@ -1150,12 +1150,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
"Failed to allocate %u bytes needed to store "
"MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
@ -1164,11 +1164,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
IXGBE_VMDQ_NUM_UC_MAC, 0);
IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
@ -1198,8 +1198,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
@ -1214,7 +1214,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
@ -1313,7 +1313,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
@ -1327,8 +1327,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not initialized by primary process
@ -1339,7 +1340,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
"No TX queues configured yet. Using default TX function.");
"No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
@ -1398,12 +1399,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
hw->mac.num_rar_entries, 0);
hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
"Failed to allocate %u bytes needed to store "
"MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
"Failed to allocate %u bytes needed to store "
"MAC addresses",
ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
@ -1433,12 +1434,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
case 0:
break;
case 0:
break;
default:
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
return -EIO;
default:
PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
return -EIO;
}
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@ -1537,7 +1538,7 @@ ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
@ -1611,7 +1612,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
@ -1659,12 +1660,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
}
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
@ -1683,12 +1684,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip");
return;
}
else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
}
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
@ -1707,8 +1708,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
}
else {
} else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@ -1735,8 +1735,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
}
else {
} else {
/* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
@ -1836,6 +1835,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
@ -2116,7 +2116,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
ixgbe_stop_adapter(hw);
/* reinitialize adapter
* this calls reset and start */
* this calls reset and start
*/
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
@ -2251,7 +2252,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbe_vlan_offload_set(dev, mask);
@ -2471,8 +2472,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
uint32_t mp;
mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
@ -2664,15 +2665,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
hw_stats->mspdc +
hw_stats->rlec +
hw_stats->ruc +
hw_stats->roc +
hw_stats->illerrc +
hw_stats->errbc +
hw_stats->rfc +
hw_stats->fccrc +
hw_stats->fclast;
hw_stats->mspdc +
hw_stats->rlec +
hw_stats->ruc +
hw_stats->roc +
hw_stats->illerrc +
hw_stats->errbc +
hw_stats->rfc +
hw_stats->fccrc +
hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
@ -2786,7 +2787,7 @@ static void
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
@ -2857,7 +2858,7 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
@ -3352,7 +3353,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
if (intr_enable_delay) {
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
} else {
PMD_DRV_LOG(DEBUG, "enable intr immediately");
@ -3571,7 +3572,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Enable flow control according to the current settings.
*/
static int
ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
@ -3618,13 +3619,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
@ -3641,20 +3642,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
@ -3665,7 +3666,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
break;
}
/* Set 802.3x based flow control settings. */
@ -3704,13 +3704,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
}
static int
ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
@ -3724,9 +3724,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
@ -3759,7 +3759,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
err = ixgbe_dcb_pfc_enable(dev,tc_num);
err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
@ -3907,7 +3907,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before. */
* feature has not been enabled before.
*/
if (!dev->data->scattered_rx &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
@ -3967,7 +3968,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_conf* conf = &dev->data->dev_conf;
struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
@ -4029,10 +4030,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
}
/* Set vfta */
ixgbevf_set_vfta_all(dev,1);
ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
@ -4080,7 +4081,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
ixgbevf_set_vfta_all(dev,0);
ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
@ -4119,18 +4120,18 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
for (i = 0; i < IXGBE_VFTA_SIZE; i++){
for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
for (j = 0; j < 32; j++){
for (j = 0; j < 32; j++) {
if (vfta & mask)
ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
mask<<=1;
mask <<= 1;
}
}
}
@ -4142,7 +4143,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vfta * shadow_vfta =
struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
@ -4187,7 +4188,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
@ -4203,7 +4204,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
ixgbevf_vlan_strip_queue_set(dev,i,on);
ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
@ -4223,9 +4224,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
}
static uint32_t
ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
@ -4253,8 +4255,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
}
static int
ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
uint8_t on)
ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
@ -4275,7 +4277,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
vector = ixgbe_uta_vector(hw,mac_addr);
vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
@ -4300,7 +4302,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
@ -4385,7 +4387,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
uint32_t reg,addr;
uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@ -4404,7 +4406,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
else
reg &= ~val;
IXGBE_WRITE_REG(hw, addr,reg);
IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
@ -4412,7 +4414,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
uint32_t reg,addr;
uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
@ -4431,7 +4433,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
else
reg &= ~val;
IXGBE_WRITE_REG(hw, addr,reg);
IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
@ -4449,7 +4451,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
if (ret < 0)
return ret;
}
@ -4471,7 +4473,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl,vlvf;
uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
@ -4484,7 +4486,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
const uint8_t mirror_rule_mask= 0x0F;
const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
@ -4507,7 +4509,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
@ -5302,7 +5304,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before. */
* feature has not been enabled before.
*/
if (!dev->data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))


@ -189,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
IXGBE_FDIRCTRL_INIT_DONE)
break;
msec_delay(1);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
PMD_INIT_LOG(ERR, "Flow Director poll time exceeded "
"during enabling!");
PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!");
return -ETIMEDOUT;
}
return 0;
@ -282,6 +281,7 @@ static inline uint32_t
reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
{
uint32_t mask = hi_dword << 16;
mask |= lo_dword;
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
@ -810,8 +810,10 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
/* Process bits 0 and 16 */
if (key & 0x0001) hash_result ^= lo_hash_dword;
if (key & 0x00010000) hash_result ^= hi_hash_dword;
if (key & 0x0001)
hash_result ^= lo_hash_dword;
if (key & 0x00010000)
hash_result ^= hi_hash_dword;
/*
* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
@ -822,9 +824,11 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
/* process the remaining 30 bits in the key 2 bits at a time */
for (i = 15; i; i-- ) {
if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
for (i = 15; i; i--) {
if (key & (0x0001 << i))
hash_result ^= lo_hash_dword >> i;
if (key & (0x00010000 << i))
hash_result ^= hi_hash_dword >> i;
}
return hash_result;
@ -1016,7 +1020,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@ -1080,9 +1084,9 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
*/
static int
ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
bool del,
bool update)
const struct rte_eth_fdir_filter *fdir_filter,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
@ -1092,7 +1096,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
if (fdir_mode == RTE_FDIR_MODE_NONE)
@ -1109,12 +1113,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
(fdir_filter->input.flow_type ==
RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
" IPv4-other is not supported without"
" L4 protocol and ports masked!");
" IPv4-other is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
@ -1132,16 +1136,16 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
if (is_perfect) {
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
" perfect mode!");
return -ENOTSUP;
}
fdirhash = atr_compute_perfect_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
fdirhash = atr_compute_sig_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
err = fdir_erase_filter_82599(hw, fdirhash);
@ -1159,22 +1163,22 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
} else {
PMD_DRV_LOG(ERR, "Drop option is not supported in"
" signature mode.");
" signature mode.");
return -EINVAL;
}
} else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
queue = (uint8_t)fdir_filter->action.rx_queue;
else
return -EINVAL;
if (is_perfect) {
err = fdir_write_perfect_filter_82599(hw, &input, queue,
fdircmd_flags, fdirhash,
fdir_mode);
fdircmd_flags, fdirhash,
fdir_mode);
} else {
err = fdir_add_signature_filter_82599(hw, &input, queue,
fdircmd_flags, fdirhash);
fdircmd_flags, fdirhash);
}
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
@ -1269,22 +1273,22 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
uint32_t reg, max_num;
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* Get the information from registers */
reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
IXGBE_FDIRFREE_COLL_SHIFT);
IXGBE_FDIRFREE_COLL_SHIFT);
info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
IXGBE_FDIRFREE_FREE_SHIFT);
IXGBE_FDIRFREE_FREE_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
IXGBE_FDIRLEN_MAXHASH_SHIFT);
IXGBE_FDIRLEN_MAXHASH_SHIFT);
info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
IXGBE_FDIRLEN_MAXLEN_SHIFT);
IXGBE_FDIRLEN_MAXLEN_SHIFT);
reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
@ -1310,10 +1314,10 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
(reg & FDIRCTRL_PBALLOC_MASK)));
(reg & FDIRCTRL_PBALLOC_MASK)));
if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
fdir_stats->guarant_cnt = max_num - fdir_stats->free;
fdir_stats->guarant_cnt = max_num - fdir_stats->free;
else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE)
fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;


@ -97,9 +97,9 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num;
@ -108,15 +108,16 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE();
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
if (0 == (vf_num = dev_num_vf(eth_dev)))
vf_num = dev_num_vf(eth_dev);
if (vf_num == 0)
return;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
memset(uta_info,0,sizeof(struct ixgbe_uta_info));
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
@ -141,8 +142,6 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
return;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
@ -220,7 +219,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
uint32_t vlanctrl;
int i;
if (0 == (vf_num = dev_num_vf(eth_dev)))
vf_num = dev_num_vf(eth_dev);
if (vf_num == 0)
return -1;
/* enable VMDq and set the default pool for PF */
@ -280,19 +280,18 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
/*
/*
* enable vlan filtering and allow all vlan tags through
*/
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
/* VFTA - enable all vlan filters */
for (i = 0; i < IXGBE_MAX_VFTA; i++) {
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
}
/* VFTA - enable all vlan filters */
for (i = 0; i < IXGBE_MAX_VFTA; i++)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
@ -481,7 +480,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
@ -678,6 +677,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
vfinfo[vf].clear_to_send = true;
return ret;
}


@ -352,6 +352,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
nb_tx = 0;
while (nb_pkts) {
uint16_t ret, n;
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
nb_tx = (uint16_t)(nb_tx + ret);
@ -478,30 +479,28 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
*/
static inline uint32_t
what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
union ixgbe_tx_offload tx_offload)
union ixgbe_tx_offload tx_offload)
{
/* If match with the current used context */
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
& tx_offload.data[0])) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
& tx_offload.data[1])))) {
return txq->ctx_curr;
}
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
& tx_offload.data[0])) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
& tx_offload.data[1]))))
return txq->ctx_curr;
/* What if match with the next context */
txq->ctx_curr ^= 1;
if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
& tx_offload.data[0])) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
& tx_offload.data[1])))) {
return txq->ctx_curr;
}
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
& tx_offload.data[0])) &&
(txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
(txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
& tx_offload.data[1]))))
return txq->ctx_curr;
/* Mismatch, use the previous context */
return IXGBE_CTX_NUM;
@ -511,6 +510,7 @@ static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
{
uint32_t tmp = 0;
if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
tmp |= IXGBE_ADVTXD_POPTS_TXSM;
if (ol_flags & PKT_TX_IP_CKSUM)
@ -524,6 +524,7 @@ static inline uint32_t
tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
{
uint32_t cmdtype = 0;
if (ol_flags & PKT_TX_VLAN_PKT)
cmdtype |= IXGBE_ADVTXD_DCMD_VLE;
if (ol_flags & PKT_TX_TCP_SEG)
@ -561,8 +562,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
/* Check to make sure the last descriptor to clean is done */
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
status = txr[desc_to_clean_to].wb.status;
if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
{
if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) {
PMD_TX_FREE_LOG(DEBUG,
"TX descriptor %4u is not done"
"(port=%d queue=%d)",
@ -1313,8 +1313,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
* reference packets that are ready to be received.
*/
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD)
{
i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
for (j = LOOK_AHEAD-1; j >= 0; --j)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
@ -1472,6 +1471,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
(unsigned) rxq->queue_id);
@ -1523,6 +1523,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx = 0;
while (nb_pkts) {
uint16_t ret, n;
n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
nb_rx = (uint16_t)(nb_rx + ret);
@ -1889,8 +1890,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
rx_mbuf_alloc_failed++;
break;
}
}
else if (nb_hold > rxq->rx_free_thresh) {
} else if (nb_hold > rxq->rx_free_thresh) {
uint16_t next_rdt = rxq->rx_free_trigger;
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
@ -2151,6 +2151,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
prev = (uint16_t) (txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
txe[i].mbuf = NULL;
txe[i].last_id = i;
@ -2170,7 +2171,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
txq->ctx_curr = 0;
memset((void*)&txq->ctx_cache, 0,
memset((void *)&txq->ctx_cache, 0,
IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
}
@ -2443,6 +2444,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
if (rxq->rx_nb_avail) {
for (i = 0; i < rxq->rx_nb_avail; ++i) {
struct rte_mbuf *mb;
mb = rxq->rx_stage[rxq->rx_next_avail + i];
rte_pktmbuf_free_seg(mb);
}
@ -2665,7 +2667,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
/*
* Zero init all the descriptors in the ring.
*/
memset (rz->addr, 0, RX_RING_SZ);
memset(rz->addr, 0, RX_RING_SZ);
/*
* Modified to setup VFRDT for Virtual Function
@ -2679,8 +2681,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
rxq->rdh_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx));
}
else {
} else {
rxq->rdt_reg_addr =
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx));
rxq->rdh_reg_addr =
@ -2816,6 +2817,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
if (txq != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
@ -2824,6 +2826,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
ixgbe_reset_rx_queue(adapter, rxq);
@ -3139,6 +3142,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
for (i = 0; i < nb_tcs; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
@ -3147,14 +3151,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* zero alloc all unused TCs */
for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
/* clear 10 bits. */
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
/* MRQC: enable vmdq and dcb */
mrqc = ((num_pools == ETH_16_POOLS) ? \
IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
mrqc = (num_pools == ETH_16_POOLS) ?
IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
/* PFVTCTL: turn on virtualisation and set the default pool */
@ -3192,7 +3197,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
}
/* VFRE: pool enabling for receive - 16 or 32 */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
IXGBE_WRITE_REG(hw, IXGBE_VFRE(0),
num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*
@ -3205,7 +3210,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & 0xFFF)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 32
@ -3223,7 +3228,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
*/
static void
ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
@ -3238,18 +3243,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
/* Enable DCB for Tx with 8 TCs */
if (dcb_config->num_tcs.pg_tcs == 8) {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
}
else {
} else {
reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
}
if (dcb_config->vt_mode)
reg |= IXGBE_MTQC_VT_ENA;
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
/* Disable drop for all queues */
for (q = 0; q < 128; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@ -3261,7 +3265,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
reg |= IXGBE_SECTX_DCB;
IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
}
return;
}
/**
@ -3285,25 +3288,23 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
ixgbe_dcb_tx_hw_config(hw,dcb_config);
return;
ixgbe_dcb_tx_hw_config(hw, dcb_config);
}
static void
ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) {
if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
}
else {
} else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@ -3318,19 +3319,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
static void
ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
struct ixgbe_dcb_config *dcb_config)
{
struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
uint8_t i, j;
/* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */
if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) {
if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) {
dcb_config->num_tcs.pg_tcs = ETH_8_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_8_TCS;
}
else {
} else {
dcb_config->num_tcs.pg_tcs = ETH_4_TCS;
dcb_config->num_tcs.pfc_tcs = ETH_4_TCS;
}
@ -3342,7 +3342,6 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap =
(uint8_t)(1 << j);
}
return;
}
static void
@ -3352,7 +3351,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_rx_conf *rx_conf =
&dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
@ -3373,7 +3372,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
struct rte_eth_dcb_tx_conf *tx_conf =
&dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
struct ixgbe_dcb_tc_config *tc;
uint8_t i,j;
uint8_t i, j;
dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
@ -3394,7 +3393,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
*/
static void
ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
@ -3454,13 +3453,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
*/
reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
return;
}
static void
ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill,
uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@ -3485,16 +3482,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m
{
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa);
ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa);
ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map);
break;
default:
break;
@ -3515,7 +3512,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
int ret = 0;
uint8_t i,pfc_en,nb_tcs;
uint8_t i, pfc_en, nb_tcs;
uint16_t pbsize, rx_buffer_size;
uint8_t config_dcb_rx = 0;
uint8_t config_dcb_tx = 0;
@ -3529,7 +3526,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
switch(dev->data->dev_conf.rxmode.mq_mode){
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
dcb_config->vt_mode = true;
if (hw->mac.type != ixgbe_mac_82598EB) {
@ -3560,10 +3557,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
case ETH_MQ_TX_VMDQ_DCB:
dcb_config->vt_mode = true;
config_dcb_tx = DCB_TX_CONFIG;
/* get DCB and VT TX configuration parameters from rte_eth_conf */
ixgbe_dcb_vt_tx_config(dev,dcb_config);
/* get DCB and VT TX configuration parameters
* from rte_eth_conf
*/
ixgbe_dcb_vt_tx_config(dev, dcb_config);
/*Configure general VMDQ and DCB TX parameters*/
ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config);
ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
break;
case ETH_MQ_TX_DCB:
@ -3586,8 +3585,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Avoid un-configured priority mapping to TC0 */
uint8_t j = 4;
uint8_t mask = 0xFF;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
mask = (uint8_t)(mask & (~ (1 << map[i])));
mask = (uint8_t)(mask & (~(1 << map[i])));
for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) {
if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES))
map[j++] = i;
@ -3623,6 +3623,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Set RX buffer size */
pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT;
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
}
@ -3632,9 +3633,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
}
if (config_dcb_tx) {
/* Only support an equally distributed Tx packet buffer strategy. */
/* Only support an equally distributed
* Tx packet buffer strategy.
*/
uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs;
uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX;
for (i = 0; i < nb_tcs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
@ -3647,9 +3651,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
}
/*Calculates traffic class credits*/
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_TX_CONFIG);
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame,
ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
IXGBE_DCB_RX_CONFIG);
if (config_dcb_rx) {
@ -3659,7 +3663,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa);
/* Configure PG(ETS) RX */
ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map);
ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
}
if (config_dcb_tx) {
@ -3669,7 +3673,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
/* Configure PG(ETS) TX */
ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map);
ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
}
/*Configure queue statistics registers*/
@ -3683,7 +3687,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
* If the TC count is 8,and the default high_water is 48,
* the low_water is 16 as default.
*/
hw->fc.high_water[i] = (pbsize * 3 ) / 4;
hw->fc.high_water[i] = (pbsize * 3) / 4;
hw->fc.low_water[i] = pbsize / 4;
/* Enable pfc for this TC */
tc = &dcb_config->tc_config[i];
@ -3721,8 +3725,6 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
/** Configure DCB hardware **/
ixgbe_dcb_hw_configure(dev, dcb_cfg);
return;
}
/*
@ -3787,7 +3789,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
for (i = 0; i < cfg->nb_pool_maps; i++) {
/* set vlan id in VF register and set the valid bit */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN |
(cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK)));
/*
* Put the allowed pools in VFB reg. As we only have 16 or 64
@ -3795,12 +3797,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
* i.e. bits 0-31
*/
if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2),
(cfg->pool_map[i].pools & UINT32_MAX));
else
IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \
((cfg->pool_map[i].pools >> 32) \
& UINT32_MAX));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)),
((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
}
@ -3840,7 +3841,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
/* Disable drop for all queues */
for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
IXGBE_WRITE_REG(hw, IXGBE_QDE,
(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@ -3848,8 +3849,6 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
IXGBE_WRITE_FLUSH(hw);
return;
}
static int __attribute__((cold))
@ -3857,12 +3856,13 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
uint64_t dma_addr;
unsigned i;
unsigned int i;
/* Initialize software ring entries */
for (i = 0; i < rxq->nb_rx_desc; i++) {
volatile union ixgbe_adv_rx_desc *rxd;
struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
if (mbuf == NULL) {
PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned) rxq->queue_id);
@ -4253,6 +4253,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
rxq->rx_using_sse = rx_using_sse;
}
}
@ -4305,6 +4306,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
if (rsc_capable) {
uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
if (rx_conf->enable_lro)
/*
* Since NFS packets coalescing is not supported - clear
@ -4498,6 +4500,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB) {
/* Must setup the PSRTYPE register */
uint32_t psrtype;
psrtype = IXGBE_PSRTYPE_TCPHDR |
IXGBE_PSRTYPE_UDPHDR |
IXGBE_PSRTYPE_IPV4HDR |
@ -4597,7 +4600,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Enable TX CRC (checksum offload requirement) and hw padding
* (TSO requirement) */
* (TSO requirement)
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN);
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
@ -4622,26 +4626,26 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
* bookkeeping if things aren't delivered in order.
*/
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL(txq->reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
txctrl);
break;
case ixgbe_mac_82598EB:
txctrl = IXGBE_READ_REG(hw,
IXGBE_DCA_TXCTRL(txq->reg_idx));
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx),
txctrl);
break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_X550EM_a:
	default:
		txctrl = IXGBE_READ_REG(hw,
				IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
			txctrl);
		break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
default:
	txctrl = IXGBE_READ_REG(hw,
			IXGBE_DCA_TXCTRL_82599(txq->reg_idx));
	txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
	IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx),
		txctrl);
	break;
}
}
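The switch above differs only in which TXCTRL offset each MAC generation uses; the clear-WRO step itself is one read-modify-write. A hedged sketch of that shared pattern, assuming the register macros from the diff:

    /* 82598 addresses the register via IXGBE_DCA_TXCTRL(i); 82599 and
     * later use IXGBE_DCA_TXCTRL_82599(i). Either way, clear the
     * relaxed-ordering (WRO) bit and write the value back.
     */
    uint32_t off = (hw->mac.type == ixgbe_mac_82598EB) ?
            IXGBE_DCA_TXCTRL(txq->reg_idx) :
            IXGBE_DCA_TXCTRL_82599(txq->reg_idx);
    uint32_t txctrl = IXGBE_READ_REG(hw, off);
    txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
    IXGBE_WRITE_REG(hw, off, txctrl);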
@ -4892,49 +4896,49 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (tx_queue_id < dev->data->nb_tx_queues) {
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
if (hw->mac.type == ixgbe_mac_82599EB) {
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_us(RTE_IXGBE_WAIT_100_US);
txtdh = IXGBE_READ_REG(hw,
IXGBE_TDH(txq->reg_idx));
txtdt = IXGBE_READ_REG(hw,
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
"when stopping.", tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
txdctl &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
/* Wait until TX Enable ready */
if (hw->mac.type == ixgbe_mac_82599EB) {
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_ms(1);
txdctl = IXGBE_READ_REG(hw,
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable "
"Tx Queue %d", tx_queue_id);
}
if (txq->ops != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
} else
if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
if (hw->mac.type == ixgbe_mac_82599EB) {
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_us(RTE_IXGBE_WAIT_100_US);
txtdh = IXGBE_READ_REG(hw,
IXGBE_TDH(txq->reg_idx));
txtdt = IXGBE_READ_REG(hw,
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
"when stopping.", tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
txdctl &= ~IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
/* Wait until TX Enable ready */
if (hw->mac.type == ixgbe_mac_82599EB) {
poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
do {
rte_delay_ms(1);
txdctl = IXGBE_READ_REG(hw,
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
PMD_INIT_LOG(ERR, "Could not disable "
"Tx Queue %d", tx_queue_id);
}
if (txq->ops != NULL) {
txq->ops->release_mbufs(txq);
txq->ops->reset(txq);
}
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
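Both waits in the stop path above share one bounded-poll shape: delay, read a register, give up after a fixed budget. A hedged sketch of that pattern as a standalone helper (name hypothetical; the intended success condition is the bits reading back clear):

    /* Hypothetical bounded poll mirroring the poll_ms loops above:
     * returns 0 once the given bits read back clear, -1 on timeout.
     */
    static int poll_until_clear(struct ixgbe_hw *hw, uint32_t reg, uint32_t bits)
    {
            int poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;

            do {
                    rte_delay_ms(1);
                    if (!(IXGBE_READ_REG(hw, reg) & bits))
                            return 0;
            } while (--poll_ms);
            return -1;
    }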

View File

@ -154,7 +154,8 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
/* pkt type + vlan olflags mask */
const __m128i pkttype_msk = _mm_set_epi16(
0x0000, 0x0000, 0x0000, 0x0000,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT,
PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@ -228,18 +229,21 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
/* Just the act of getting into the function from the application is
* going to cost about 7 cycles */
* going to cost about 7 cycles
*/
rxdp = rxq->rx_ring + rxq->rx_tail;
_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act */
* of time to act
*/
if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH)
ixgbe_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
* there is actually a packet available */
* there is actually a packet available
*/
if (!(rxdp->wb.upper.status_error &
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
return 0;
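The early return above keys off the DD (descriptor done) bit that hardware sets on write-back; the same test reads naturally as a predicate (helper hypothetical):

    /* Hypothetical predicate: true once hardware has written the
     * descriptor back with DD set. The descriptor is little-endian on
     * the wire, hence rte_cpu_to_le_32() on the host-side mask.
     */
    static inline int rx_desc_done(volatile union ixgbe_adv_rx_desc *rxdp)
    {
            return !!(rxdp->wb.upper.status_error &
                            rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
    }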
@ -262,7 +266,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
);
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache */
* the next 'n' mbufs into the cache
*/
sw_ring = &rxq->sw_ring[rxq->rx_tail];
/* A. load 4 packet in one loop
@ -359,7 +364,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* the staterr values are not in order, as the count
* count of dd bits doesn't care. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit */
* compresses the 32-bit values to 8-bit
*/
eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask);
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
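The shuffle above reorders and narrows in one instruction; a hedged sketch of the same _mm_shuffle_epi8 idiom with an illustrative mask (values are not the driver's eop_shuf_mask):

    #include <tmmintrin.h>  /* SSSE3 _mm_shuffle_epi8 */

    /* Illustrative only: select byte 0 of each 32-bit lane into the low
     * four result bytes; 0x80 entries zero everything else, so four
     * 32-bit flags compress into one int.
     */
    static inline int compress_lane_bytes(__m128i v)
    {
            const __m128i shuf = _mm_set_epi8(
                    0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
                    0x80, 0x80, 0x80, 0x80, 12, 8, 4, 0);
            return _mm_cvtsi128_si32(_mm_shuffle_epi8(v, shuf));
    }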
@ -416,12 +422,12 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
static inline uint16_t
reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
uint16_t nb_bufs, uint8_t *split_flags)
uint16_t nb_bufs, uint8_t *split_flags)
{
struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
struct rte_mbuf *start = rxq->pkt_first_seg;
struct rte_mbuf *end = rxq->pkt_last_seg;
unsigned pkt_idx, buf_idx;
unsigned int pkt_idx, buf_idx;
for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
if (end != NULL) {
@ -535,6 +541,7 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
{
int i;
for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
vtx1(txdp, *pkt, flags);
}
@ -601,6 +608,7 @@ tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
for (i = 0; i < (int)nb_pkts; ++i)
txep[i].mbuf = tx_pkts[i];
}
@ -675,7 +683,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
unsigned i;
unsigned int i;
struct ixgbe_tx_entry_v *txe;
const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
@ -701,8 +709,8 @@ ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
const unsigned mask = rxq->nb_rx_desc - 1;
unsigned i;
const unsigned int mask = rxq->nb_rx_desc - 1;
unsigned int i;
if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
return;
@ -731,7 +739,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
uint16_t i;
@ -742,6 +750,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
/* Initialize SW ring entries */
for (i = 0; i < txq->nb_tx_desc; i++) {
volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
txd->wb.status = IXGBE_TXD_STAT_DD;
txe[i].mbuf = NULL;
}
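Presetting every status word to DD, as above, makes the whole ring look already completed, so the first transmit burst can reclaim slots without a special empty-ring case. A hedged sketch of the consumer-side check this invariant serves (helper hypothetical):

    /* Hypothetical predicate: a descriptor slot is reusable once its DD
     * bit is set, whether by hardware write-back or by the reset above.
     */
    static inline int tx_desc_free(volatile union ixgbe_adv_tx_desc *txd)
    {
            return !!(txd->wb.status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD));
    }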