ethdev: add device flag to bypass auto-filled queue xstats

Queue stats are stored in 'struct rte_eth_stats' as fixed-size arrays,
whose size is set by the 'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile-time
flag.
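
For reference, the fields in question look roughly like this (abridged
sketch of 'struct rte_eth_stats'; see 'rte_ethdev.h' for the full
definition):

    struct rte_eth_stats {
            uint64_t ipackets;  /* Total successfully received packets. */
            uint64_t opackets;  /* Total successfully transmitted packets. */
            /* ... */
            /* Fixed-size per-queue counters: */
            uint64_t q_ipackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
            uint64_t q_opackets[RTE_ETHDEV_QUEUE_STAT_CNTRS];
            uint64_t q_ibytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
            uint64_t q_obytes[RTE_ETHDEV_QUEUE_STAT_CNTRS];
            uint64_t q_errors[RTE_ETHDEV_QUEUE_STAT_CNTRS];
    };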

As a result of a technical board discussion, it was decided to remove
the queue statistics from 'struct rte_eth_stats' in the long term.

Instead, PMDs should represent the queue statistics via xstats, which
gives more flexibility in the number of queues supported.
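
As an illustration, an application would then read per-queue counters
through the xstats API instead of the fixed-size arrays (minimal
sketch; error handling, includes and frees omitted, and the "rx_q"
name prefix is an assumption that depends on the PMD):

    int i, nb = rte_eth_xstats_get(port_id, NULL, 0);
    struct rte_eth_xstat *xstats = malloc(nb * sizeof(*xstats));
    struct rte_eth_xstat_name *names = malloc(nb * sizeof(*names));

    rte_eth_xstats_get_names(port_id, names, nb);
    rte_eth_xstats_get(port_id, xstats, nb);
    for (i = 0; i < nb; i++)
            if (strncmp(names[i].name, "rx_q", 4) == 0)
                    printf("%s: %" PRIu64 "\n",
                           names[i].name, xstats[i].value);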

Currently the queue stats in the xstats are filled by the ethdev layer
from the basic stats. When the queue stats are removed from the basic
stats, the responsibility to fill the relevant xstats will be pushed to
the PMDs.
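
The autofill works along these lines (simplified sketch, not the
verbatim ethdev code: it merges the name and value loops for brevity,
and the exact name format is illustrative):

    for (q = 0; q < RTE_MIN(dev->data->nb_rx_queues,
                            RTE_ETHDEV_QUEUE_STAT_CNTRS); q++) {
            snprintf(names[count].name, sizeof(names[count].name),
                     "rx_q%upackets", q);
            xstats[count++].value = stats.q_ipackets[q];
    }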

For the transition period, a temporary 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS'
device flag is created. Initially, all PMDs using xstats set this flag.
PMDs that implement the queue stats in their own xstats should clear
the flag.
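
From a PMD's point of view, the transition looks like this
(illustrative sketch, not taken from any single driver):

    /* Today: keep relying on the ethdev-layer autofill. */
    eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

    /* Once the PMD reports queue stats through its own xstats_get()
     * and xstats_get_names() callbacks, it stops setting the flag
     * (or clears it):
     */
    eth_dev->data->dev_flags &= ~RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;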

When all PMDs have switched to xstats for the queue stats, the queue
stats related fields will be removed from 'struct rte_eth_stats',
together with the 'RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS' flag. Later, the
'RTE_ETHDEV_QUEUE_STAT_CNTRS' compile-time flag can also be removed.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Haiyue Wang <haiyue.wang@intel.com>
Acked-by: Xiao Wang <xiao.w.wang@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
Commit: f30e69b41f (parent: 62024eb827)
Author: Ferruh Yigit, 2020-10-14 03:26:47 +01:00

60 changed files with 106 additions and 12 deletions

@@ -860,6 +860,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
 data->nb_tx_queues = (uint16_t)nb_queues;
 data->dev_link = pmd_link;
 data->mac_addrs = &(*internals)->eth_addr;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 (*eth_dev)->dev_ops = &ops;

@@ -1562,6 +1562,7 @@ init_internals(struct rte_vdev_device *dev, const char *if_name,
 eth_dev->data->dev_private = internals;
 eth_dev->data->dev_link = pmd_link;
 eth_dev->data->mac_addrs = &internals->eth_addr;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->dev_ops = &ops;
 eth_dev->rx_pkt_burst = eth_af_xdp_rx;
 eth_dev->tx_pkt_burst = eth_af_xdp_tx;

@@ -256,6 +256,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
 return ret;
 pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 rte_eth_copy_pci_info(dev, pci_dev);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Use dummy function until setup */
 dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;

@@ -383,6 +384,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
 eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->data->mac_addrs = rte_zmalloc(name,
 RTE_ETHER_ADDR_LEN, 0);

@@ -380,6 +380,8 @@ eth_atl_dev_init(struct rte_eth_dev *eth_dev)
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Vendor and Device ID need to be set before init of shared code */
 hw->device_id = pci_dev->id.device_id;
 hw->vendor_id = pci_dev->id.vendor_id;

@@ -974,6 +974,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Check current migration status */
 if (avp_dev_migration_pending(eth_dev)) {

@@ -1972,6 +1972,8 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 pdata = eth_dev->data->dev_private;
 /* initial state */
 rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);

@@ -648,6 +648,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 sc->pcie_bus = pci_dev->addr.bus;
 sc->pcie_device = pci_dev->addr.devid;

@@ -5997,6 +5997,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev, void *params __rte_unused)
 return 0;
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 bp = eth_dev->data->dev_private;

@@ -184,7 +184,8 @@ int bnxt_representor_init(struct rte_eth_dev *eth_dev, void *params)
 vf_rep_bp->rep_fc_r2f = rep_params->rep_fc_r2f;
 vf_rep_bp->rep_fc_f2r = rep_params->rep_fc_f2r;
-eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->data->representor_id = rep_params->vf_id;
 rte_eth_random_addr(vf_rep_bp->dflt_mac_addr);

@@ -3234,7 +3234,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 }
 eth_dev->dev_ops = &default_dev_ops;
-eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rte_spinlock_init(&internals->lock);
 rte_spinlock_init(&internals->lsc_lock);

@@ -1262,6 +1262,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 }
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
 adapter = rte_zmalloc(name, sizeof(*adapter), 0);
 if (!adapter)

@@ -2219,6 +2219,8 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Invoke PMD device initialization function */
 diag = dpaa_dev_init(eth_dev);
 if (diag == 0) {

@@ -2794,6 +2794,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
 eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Invoke PMD device initialization function */
 diag = dpaa2_dev_init(eth_dev);
 if (diag == 0) {

@@ -265,6 +265,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
 hw->device_id = pci_dev->id.device_id;

@@ -765,6 +765,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

@@ -959,6 +960,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->device_id = pci_dev->id.device_id;
 hw->vendor_id = pci_dev->id.vendor_id;

@@ -1779,6 +1779,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 memset(adapter, 0, sizeof(struct ena_adapter));
 ena_dev = &adapter->ena_dev;

@@ -885,6 +885,8 @@ enetc_dev_init(struct rte_eth_dev *eth_dev)
 eth_dev->rx_pkt_burst = &enetc_recv_pkts;
 eth_dev->tx_pkt_burst = &enetc_xmit_pkts;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Retrieving and storing the HW base address of device */
 hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
 hw->device_id = pci_dev->id.device_id;

@@ -1300,6 +1300,7 @@ static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
 pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
 rte_eth_copy_pci_info(eth_dev, pdev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 enic->pdev = pdev;
 addr = &pdev->addr;

@@ -672,7 +672,8 @@ int enic_vf_representor_init(struct rte_eth_dev *eth_dev, void *init_params)
 eth_dev->device->driver = pf->rte_dev->device->driver;
 eth_dev->dev_ops = &enic_vf_representor_dev_ops;
-eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->data->representor_id = vf->vf_id;
 eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr_vf",
 sizeof(struct rte_ether_addr) *

@@ -264,7 +264,8 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
 mac->addr_bytes[0], mac->addr_bytes[1],
 mac->addr_bytes[2], mac->addr_bytes[3],
 mac->addr_bytes[4], mac->addr_bytes[5]);
-dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 PRIV(dev)->intr_handle = (struct rte_intr_handle){
 .fd = -1,
 .type = RTE_INTR_HANDLE_EXT,

@@ -3076,6 +3076,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
 }
 rte_eth_copy_pci_info(dev, pdev);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
 memset(macvlan, 0, sizeof(*macvlan));

@@ -3108,6 +3108,8 @@ static int hinic_func_init(struct rte_eth_dev *eth_dev)
 return 0;
 }
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
 memset(nic_dev, 0, sizeof(*nic_dev));

@@ -6106,6 +6106,8 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 }
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 ret = hns3_mp_init_primary();
 if (ret) {
 PMD_INIT_LOG(ERR,

@@ -2753,6 +2753,8 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 }
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 ret = hns3_mp_init_primary();
 if (ret) {
 PMD_INIT_LOG(ERR,

@@ -1465,6 +1465,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 intr_handle = &pci_dev->intr_handle;
 rte_eth_copy_pci_info(dev, pci_dev);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
 pf->adapter->eth_dev = dev;

@@ -1575,6 +1575,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 }
 i40e_set_default_ptype_table(eth_dev);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->vendor_id = pci_dev->id.vendor_id;
 hw->device_id = pci_dev->id.device_id;

@@ -508,7 +508,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
 return -ENODEV;
 }
-ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 ethdev->data->representor_id = representor->vf_id;
 /* Setting the number queues allocated to the VF */

@@ -1434,6 +1434,7 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->vendor_id = pci_dev->id.vendor_id;
 hw->device_id = pci_dev->id.device_id;

@@ -906,6 +906,8 @@ ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
 if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
 PMD_INIT_LOG(ERR, "Failed to init DCF hardware");

@@ -2137,6 +2137,8 @@ ice_dev_init(struct rte_eth_dev *dev)
 return 0;
 }
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 ice_set_default_ptype_table(dev);
 pci_dev = RTE_DEV_TO_PCI(dev->device);
 intr_handle = &pci_dev->intr_handle;

@@ -1244,6 +1244,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 return 0;
 rte_eth_copy_pci_info(dev, pci_dev);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->back = pci_dev;
 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

@@ -1003,6 +1003,7 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
 return 0;
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 lif->index = adapter->nlifs;
 lif->eth_dev = eth_dev;

@@ -2966,7 +2966,8 @@ ipn3ke_rpst_init(struct rte_eth_dev *ethdev, void *init_params)
 return -ENODEV;
 }
-ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rte_spinlock_lock(&ipn3ke_link_notify_list_lk);
 TAILQ_INSERT_TAIL(&ipn3ke_rpst_list, rpst, next);

@@ -1118,6 +1118,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
 rte_atomic32_clear(&ad->link_thread_running);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Vendor and Device ID need to be set before init of shared code */
 hw->device_id = pci_dev->id.device_id;

@@ -1596,6 +1597,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 pci_dev->device.devargs);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 hw->device_id = pci_dev->id.device_id;
 hw->vendor_id = pci_dev->id.vendor_id;

@@ -392,6 +392,7 @@ eth_kni_create(struct rte_vdev_device *vdev,
 data->mac_addrs = &internals->eth_addr;
 data->promiscuous = 1;
 data->all_multicast = 1;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rte_eth_random_addr(internals->eth_addr.addr_bytes);

@@ -2094,6 +2094,7 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 rte_eth_copy_pci_info(eth_dev, pdev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 if (pdev->mem_resource[0].addr) {
 lio_dev->hw_addr = pdev->mem_resource[0].addr;

@@ -1539,6 +1539,7 @@ memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
 data->dev_link = pmd_link;
 data->mac_addrs = ether_addr;
 data->promiscuous = 1;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->dev_ops = &ops;
 eth_dev->device = &vdev->device;

@@ -1035,6 +1035,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 eth_dev->data->mac_addrs = priv->mac;
 eth_dev->device = &pci_dev->device;
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Initialize local interrupt handle for current port. */
 memset(&priv->intr_handle, 0, sizeof(struct rte_intr_handle));
 priv->intr_handle.fd = -1;

@@ -1277,6 +1277,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 priv->dev_data = eth_dev->data;
 eth_dev->data->mac_addrs = priv->mac;
 eth_dev->device = dpdk_dev;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Configure the first MAC address by default. */
 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
 DRV_LOG(ERR,

@@ -840,6 +840,7 @@ mvneta_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 eth_dev->rx_pkt_burst = mvneta_rx_pkt_burst;
 mvneta_set_tx_function(eth_dev);
 eth_dev->dev_ops = &mvneta_ops;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rte_eth_dev_probing_finish(eth_dev);
 return 0;

@@ -2865,6 +2865,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
 mrvl_set_tx_function(eth_dev);
 eth_dev->dev_ops = &mrvl_ops;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rte_eth_dev_probing_finish(eth_dev);
 return 0;

@@ -950,6 +950,8 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 return 0;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Since Hyper-V only supports one MAC address */
 eth_dev->data->mac_addrs = rte_calloc("hv_mac", HN_MAX_MAC_ADDRS,
 sizeof(struct rte_ether_addr), 0);

@@ -516,6 +516,8 @@ nfb_eth_dev_init(struct rte_eth_dev *dev)
 data->all_multicast = nfb_eth_allmulticast_get(dev);
 internals->rx_filter_original = data->promiscuous;
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 RTE_LOG(INFO, PMD, "NFB device ("
 PCI_PRI_FMT ") successfully initialized\n",
 pci_addr->domain, pci_addr->bus, pci_addr->devid,

@@ -2992,6 +2992,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
 eth_dev->data->port_id, pci_dev->id.vendor_id,

@@ -550,6 +550,7 @@ eth_dev_null_create(struct rte_vdev_device *dev, struct pmd_options *args)
 data->mac_addrs = &internals->eth_addr;
 data->promiscuous = 1;
 data->all_multicast = 1;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->dev_ops = &ops;

@@ -1375,6 +1375,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 data->promiscuous = 0;
 data->all_multicast = 0;
 data->scattered_rx = 0;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Get maximum number of supported MAC entries */
 max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);

@@ -2424,6 +2424,7 @@ otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -

@@ -1158,6 +1158,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
 data->mac_addrs = &(*internals)->eth_addr;
 data->promiscuous = 1;
 data->all_multicast = 1;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /*
 * NOTE: we'll replace the data element, of originally allocated

@@ -855,6 +855,8 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
 eth_dev->data->nb_rx_queues = 1;
 eth_dev->data->nb_tx_queues = 1;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* For link status, open the PFE CDEV; Error from this function
 * is silently ignored; In case of error, the link status will not
 * be available.

@@ -2545,6 +2545,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
 }
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* @DPDK */
 edev->vendor_id = pci_dev->id.vendor_id;

@@ -361,6 +361,7 @@ do_eth_dev_ring_create(const char *name,
 data->mac_addrs = &internals->address;
 data->promiscuous = 1;
 data->all_multicast = 1;
+data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 eth_dev->dev_ops = &ops;
 data->numa_node = numa_node;

@@ -2217,6 +2217,7 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
 /* Copy PCI device info to the dev->data */
 rte_eth_copy_pci_info(dev, pci_dev);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 rc = sfc_kvargs_parse(sa);
 if (rc != 0)

@@ -1548,6 +1548,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
 rte_ether_addr_copy(&eth_addr, data->mac_addrs);
+dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
 RTE_STR(RTE_SZEDATA2_DRIVER_NAME), data->name);

@@ -1922,7 +1922,8 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, const char *tap_name,
 /* Setup some default values */
 data = dev->data;
 data->dev_private = pmd;
-data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 data->numa_node = numa_node;
 data->dev_link = pmd_link;

@@ -2155,6 +2155,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 nic->device_id = pci_dev->id.device_id;
 nic->vendor_id = pci_dev->id.vendor_id;

@@ -1447,7 +1447,8 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
 internal->flags = flags;
 internal->disable_flags = disable_flags;
 data->dev_link = pmd_link;
-data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 data->promiscuous = 1;
 data->all_multicast = 1;

@@ -1718,6 +1718,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
 else
 eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Setting up rx_header size for the device */
 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
 vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||

@@ -250,6 +250,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 return 0;
 rte_eth_copy_pci_info(eth_dev, pci_dev);
+eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 /* Vendor and Device ID need to be set before init of shared code */
 hw->device_id = pci_dev->id.device_id;

@@ -2698,8 +2698,10 @@ get_xstats_basic_count(struct rte_eth_dev *dev)
 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 count = RTE_NB_STATS;
-count += nb_rxqs * RTE_NB_RXQ_STATS;
-count += nb_txqs * RTE_NB_TXQ_STATS;
+if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) {
+count += nb_rxqs * RTE_NB_RXQ_STATS;
+count += nb_txqs * RTE_NB_TXQ_STATS;
+}
 return count;
 }

@@ -2790,6 +2792,10 @@ rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
 sizeof(xstats_names[0].name));
 cnt_used_entries++;
 }
+if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+return cnt_used_entries;
 num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 for (id_queue = 0; id_queue < num_q; id_queue++) {
 for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {

@@ -2988,6 +2994,9 @@ rte_eth_basic_stats_get(uint16_t port_id, struct rte_eth_xstat *xstats)
 xstats[count++].value = val;
 }
+if ((dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS) == 0)
+return count;
 /* per-rxq stats */
 for (q = 0; q < nb_rxqs; q++) {
 for (i = 0; i < RTE_NB_RXQ_STATS; i++) {

@@ -3123,8 +3132,9 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
 nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
 /* Return generic statistics */
-count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
-(nb_txqs * RTE_NB_TXQ_STATS);
+count = RTE_NB_STATS;
+if (dev->data->dev_flags & RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS)
+count += (nb_rxqs * RTE_NB_RXQ_STATS) + (nb_txqs * RTE_NB_TXQ_STATS);
 /* implemented by the driver */
 if (dev->dev_ops->xstats_get != NULL) {

@@ -1802,6 +1802,11 @@ struct rte_eth_dev_owner {
 #define RTE_ETH_DEV_REPRESENTOR 0x0010
 /** Device does not support MAC change after started */
 #define RTE_ETH_DEV_NOLIVE_MAC_ADDR 0x0020
+/**
+ * Queue xstats filled automatically by ethdev layer.
+ * PMDs filling the queue xstats themselves should not set this flag
+ */
+#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS 0x0040
 /**
  * Iterates over valid ethdev ports owned by a specific owner.