net/txgbe: support device statistics

Add device statistics retrieval by reading hardware registers.

Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>

commit c9bb590d42 (parent db9767a583)
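For illustration only (not part of this patch): a minimal sketch of how an application would exercise the new stats path through the generic ethdev API once the port is started. The port id, output format, and helper name are placeholders.

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Sketch: rte_eth_stats_get() lands in txgbe_dev_stats_get(), which reads
 * the hardware counters via txgbe_read_stats_registers(). */
static void
show_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	printf("port %u: rx %" PRIu64 " pkts/%" PRIu64 " bytes, "
	       "tx %" PRIu64 " pkts/%" PRIu64 " bytes, "
	       "missed %" PRIu64 ", ierrors %" PRIu64 "\n",
	       port_id, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.obytes,
	       stats.imissed, stats.ierrors);

	/* Clears both the hardware counters and the driver's software totals. */
	rte_eth_stats_reset(port_id);
}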
@@ -23,6 +23,7 @@ L4 checksum offload = P
Inner L3 checksum = P
Inner L4 checksum = P
Packet type parsing = Y
Basic stats = Y
Multiprocess aware = Y
Linux UIO = Y
Linux VFIO = Y
@@ -15,6 +15,7 @@ Features
- Packet type information
- Checksum offload
- TSO offload
- Port hardware statistics
- Jumbo frames
- Link state information
- Interrupt mode for RX
@@ -1071,30 +1071,30 @@ enum txgbe_5tuple_protocol {
#define TXGBE_MACRXERRCRCH 0x01192C
#define TXGBE_MACRXERRLENL 0x011978
#define TXGBE_MACRXERRLENH 0x01197C
#define TXGBE_MACRX1to64L 0x001940
#define TXGBE_MACRX1to64H 0x001944
#define TXGBE_MACRX65to127L 0x001948
#define TXGBE_MACRX65to127H 0x00194C
#define TXGBE_MACRX128to255L 0x001950
#define TXGBE_MACRX128to255H 0x001954
#define TXGBE_MACRX256to511L 0x001958
#define TXGBE_MACRX256to511H 0x00195C
#define TXGBE_MACRX512to1023L 0x001960
#define TXGBE_MACRX512to1023H 0x001964
#define TXGBE_MACRX1024toMAXL 0x001968
#define TXGBE_MACRX1024toMAXH 0x00196C
#define TXGBE_MACTX1to64L 0x001834
#define TXGBE_MACTX1to64H 0x001838
#define TXGBE_MACTX65to127L 0x00183C
#define TXGBE_MACTX65to127H 0x001840
#define TXGBE_MACTX128to255L 0x001844
#define TXGBE_MACTX128to255H 0x001848
#define TXGBE_MACTX256to511L 0x00184C
#define TXGBE_MACTX256to511H 0x001850
#define TXGBE_MACTX512to1023L 0x001854
#define TXGBE_MACTX512to1023H 0x001858
#define TXGBE_MACTX1024toMAXL 0x00185C
#define TXGBE_MACTX1024toMAXH 0x001860
#define TXGBE_MACRX1TO64L 0x001940
#define TXGBE_MACRX1TO64H 0x001944
#define TXGBE_MACRX65TO127L 0x001948
#define TXGBE_MACRX65TO127H 0x00194C
#define TXGBE_MACRX128TO255L 0x001950
#define TXGBE_MACRX128TO255H 0x001954
#define TXGBE_MACRX256TO511L 0x001958
#define TXGBE_MACRX256TO511H 0x00195C
#define TXGBE_MACRX512TO1023L 0x001960
#define TXGBE_MACRX512TO1023H 0x001964
#define TXGBE_MACRX1024TOMAXL 0x001968
#define TXGBE_MACRX1024TOMAXH 0x00196C
#define TXGBE_MACTX1TO64L 0x001834
#define TXGBE_MACTX1TO64H 0x001838
#define TXGBE_MACTX65TO127L 0x00183C
#define TXGBE_MACTX65TO127H 0x001840
#define TXGBE_MACTX128TO255L 0x001844
#define TXGBE_MACTX128TO255H 0x001848
#define TXGBE_MACTX256TO511L 0x00184C
#define TXGBE_MACTX256TO511H 0x001850
#define TXGBE_MACTX512TO1023L 0x001854
#define TXGBE_MACTX512TO1023H 0x001858
#define TXGBE_MACTX1024TOMAXL 0x00185C
#define TXGBE_MACTX1024TOMAXH 0x001860

#define TXGBE_MACRXUNDERSIZE 0x011938
#define TXGBE_MACRXOVERSIZE 0x01193C
@@ -10,6 +10,8 @@

#define TXGBE_FRAME_SIZE_MAX (9728) /* Maximum frame size, +FCS */
#define TXGBE_FRAME_SIZE_DFT (1518) /* Default frame size, +FCS */
#define TXGBE_MAX_UP 8
#define TXGBE_MAX_QP (128)
#define TXGBE_MAX_UTA 128

#define TXGBE_ALIGN 128 /* as intel did */
@@ -206,6 +208,151 @@ struct txgbe_bus_info {
	u16 instance_id;
};

/* Statistics counters collected by the MAC */
/* PB[] RxTx */
struct txgbe_pb_stats {
	u64 tx_pb_xon_packets;
	u64 rx_pb_xon_packets;
	u64 tx_pb_xoff_packets;
	u64 rx_pb_xoff_packets;
	u64 rx_pb_dropped;
	u64 rx_pb_mbuf_alloc_errors;
	u64 tx_pb_xon2off_packets;
};

/* QP[] RxTx */
struct txgbe_qp_stats {
	u64 rx_qp_packets;
	u64 tx_qp_packets;
	u64 rx_qp_bytes;
	u64 tx_qp_bytes;
	u64 rx_qp_mc_packets;
};

struct txgbe_hw_stats {
	/* MNG RxTx */
	u64 mng_bmc2host_packets;
	u64 mng_host2bmc_packets;
	/* Basic RxTx */
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
	u64 rx_total_bytes;
	u64 rx_total_packets;
	u64 tx_total_packets;
	u64 rx_total_missed_packets;
	u64 rx_broadcast_packets;
	u64 tx_broadcast_packets;
	u64 rx_multicast_packets;
	u64 tx_multicast_packets;
	u64 rx_management_packets;
	u64 tx_management_packets;
	u64 rx_management_dropped;
	u64 rx_drop_packets;

	/* Basic Error */
	u64 rx_crc_errors;
	u64 rx_illegal_byte_errors;
	u64 rx_error_bytes;
	u64 rx_mac_short_packet_dropped;
	u64 rx_length_errors;
	u64 rx_undersize_errors;
	u64 rx_fragment_errors;
	u64 rx_oversize_errors;
	u64 rx_jabber_errors;
	u64 rx_l3_l4_xsum_error;
	u64 mac_local_errors;
	u64 mac_remote_errors;

	/* Flow Director */
	u64 flow_director_added_filters;
	u64 flow_director_removed_filters;
	u64 flow_director_filter_add_errors;
	u64 flow_director_filter_remove_errors;
	u64 flow_director_matched_filters;
	u64 flow_director_missed_filters;

	/* FCoE */
	u64 rx_fcoe_crc_errors;
	u64 rx_fcoe_mbuf_allocation_errors;
	u64 rx_fcoe_dropped;
	u64 rx_fcoe_packets;
	u64 tx_fcoe_packets;
	u64 rx_fcoe_bytes;
	u64 tx_fcoe_bytes;
	u64 rx_fcoe_no_ddp;
	u64 rx_fcoe_no_ddp_ext_buff;

	/* MACSEC */
	u64 tx_macsec_pkts_untagged;
	u64 tx_macsec_pkts_encrypted;
	u64 tx_macsec_pkts_protected;
	u64 tx_macsec_octets_encrypted;
	u64 tx_macsec_octets_protected;
	u64 rx_macsec_pkts_untagged;
	u64 rx_macsec_pkts_badtag;
	u64 rx_macsec_pkts_nosci;
	u64 rx_macsec_pkts_unknownsci;
	u64 rx_macsec_octets_decrypted;
	u64 rx_macsec_octets_validated;
	u64 rx_macsec_sc_pkts_unchecked;
	u64 rx_macsec_sc_pkts_delayed;
	u64 rx_macsec_sc_pkts_late;
	u64 rx_macsec_sa_pkts_ok;
	u64 rx_macsec_sa_pkts_invalid;
	u64 rx_macsec_sa_pkts_notvalid;
	u64 rx_macsec_sa_pkts_unusedsa;
	u64 rx_macsec_sa_pkts_notusingsa;

	/* MAC RxTx */
	u64 rx_size_64_packets;
	u64 rx_size_65_to_127_packets;
	u64 rx_size_128_to_255_packets;
	u64 rx_size_256_to_511_packets;
	u64 rx_size_512_to_1023_packets;
	u64 rx_size_1024_to_max_packets;
	u64 tx_size_64_packets;
	u64 tx_size_65_to_127_packets;
	u64 tx_size_128_to_255_packets;
	u64 tx_size_256_to_511_packets;
	u64 tx_size_512_to_1023_packets;
	u64 tx_size_1024_to_max_packets;

	/* Flow Control */
	u64 tx_xon_packets;
	u64 rx_xon_packets;
	u64 tx_xoff_packets;
	u64 rx_xoff_packets;

	/* PB[] RxTx */
	struct {
		u64 rx_up_packets;
		u64 tx_up_packets;
		u64 rx_up_bytes;
		u64 tx_up_bytes;
		u64 rx_up_drop_packets;

		u64 tx_up_xon_packets;
		u64 rx_up_xon_packets;
		u64 tx_up_xoff_packets;
		u64 rx_up_xoff_packets;
		u64 rx_up_dropped;
		u64 rx_up_mbuf_alloc_errors;
		u64 tx_up_xon2off_packets;
	} up[TXGBE_MAX_UP];

	/* QP[] RxTx */
	struct {
		u64 rx_qp_packets;
		u64 tx_qp_packets;
		u64 rx_qp_bytes;
		u64 tx_qp_bytes;
		u64 rx_qp_mc_packets;
	} qp[TXGBE_MAX_QP];

};

/* iterator type for walking multicast address lists */
typedef u8* (*txgbe_mc_addr_itr) (struct txgbe_hw *hw, u8 **mc_addr_ptr,
				  u32 *vmdq);
@@ -488,6 +635,14 @@ struct txgbe_hw {

	u32 q_rx_regs[128 * 4];
	u32 q_tx_regs[128 * 4];
	bool offset_loaded;
	struct {
		u64 rx_qp_packets;
		u64 tx_qp_packets;
		u64 rx_qp_bytes;
		u64 tx_qp_bytes;
		u64 rx_qp_mc_packets;
	} qp_last[TXGBE_MAX_QP];
};

#include "txgbe_regs.h"
@@ -27,6 +27,7 @@ static int txgbe_dev_set_link_down(struct rte_eth_dev *dev);
static int txgbe_dev_close(struct rte_eth_dev *dev);
static int txgbe_dev_link_update(struct rte_eth_dev *dev,
				int wait_to_complete);
static int txgbe_dev_stats_reset(struct rte_eth_dev *dev);

static void txgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int txgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
@@ -236,6 +237,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
		return -EIO;
	}

	/* Reset the hw statistics */
	txgbe_dev_stats_reset(eth_dev);

	/* disable interrupt */
	txgbe_disable_intr(hw);

@@ -568,6 +572,7 @@ static int
txgbe_dev_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t intr_vector = 0;
@@ -749,6 +754,9 @@ skip_link_setup:

	wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_ORD_MASK);

	txgbe_read_stats_registers(hw, hw_stats);
	hw->offset_loaded = 1;

	return 0;

error:
@@ -941,6 +949,267 @@ txgbe_dev_reset(struct rte_eth_dev *dev)
	return ret;
}

#define UPDATE_QP_COUNTER_32bit(reg, last_counter, counter) \
{ \
	uint32_t current_counter = rd32(hw, reg); \
	if (current_counter < last_counter) \
		current_counter += 0x100000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFLL; \
}

#define UPDATE_QP_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
	uint64_t current_counter_lsb = rd32(hw, reg_lsb); \
	uint64_t current_counter_msb = rd32(hw, reg_msb); \
	uint64_t current_counter = (current_counter_msb << 32) | \
		current_counter_lsb; \
	if (current_counter < last_counter) \
		current_counter += 0x1000000000LL; \
	if (!hw->offset_loaded) \
		last_counter = current_counter; \
	counter = current_counter - last_counter; \
	counter &= 0xFFFFFFFFFLL; \
}

void
txgbe_read_stats_registers(struct txgbe_hw *hw,
			   struct txgbe_hw_stats *hw_stats)
{
	unsigned int i;

	/* QP Stats */
	for (i = 0; i < hw->nb_rx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXPKT(i),
			hw->qp_last[i].rx_qp_packets,
			hw_stats->qp[i].rx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPRXOCTL(i), TXGBE_QPRXOCTH(i),
			hw->qp_last[i].rx_qp_bytes,
			hw_stats->qp[i].rx_qp_bytes);
		UPDATE_QP_COUNTER_32bit(TXGBE_QPRXMPKT(i),
			hw->qp_last[i].rx_qp_mc_packets,
			hw_stats->qp[i].rx_qp_mc_packets);
	}

	for (i = 0; i < hw->nb_tx_queues; i++) {
		UPDATE_QP_COUNTER_32bit(TXGBE_QPTXPKT(i),
			hw->qp_last[i].tx_qp_packets,
			hw_stats->qp[i].tx_qp_packets);
		UPDATE_QP_COUNTER_36bit(TXGBE_QPTXOCTL(i), TXGBE_QPTXOCTH(i),
			hw->qp_last[i].tx_qp_bytes,
			hw_stats->qp[i].tx_qp_bytes);
	}
	/* PB Stats */
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->up[i].rx_up_xon_packets +=
				rd32(hw, TXGBE_PBRXUPXON(i));
		hw_stats->up[i].rx_up_xoff_packets +=
				rd32(hw, TXGBE_PBRXUPXOFF(i));
		hw_stats->up[i].tx_up_xon_packets +=
				rd32(hw, TXGBE_PBTXUPXON(i));
		hw_stats->up[i].tx_up_xoff_packets +=
				rd32(hw, TXGBE_PBTXUPXOFF(i));
		hw_stats->up[i].tx_up_xon2off_packets +=
				rd32(hw, TXGBE_PBTXUPOFF(i));
		hw_stats->up[i].rx_up_dropped +=
				rd32(hw, TXGBE_PBRXMISS(i));
	}
	hw_stats->rx_xon_packets += rd32(hw, TXGBE_PBRXLNKXON);
	hw_stats->rx_xoff_packets += rd32(hw, TXGBE_PBRXLNKXOFF);
	hw_stats->tx_xon_packets += rd32(hw, TXGBE_PBTXLNKXON);
	hw_stats->tx_xoff_packets += rd32(hw, TXGBE_PBTXLNKXOFF);

	/* DMA Stats */
	hw_stats->rx_packets += rd32(hw, TXGBE_DMARXPKT);
	hw_stats->tx_packets += rd32(hw, TXGBE_DMATXPKT);

	hw_stats->rx_bytes += rd64(hw, TXGBE_DMARXOCTL);
	hw_stats->tx_bytes += rd64(hw, TXGBE_DMATXOCTL);
	hw_stats->rx_drop_packets += rd32(hw, TXGBE_PBRXDROP);

	/* MAC Stats */
	hw_stats->rx_crc_errors += rd64(hw, TXGBE_MACRXERRCRCL);
	hw_stats->rx_multicast_packets += rd64(hw, TXGBE_MACRXMPKTL);
	hw_stats->tx_multicast_packets += rd64(hw, TXGBE_MACTXMPKTL);

	hw_stats->rx_total_packets += rd64(hw, TXGBE_MACRXPKTL);
	hw_stats->tx_total_packets += rd64(hw, TXGBE_MACTXPKTL);
	hw_stats->rx_total_bytes += rd64(hw, TXGBE_MACRXGBOCTL);

	hw_stats->rx_broadcast_packets += rd64(hw, TXGBE_MACRXOCTL);
	hw_stats->tx_broadcast_packets += rd32(hw, TXGBE_MACTXOCTL);

	hw_stats->rx_size_64_packets += rd64(hw, TXGBE_MACRX1TO64L);
	hw_stats->rx_size_65_to_127_packets += rd64(hw, TXGBE_MACRX65TO127L);
	hw_stats->rx_size_128_to_255_packets += rd64(hw, TXGBE_MACRX128TO255L);
	hw_stats->rx_size_256_to_511_packets += rd64(hw, TXGBE_MACRX256TO511L);
	hw_stats->rx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACRX512TO1023L);
	hw_stats->rx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACRX1024TOMAXL);
	hw_stats->tx_size_64_packets += rd64(hw, TXGBE_MACTX1TO64L);
	hw_stats->tx_size_65_to_127_packets += rd64(hw, TXGBE_MACTX65TO127L);
	hw_stats->tx_size_128_to_255_packets += rd64(hw, TXGBE_MACTX128TO255L);
	hw_stats->tx_size_256_to_511_packets += rd64(hw, TXGBE_MACTX256TO511L);
	hw_stats->tx_size_512_to_1023_packets +=
			rd64(hw, TXGBE_MACTX512TO1023L);
	hw_stats->tx_size_1024_to_max_packets +=
			rd64(hw, TXGBE_MACTX1024TOMAXL);

	hw_stats->rx_undersize_errors += rd64(hw, TXGBE_MACRXERRLENL);
	hw_stats->rx_oversize_errors += rd32(hw, TXGBE_MACRXOVERSIZE);
	hw_stats->rx_jabber_errors += rd32(hw, TXGBE_MACRXJABBER);

	/* MNG Stats */
	hw_stats->mng_bmc2host_packets = rd32(hw, TXGBE_MNGBMC2OS);
	hw_stats->mng_host2bmc_packets = rd32(hw, TXGBE_MNGOS2BMC);
	hw_stats->rx_management_packets = rd32(hw, TXGBE_DMARXMNG);
	hw_stats->tx_management_packets = rd32(hw, TXGBE_DMATXMNG);

	/* FCoE Stats */
	hw_stats->rx_fcoe_crc_errors += rd32(hw, TXGBE_FCOECRC);
	hw_stats->rx_fcoe_mbuf_allocation_errors += rd32(hw, TXGBE_FCOELAST);
	hw_stats->rx_fcoe_dropped += rd32(hw, TXGBE_FCOERPDC);
	hw_stats->rx_fcoe_packets += rd32(hw, TXGBE_FCOEPRC);
	hw_stats->tx_fcoe_packets += rd32(hw, TXGBE_FCOEPTC);
	hw_stats->rx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWRC);
	hw_stats->tx_fcoe_bytes += rd32(hw, TXGBE_FCOEDWTC);

	/* Flow Director Stats */
	hw_stats->flow_director_matched_filters += rd32(hw, TXGBE_FDIRMATCH);
	hw_stats->flow_director_missed_filters += rd32(hw, TXGBE_FDIRMISS);
	hw_stats->flow_director_added_filters +=
			TXGBE_FDIRUSED_ADD(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_removed_filters +=
			TXGBE_FDIRUSED_REM(rd32(hw, TXGBE_FDIRUSED));
	hw_stats->flow_director_filter_add_errors +=
			TXGBE_FDIRFAIL_ADD(rd32(hw, TXGBE_FDIRFAIL));
	hw_stats->flow_director_filter_remove_errors +=
			TXGBE_FDIRFAIL_REM(rd32(hw, TXGBE_FDIRFAIL));

	/* MACsec Stats */
	hw_stats->tx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECTX_UTPKT);
	hw_stats->tx_macsec_pkts_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCPKT);
	hw_stats->tx_macsec_pkts_protected +=
			rd32(hw, TXGBE_LSECTX_PROTPKT);
	hw_stats->tx_macsec_octets_encrypted +=
			rd32(hw, TXGBE_LSECTX_ENCOCT);
	hw_stats->tx_macsec_octets_protected +=
			rd32(hw, TXGBE_LSECTX_PROTOCT);
	hw_stats->rx_macsec_pkts_untagged += rd32(hw, TXGBE_LSECRX_UTPKT);
	hw_stats->rx_macsec_pkts_badtag += rd32(hw, TXGBE_LSECRX_BTPKT);
	hw_stats->rx_macsec_pkts_nosci += rd32(hw, TXGBE_LSECRX_NOSCIPKT);
	hw_stats->rx_macsec_pkts_unknownsci += rd32(hw, TXGBE_LSECRX_UNSCIPKT);
	hw_stats->rx_macsec_octets_decrypted += rd32(hw, TXGBE_LSECRX_DECOCT);
	hw_stats->rx_macsec_octets_validated += rd32(hw, TXGBE_LSECRX_VLDOCT);
	hw_stats->rx_macsec_sc_pkts_unchecked +=
			rd32(hw, TXGBE_LSECRX_UNCHKPKT);
	hw_stats->rx_macsec_sc_pkts_delayed += rd32(hw, TXGBE_LSECRX_DLYPKT);
	hw_stats->rx_macsec_sc_pkts_late += rd32(hw, TXGBE_LSECRX_LATEPKT);
	for (i = 0; i < 2; i++) {
		hw_stats->rx_macsec_sa_pkts_ok +=
			rd32(hw, TXGBE_LSECRX_OKPKT(i));
		hw_stats->rx_macsec_sa_pkts_invalid +=
			rd32(hw, TXGBE_LSECRX_INVPKT(i));
		hw_stats->rx_macsec_sa_pkts_notvalid +=
			rd32(hw, TXGBE_LSECRX_BADPKT(i));
	}
	hw_stats->rx_macsec_sa_pkts_unusedsa +=
			rd32(hw, TXGBE_LSECRX_INVSAPKT);
	hw_stats->rx_macsec_sa_pkts_notusingsa +=
			rd32(hw, TXGBE_LSECRX_BADSAPKT);

	hw_stats->rx_total_missed_packets = 0;
	for (i = 0; i < TXGBE_MAX_UP; i++) {
		hw_stats->rx_total_missed_packets +=
				hw_stats->up[i].rx_up_dropped;
	}
}

static int
txgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
	struct txgbe_stat_mappings *stat_mappings =
			TXGBE_DEV_STAT_MAPPINGS(dev);
	uint32_t i, j;

	txgbe_read_stats_registers(hw, hw_stats);

	if (stats == NULL)
		return -EINVAL;

	/* Fill out the rte_eth_stats statistics structure */
	stats->ipackets = hw_stats->rx_packets;
	stats->ibytes = hw_stats->rx_bytes;
	stats->opackets = hw_stats->tx_packets;
	stats->obytes = hw_stats->tx_bytes;

	memset(&stats->q_ipackets, 0, sizeof(stats->q_ipackets));
	memset(&stats->q_opackets, 0, sizeof(stats->q_opackets));
	memset(&stats->q_ibytes, 0, sizeof(stats->q_ibytes));
	memset(&stats->q_obytes, 0, sizeof(stats->q_obytes));
	memset(&stats->q_errors, 0, sizeof(stats->q_errors));
	for (i = 0; i < TXGBE_MAX_QP; i++) {
		uint32_t n = i / NB_QMAP_FIELDS_PER_QSM_REG;
		uint32_t offset = (i % NB_QMAP_FIELDS_PER_QSM_REG) * 8;
		uint32_t q_map;

		q_map = (stat_mappings->rqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_ipackets[j] += hw_stats->qp[i].rx_qp_packets;
		stats->q_ibytes[j] += hw_stats->qp[i].rx_qp_bytes;

		q_map = (stat_mappings->tqsm[n] >> offset)
				& QMAP_FIELD_RESERVED_BITS_MASK;
		j = (q_map < RTE_ETHDEV_QUEUE_STAT_CNTRS
		     ? q_map : q_map % RTE_ETHDEV_QUEUE_STAT_CNTRS);
		stats->q_opackets[j] += hw_stats->qp[i].tx_qp_packets;
		stats->q_obytes[j] += hw_stats->qp[i].tx_qp_bytes;
	}

	/* Rx Errors */
	stats->imissed = hw_stats->rx_total_missed_packets;
	stats->ierrors = hw_stats->rx_crc_errors +
			 hw_stats->rx_mac_short_packet_dropped +
			 hw_stats->rx_length_errors +
			 hw_stats->rx_undersize_errors +
			 hw_stats->rx_oversize_errors +
			 hw_stats->rx_drop_packets +
			 hw_stats->rx_illegal_byte_errors +
			 hw_stats->rx_error_bytes +
			 hw_stats->rx_fragment_errors +
			 hw_stats->rx_fcoe_crc_errors +
			 hw_stats->rx_fcoe_mbuf_allocation_errors;

	/* Tx Errors */
	stats->oerrors = 0;
	return 0;
}

static int
txgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);

	/* HW registers are cleared on read */
	hw->offset_loaded = 0;
	txgbe_dev_stats_get(dev, NULL);
	hw->offset_loaded = 1;

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));

	return 0;
}

static int
txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
@@ -1725,6 +1994,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
	.dev_close = txgbe_dev_close,
	.dev_reset = txgbe_dev_reset,
	.link_update = txgbe_dev_link_update,
	.stats_get = txgbe_dev_stats_get,
	.stats_reset = txgbe_dev_stats_reset,
	.dev_supported_ptypes_get = txgbe_dev_supported_ptypes_get,
	.rx_queue_start = txgbe_dev_rx_queue_start,
	.rx_queue_stop = txgbe_dev_rx_queue_stop,
@@ -50,6 +50,15 @@ struct txgbe_interrupt {
	uint32_t mask[2];
};

#define TXGBE_NB_STAT_MAPPING 32
#define QSM_REG_NB_BITS_PER_QMAP_FIELD 8
#define NB_QMAP_FIELDS_PER_QSM_REG 4
#define QMAP_FIELD_RESERVED_BITS_MASK 0x0f
struct txgbe_stat_mappings {
	uint32_t tqsm[TXGBE_NB_STAT_MAPPING];
	uint32_t rqsm[TXGBE_NB_STAT_MAPPING];
};

struct txgbe_uta_info {
	uint8_t uc_filter_type;
	uint16_t uta_in_use;
@@ -61,7 +70,9 @@ struct txgbe_uta_info {
 */
struct txgbe_adapter {
	struct txgbe_hw hw;
	struct txgbe_hw_stats stats;
	struct txgbe_interrupt intr;
	struct txgbe_stat_mappings stat_mappings;
	struct txgbe_uta_info uta_info;
	bool rx_bulk_alloc_allowed;
};
@@ -72,9 +83,15 @@ struct txgbe_adapter {
#define TXGBE_DEV_HW(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->hw)

#define TXGBE_DEV_STATS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stats)

#define TXGBE_DEV_INTR(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->intr)

#define TXGBE_DEV_STAT_MAPPINGS(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->stat_mappings)

#define TXGBE_DEV_UTA_INFO(dev) \
	(&((struct txgbe_adapter *)(dev)->data->dev_private)->uta_info)

@@ -172,5 +189,7 @@ int txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
void txgbe_dev_setup_link_alarm_handler(void *param);
void txgbe_read_stats_registers(struct txgbe_hw *hw,
				struct txgbe_hw_stats *hw_stats);

#endif /* _TXGBE_ETHDEV_H_ */
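A side note on the counter handling added above: UPDATE_QP_COUNTER_36bit treats each per-queue byte counter as a rolling 36-bit value, keeps the last raw reading in hw->qp_last[], and accumulates only the delta, adding 2^36 whenever the raw value has wrapped. Below is a standalone sketch of the same arithmetic; the helper name and register values are made up for illustration.

#include <assert.h>
#include <stdint.h>

/* Delta between two raw snapshots of a 36-bit rolling counter, handling a
 * single wrap, mirroring the logic of UPDATE_QP_COUNTER_36bit. */
static uint64_t
delta_36bit(uint64_t current, uint64_t last)
{
	if (current < last)                 /* counter wrapped past 2^36 */
		current += 0x1000000000ULL;
	return (current - last) & 0xFFFFFFFFFULL;
}

int main(void)
{
	/* last snapshot near the top of the range, new snapshot just past 0 */
	assert(delta_36bit(0x000000200ULL, 0xFFFFFFF00ULL) == 0x300);
	return 0;
}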