net/qede: add queue statistics
This patch adds support for pulling per-queue statistics.

Signed-off-by: Rasesh Mody <rasesh.mody@qlogic.com>
commit 7634c5f915
parent f1e4b6c0ac
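Usage note (not part of the patch): with these counters in place, an application reads the per-queue numbers through the regular ethdev API. A minimal sketch, assuming a started qede port; the helper name and the queue counts passed in are illustrative, and the exact port_id type varies slightly across DPDK releases.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Illustrative helper: print the per-queue counters that qede now
 * fills into struct rte_eth_stats (q_ipackets, q_errors, q_opackets). */
static void
dump_queue_stats(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_stats stats;
	uint16_t q;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;

	for (q = 0; q < nb_rxq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		printf("rxq%u: packets=%" PRIu64 " errors=%" PRIu64 "\n",
		       (unsigned int)q, stats.q_ipackets[q], stats.q_errors[q]);

	for (q = 0; q < nb_txq && q < RTE_ETHDEV_QUEUE_STAT_CNTRS; q++)
		printf("txq%u: packets=%" PRIu64 "\n",
		       (unsigned int)q, stats.q_opackets[q]);
}

For qede, q_errors[] is filled with the sum of rx_hw_errors and rx_alloc_errors, as the qede_get_stats() loop in the diff below shows.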
drivers/net/qede/qede_ethdev.c

@@ -160,6 +160,15 @@ static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
 		offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
 };
 
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+	{"rx_q_segments",
+		offsetof(struct qede_rx_queue, rx_segs)},
+	{"rx_q_hw_errors",
+		offsetof(struct qede_rx_queue, rx_hw_errors)},
+	{"rx_q_allocation_errors",
+		offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 {
 	ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
@@ -828,6 +837,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
+	unsigned int i = 0, j = 0, qid;
+	struct qede_tx_queue *txq;
 
 	qdev->ops->get_vport_stats(edev, &stats);
 
@@ -858,20 +869,73 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	    stats.tx_mcast_bytes + stats.tx_bcast_bytes;
 
 	eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+	/* Queue stats */
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			eth_stats->q_ipackets[i] =
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+					rcv_pkts));
+			eth_stats->q_errors[i] =
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+					rx_hw_errors)) +
+				*(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					offsetof(struct qede_rx_queue,
+					rx_alloc_errors));
+			i++;
+		}
+
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+			txq = qdev->fp_array[(qid)].txqs[0];
+			eth_stats->q_opackets[j] =
+				*((uint64_t *)(uintptr_t)
+					(((uint64_t)(uintptr_t)(txq)) +
+					offsetof(struct qede_tx_queue,
+						 xmit_pkts)));
+			j++;
+		}
+	}
 }
 
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev) {
+	return RTE_DIM(qede_xstats_strings) +
+	       (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+}
+
 static int
 qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
 		      struct rte_eth_xstat_name *xstats_names, unsigned limit)
 {
-	unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
+	struct qede_dev *qdev = dev->data->dev_private;
+	const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+	unsigned int i, qid, stat_idx = 0;
 
-	if (xstats_names != NULL)
-		for (i = 0; i < stat_cnt; i++)
-			snprintf(xstats_names[i].name,
-				sizeof(xstats_names[i].name),
+	if (xstats_names != NULL) {
+		for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+			snprintf(xstats_names[stat_idx].name,
+				sizeof(xstats_names[stat_idx].name),
 				"%s",
 				qede_xstats_strings[i].name);
+			stat_idx++;
+		}
+
+		for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+				snprintf(xstats_names[stat_idx].name,
+					sizeof(xstats_names[stat_idx].name),
+					"%.4s%d%s",
+					qede_rxq_xstats_strings[i].name, qid,
+					qede_rxq_xstats_strings[i].name + 4);
+				stat_idx++;
+			}
+		}
+	}
 
 	return stat_cnt;
 }
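Note on the name format above (illustration only, plain C): "%.4s%d%s" splices the queue index into the template string right after the "rx_q" prefix, so "rx_q_segments" becomes "rx_q0_segments", "rx_q1_segments", and so on.

#include <stdio.h>

int main(void)
{
	const char *tmpl = "rx_q_segments";
	char buf[64];
	int qid;

	/* "%.4s" copies the first four characters ("rx_q"), then the queue
	 * index is printed, then tmpl + 4 appends the rest ("_segments"). */
	for (qid = 0; qid < 3; qid++) {
		snprintf(buf, sizeof(buf), "%.4s%d%s", tmpl, qid, tmpl + 4);
		printf("%s\n", buf);	/* rx_q0_segments, rx_q1_segments, ... */
	}
	return 0;
}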
@@ -883,18 +947,32 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	struct qede_dev *qdev = dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
-	unsigned int num = RTE_DIM(qede_xstats_strings);
+	const unsigned int num = qede_get_xstats_count(qdev);
+	unsigned int i, qid, stat_idx = 0;
 
 	if (n < num)
 		return num;
 
 	qdev->ops->get_vport_stats(edev, &stats);
 
-	for (num = 0; num < n; num++)
-		xstats[num].value = *(u64 *)(((char *)&stats) +
-					     qede_xstats_strings[num].offset);
+	for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+		xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+					     qede_xstats_strings[i].offset);
+		stat_idx++;
+	}
 
-	return num;
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+				xstats[stat_idx].value = *(uint64_t *)(
+					((char *)(qdev->fp_array[(qid)].rxq)) +
+					qede_rxq_xstats_strings[i].offset);
+				stat_idx++;
+			}
+		}
+	}
+
+	return stat_idx;
 }
 
 static void
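Note on the sizing contract (not qede-specific): qede_get_xstats() returns the required count when the caller's array is too small, so applications normally query the count first and then fetch. A sketch using the generic ethdev calls; it assumes that calling rte_eth_xstats_get() with a zero-sized array is used only to learn the count, and the helper name is illustrative.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Illustrative helper: fetch and print every xstat, including the new
 * per-Rx-queue qede entries, with the usual two-step sizing pattern. */
static void
dump_xstats(uint16_t port_id)
{
	struct rte_eth_xstat *vals = NULL;
	struct rte_eth_xstat_name *names = NULL;
	int n, i;

	n = rte_eth_xstats_get(port_id, NULL, 0);	/* count only */
	if (n <= 0)
		return;

	vals = calloc(n, sizeof(*vals));
	names = calloc(n, sizeof(*names));
	if (vals != NULL && names != NULL &&
	    rte_eth_xstats_get_names(port_id, names, n) == n &&
	    rte_eth_xstats_get(port_id, vals, n) == n) {
		for (i = 0; i < n; i++)
			printf("%s: %" PRIu64 "\n",
			       names[vals[i].id].name, vals[i].value);
	}

	free(vals);
	free(names);
}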
drivers/net/qede/qede_ethdev.h

@@ -69,6 +69,9 @@
 #define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
 			(qdev)->num_tc)
 
+#define QEDE_FASTPATH_TX        (1 << 0)
+#define QEDE_FASTPATH_RX        (1 << 1)
+
 #define QEDE_DUPLEX_FULL	1
 #define QEDE_DUPLEX_HALF	2
 #define QEDE_DUPLEX_UNKNOWN     0xff
drivers/net/qede/qede_rxtx.c

@@ -10,9 +10,6 @@
 
 static bool gro_disable = 1;	/* mod_param */
 
-#define QEDE_FASTPATH_TX        (1 << 0)
-#define QEDE_FASTPATH_RX        (1 << 1)
-
 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
 {
 	struct rte_mbuf *new_mb = NULL;
@@ -838,7 +835,7 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
 }
 
 int qede_process_sg_pkts(void *p_rxq,  struct rte_mbuf *rx_mb,
-			 int num_frags, uint16_t pkt_len)
+			 int num_segs, uint16_t pkt_len)
 {
 	struct qede_rx_queue *rxq = p_rxq;
 	struct qede_dev *qdev = rxq->qdev;
@@ -849,13 +846,13 @@ int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
 	register struct rte_mbuf *seg2 = NULL;
 
 	seg1 = rx_mb;
-	while (num_frags) {
+	while (num_segs) {
 		cur_size = pkt_len > rxq->rx_buf_size ?
 				rxq->rx_buf_size : pkt_len;
 		if (!cur_size) {
 			PMD_RX_LOG(DEBUG, rxq,
 				   "SG packet, len and num BD mismatch\n");
-			qede_recycle_rx_bd_ring(rxq, qdev, num_frags);
+			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
 			return -EINVAL;
 		}
 
@@ -876,7 +873,8 @@ int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
 			seg2->data_len = cur_size;
 			seg1->next = seg2;
 			seg1 = seg1->next;
-			num_frags--;
+			num_segs--;
+			rxq->rx_segs++;
 			continue;
 		}
 		seg1 = NULL;
@@ -904,7 +902,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	register struct rte_mbuf *seg1 = NULL;
 	enum eth_rx_cqe_type cqe_type;
 	uint16_t len, pad, preload_idx, pkt_len, parse_flag;
-	uint8_t csum_flag, num_frags;
+	uint8_t csum_flag, num_segs;
 	enum rss_hash_type htype;
 	int ret;
 
@@ -978,11 +976,13 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		if (fp_cqe->bd_num > 1) {
 			pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
-			num_frags = fp_cqe->bd_num - 1;
+			num_segs = fp_cqe->bd_num - 1;
+
+			rxq->rx_segs++;
 
 			pkt_len -= len;
 			seg1 = rx_mb;
-			ret = qede_process_sg_pkts(p_rxq, seg1, num_frags,
+			ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
 						   pkt_len);
 			if (ret != ECORE_SUCCESS) {
 				qede_recycle_rx_bd_ring(rxq, qdev,
@@ -1045,6 +1045,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 	qede_update_rx_prod(qdev, rxq);
 
+	rxq->rcv_pkts += rx_pkt;
+
 	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
 
 	return rx_pkt;
@@ -1237,6 +1239,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		bd_prod =
 		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
 		nb_pkt_sent++;
+		txq->xmit_pkts++;
 	}
 
 	/* Write value of prod idx into bd_prod */
drivers/net/qede/qede_rxtx.h

@@ -99,6 +99,8 @@ struct qede_rx_queue {
 	uint16_t queue_id;
 	uint16_t port_id;
 	uint16_t rx_buf_size;
+	uint64_t rcv_pkts;
+	uint64_t rx_segs;
 	uint64_t rx_hw_errors;
 	uint64_t rx_alloc_errors;
 	struct qede_dev *qdev;
@@ -130,7 +132,7 @@ struct qede_tx_queue {
 	void OSAL_IOMEM *doorbell_addr;
 	volatile union db_prod tx_db;
 	uint16_t port_id;
-	uint64_t txq_counter;
+	uint64_t xmit_pkts;
 	struct qede_dev *qdev;
 };
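Closing note on the pattern (stand-in names, not the driver's code): the new rcv_pkts, rx_segs and xmit_pkts fields are plain 64-bit counters bumped in the datapath, and the rte_qede_xstats_name_off tables pair each exported name with the field's byte offset. That lets the stats callbacks read any counter with a single pointer-plus-offset expression instead of per-field code. A self-contained sketch of the same technique:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in structures: the driver uses struct qede_rx_queue and
 * struct rte_qede_xstats_name_off, but the mechanism is identical. */
struct demo_rxq {
	uint64_t rcv_pkts;
	uint64_t rx_segs;
	uint64_t rx_hw_errors;
};

struct demo_name_off {
	const char *name;
	size_t offset;
};

static const struct demo_name_off demo_strings[] = {
	{"rx_q_packets",   offsetof(struct demo_rxq, rcv_pkts)},
	{"rx_q_segments",  offsetof(struct demo_rxq, rx_segs)},
	{"rx_q_hw_errors", offsetof(struct demo_rxq, rx_hw_errors)},
};

int main(void)
{
	struct demo_rxq rxq = { .rcv_pkts = 10, .rx_segs = 12, .rx_hw_errors = 1 };
	unsigned int i;

	/* One generic loop reads every counter by adding the recorded
	 * byte offset to the base address of the structure. */
	for (i = 0; i < sizeof(demo_strings) / sizeof(demo_strings[0]); i++) {
		uint64_t v = *(const uint64_t *)
			((const char *)&rxq + demo_strings[i].offset);
		printf("%s = %" PRIu64 "\n", demo_strings[i].name, v);
	}
	return 0;
}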