net/bnxt: fix per queue stats display in xstats

While gathering per-queue stats, the Tx queue pass overwrites counters
that the Rx queue pass has already filled in for the same queue index.
This causes some of the per-queue counters reported in xstats to be
incorrect.

Fixes: 577d3dced0 ("net/bnxt: refactor the query stats")
Cc: stable@dpdk.org

Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
commit 86ff87b0ed (parent de8a402ae8)
Author:    Ajit Khaparde <ajit.khaparde@broadcom.com>
Date:      2017-09-28 16:43:41 -05:00
Committer: Ferruh Yigit

 3 files changed, 24 insertions(+), 20 deletions(-)
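
Why the overwrite happens: each completion ring's HWRM stats context reports
both Rx and Tx counters, and bnxt_stats_get_op() queries the Rx rings first
and then the Tx rings using the same queue index. Because the old
bnxt_hwrm_ctx_qstats() wrote every per-queue field on every call, the Tx pass
re-wrote q_ipackets[]/q_ibytes[]/q_errors[] for index i from the Tx ring's
stats context, whose Rx counters do not describe the Rx queue, wiping out
what the Rx pass had just stored. The standalone sketch below illustrates the
failure mode and the per-direction fix; the fake_* types and the sample
numbers are invented for illustration and are not driver code.

/*
 * Standalone sketch of the bug and the fix (not driver code: the
 * fake_* types and the sample values are invented; in the driver the
 * counters come from the HWRM_STAT_CTX_QUERY response and land in
 * struct rte_eth_stats).
 */
#include <stdint.h>
#include <stdio.h>

struct fake_ctx_stats {		/* one ring's firmware stats context */
	uint64_t rx_pkts, rx_errs;
	uint64_t tx_pkts, tx_errs;
};

struct fake_eth_stats {		/* per-queue part of the ethdev stats */
	uint64_t q_ipackets[1], q_opackets[1], q_errors[1];
};

/* Old behaviour: every call writes both directions unconditionally. */
static void qstats_old(const struct fake_ctx_stats *r, int idx,
		       struct fake_eth_stats *s)
{
	s->q_ipackets[idx] = r->rx_pkts;	    /* clobbers the Rx pass */
	s->q_opackets[idx] = r->tx_pkts;
	s->q_errors[idx] = r->rx_errs + r->tx_errs; /* and its error count */
}

/* Fixed behaviour: only touch the direction that matches the ring. */
static void qstats_fixed(const struct fake_ctx_stats *r, int idx,
			 struct fake_eth_stats *s, uint8_t rx)
{
	if (rx) {
		s->q_ipackets[idx] = r->rx_pkts;
		s->q_errors[idx] = r->rx_errs;
	} else {
		s->q_opackets[idx] = r->tx_pkts;
		s->q_errors[idx] += r->tx_errs;
	}
}

int main(void)
{
	/* Rx ring 0 saw traffic; Tx ring 0's context has no Rx counters. */
	struct fake_ctx_stats rx_ring = { .rx_pkts = 100, .rx_errs = 2 };
	struct fake_ctx_stats tx_ring = { .tx_pkts = 50, .tx_errs = 1 };
	struct fake_eth_stats old = { {0} }, fixed = { {0} };

	/* Rx-queue loop runs first, then the Tx-queue loop reuses idx 0. */
	qstats_old(&rx_ring, 0, &old);
	qstats_old(&tx_ring, 0, &old);		/* wipes q_ipackets/q_errors */
	qstats_fixed(&rx_ring, 0, &fixed, 1);
	qstats_fixed(&tx_ring, 0, &fixed, 0);	/* leaves Rx fields alone */

	printf("old:   ipackets=%llu errors=%llu\n",
	       (unsigned long long)old.q_ipackets[0],
	       (unsigned long long)old.q_errors[0]);
	printf("fixed: ipackets=%llu errors=%llu\n",
	       (unsigned long long)fixed.q_ipackets[0],
	       (unsigned long long)fixed.q_errors[0]);
	return 0;
}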

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c

@@ -2815,7 +2815,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
 }
 
 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
-			 struct rte_eth_stats *stats)
+			 struct rte_eth_stats *stats, uint8_t rx)
 {
 	int rc = 0;
 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
@@ -2829,23 +2829,25 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
 	HWRM_CHECK_RESULT();
 
-	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
-	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
-	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
-	stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
-	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
-	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
-
-	stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
-	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
-	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
-	stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
-	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
-	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
-	stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
-	stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
-	stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+	if (rx) {
+		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
+		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
+		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
+		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
+		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
+		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
+		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
+		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
+	} else {
+		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
+		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
+		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
+		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
+		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
+		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
+		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
+	}
 
 	HWRM_UNLOCK();

diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h

@@ -92,7 +92,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
 			    struct bnxt_cp_ring_info *cpr, unsigned int idx);
 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
-			 struct rte_eth_stats *stats);
+			 struct rte_eth_stats *stats, uint8_t rx);
 int bnxt_hwrm_ver_get(struct bnxt *bp);

diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c

@@ -240,14 +240,16 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
 		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
 		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
 
-		bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
+		bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+				     bnxt_stats, 1);
 	}
 
 	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
 		struct bnxt_tx_queue *txq = bp->tx_queues[i];
 		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
 
-		bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
+		bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i,
+				     bnxt_stats, 0);
 	}
 	bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
 	bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
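
A note on the q_errors[] handling in the fixed code: the Rx branch seeds
q_errors[idx] with '=' from rx_err_pkts and rx_drop_pkts, while the Tx branch
only accumulates tx_err_pkts with '+='. The combined per-queue error count is
therefore correct because bnxt_stats_get_op() walks the Rx rings before the
Tx rings for the same index, so the Rx pass initializes the counter that the
Tx pass then adds to.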