net/bnxt: refactor the query stats

1) Use the hwrm_stat_ctx_query command to query statistics.
   This allows the driver to poll the statistics from hardware
   on demand, instead of relying on the current push model, in
   which the hardware DMAs the stats to the host at fixed
   intervals.

2) Use the rx_mbuf_alloc_fail to track mbuf alloc failures.

3) We were wrongly incrementing hwrm_cmd_seq in the
   bnxt_hwrm_stat_clear and bnxt_hwrm_stat_ctx_alloc functions.
   This patch fixes that.

Signed-off-by: Stephen Hurd <stephen.hurd@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
This commit is contained in:
Ajit Khaparde 2017-06-01 12:07:13 -05:00 committed by Ferruh Yigit
parent 3e12fdb78e
commit 577d3dced0
6 changed files with 100 additions and 60 deletions

View File

@ -204,6 +204,7 @@ struct bnxt {
uint16_t vxlan_fw_dst_port_id;
uint16_t geneve_fw_dst_port_id;
uint32_t fw_ver;
rte_atomic64_t rx_mbuf_alloc_fail;
};
/*

View File

@ -1616,6 +1616,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
bp = eth_dev->data->dev_private;
rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
bp->dev_stopped = 1;
if (bnxt_vf_pciid(pci_dev->id.device_id))

View File

@ -821,13 +821,12 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
return rc;
HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@ -845,9 +844,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
req.update_period_ms = rte_cpu_to_le_32(1000);
req.update_period_ms = rte_cpu_to_le_32(0);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
req.stats_dma_addr =
rte_cpu_to_le_64(cpr->hw_stats_map);
@ -870,7 +868,6 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@ -1227,6 +1224,43 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
return rc;
}
/*
 * Query the aggregate (per-function) statistics from firmware via the
 * HWRM FUNC_QSTATS command and translate them into rte_eth_stats.
 *
 * @bp:    device private data (holds the HWRM response buffer)
 * @fid:   function ID to query; 0xffff selects the calling function
 * @stats: output — totals are overwritten, not accumulated
 *
 * Returns 0 on success; HWRM_CHECK_RESULT handles firmware errors.
 */
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, -1, resp);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Totals are the sum of the unicast/multicast/broadcast counters. */
	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts) +
			  rte_le_to_cpu_64(resp->rx_mcast_pkts) +
			  rte_le_to_cpu_64(resp->rx_bcast_pkts);

	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes) +
			rte_le_to_cpu_64(resp->rx_mcast_bytes) +
			rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts) +
			  rte_le_to_cpu_64(resp->tx_mcast_pkts) +
			  rte_le_to_cpu_64(resp->tx_bcast_pkts);

	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes) +
			rte_le_to_cpu_64(resp->tx_mcast_bytes) +
			rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
	/* Firmware-reported RX drops map to "imissed" (no mbuf drops here). */
	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	return rc;
}
/*
* HWRM utility functions
*/
@ -2348,6 +2382,42 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
return rc;
}
/*
 * Query one statistics context from firmware via HWRM STAT_CTX_QUERY and
 * fill the per-queue counters for queue slot @idx in @stats.
 *
 * @bp:    device private data (holds the HWRM response buffer)
 * @cid:   hardware statistics context ID to query
 * @idx:   index into the rte_eth_stats per-queue arrays
 *         NOTE(review): no bounds check against
 *         RTE_ETHDEV_QUEUE_STAT_CNTRS here — callers must ensure idx is
 *         in range; confirm with callers.
 * @stats: output — per-queue entries at @idx are overwritten
 *
 * Returns 0 on success; HWRM_CHECK_RESULT handles firmware errors.
 */
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Per-queue totals: unicast + multicast + broadcast. */
	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts) +
				 rte_le_to_cpu_64(resp->rx_mcast_pkts) +
				 rte_le_to_cpu_64(resp->rx_bcast_pkts);

	stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes) +
			       rte_le_to_cpu_64(resp->rx_mcast_bytes) +
			       rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts) +
				 rte_le_to_cpu_64(resp->tx_mcast_pkts) +
				 rte_le_to_cpu_64(resp->tx_bcast_pkts);

	stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes) +
			       rte_le_to_cpu_64(resp->tx_mcast_bytes) +
			       rte_le_to_cpu_64(resp->tx_bcast_bytes);

	/* q_errors folds in RX errors, TX errors, and RX drops. */
	stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts) +
			       rte_le_to_cpu_64(resp->tx_err_pkts) +
			       rte_le_to_cpu_64(resp->rx_drop_pkts);

	return rc;
}
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
struct hwrm_port_qstats_input req = {0};

View File

@ -62,6 +62,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_func_reset(struct bnxt *bp);
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags);
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
struct rte_eth_stats *stats);
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
@ -81,6 +83,8 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr, unsigned int idx);
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
struct rte_eth_stats *stats);
int bnxt_hwrm_ver_get(struct bnxt *bp);

View File

@ -68,8 +68,10 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
struct rte_mbuf *data;
data = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!data)
if (!data) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
rx_buf->mbuf = data;
@ -87,8 +89,10 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
struct rte_mbuf *data;
data = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!data)
if (!data) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
if (rxbd == NULL)
RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
@ -319,8 +323,10 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data)
if (!new_data) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return NULL;
}
tpa_info->mbuf = new_data;
return mbuf;
@ -676,8 +682,10 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < BNXT_TPA_MAX; i++) {
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf)
if (!rxr->tpa_info[i].mbuf) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
}
}
RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);

View File

@ -196,64 +196,18 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct ctx_hw_stats64 *hw_stats =
(struct ctx_hw_stats64 *)cpr->hw_stats;
bnxt_stats->q_ipackets[i] +=
rte_le_to_cpu_64(hw_stats->rx_ucast_pkts);
bnxt_stats->q_ipackets[i] +=
rte_le_to_cpu_64(hw_stats->rx_mcast_pkts);
bnxt_stats->q_ipackets[i] +=
rte_le_to_cpu_64(hw_stats->rx_bcast_pkts);
bnxt_stats->q_ibytes[i] +=
rte_le_to_cpu_64(hw_stats->rx_ucast_bytes);
bnxt_stats->q_ibytes[i] +=
rte_le_to_cpu_64(hw_stats->rx_mcast_bytes);
bnxt_stats->q_ibytes[i] +=
rte_le_to_cpu_64(hw_stats->rx_bcast_bytes);
/*
* TBD: No clear mapping to this... we don't seem
* to have a stat specifically for dropped due to
* insufficient mbufs.
*/
bnxt_stats->q_errors[i] = 0;
/* These get replaced once the *_QSTATS commands work */
bnxt_stats->ipackets += bnxt_stats->q_ipackets[i];
bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
bnxt_stats->imissed += bnxt_stats->q_errors[i];
bnxt_stats->ierrors +=
rte_le_to_cpu_64(hw_stats->rx_discard_pkts);
bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
struct ctx_hw_stats64 *hw_stats =
(struct ctx_hw_stats64 *)cpr->hw_stats;
bnxt_stats->q_opackets[i] +=
rte_le_to_cpu_64(hw_stats->tx_ucast_pkts);
bnxt_stats->q_opackets[i] +=
rte_le_to_cpu_64(hw_stats->tx_mcast_pkts);
bnxt_stats->q_opackets[i] +=
rte_le_to_cpu_64(hw_stats->tx_bcast_pkts);
bnxt_stats->q_obytes[i] +=
rte_le_to_cpu_64(hw_stats->tx_ucast_bytes);
bnxt_stats->q_obytes[i] +=
rte_le_to_cpu_64(hw_stats->tx_mcast_bytes);
bnxt_stats->q_obytes[i] +=
rte_le_to_cpu_64(hw_stats->tx_bcast_bytes);
/* These get replaced once the *_QSTATS commands work */
bnxt_stats->opackets += bnxt_stats->q_opackets[i];
bnxt_stats->obytes += bnxt_stats->q_obytes[i];
bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_discard_pkts);
bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats);
}
bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
}
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
@ -261,6 +215,7 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
bnxt_clear_all_hwrm_stat_ctxs(bp);
rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
}
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,