net/mlx5: fix Rx/Tx stats concurrency

Queue statistics are continuously updated in the Rx/Tx burst
routines while handling traffic. In addition, the same counters can
be written with zeroes by a statistics reset running in another
thread, causing a race condition, which in turn could result in
wrong stats.
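
A minimal sketch of the racy pattern, with hypothetical names rather
than the actual driver code:

    #include <stdint.h>
    #include <string.h>

    struct q_stats { uint64_t ipackets; };

    /* Burst thread: the counter update is a plain read-modify-write. */
    static void rx_burst_count(struct q_stats *s, unsigned int n)
    {
            s->ipackets += n; /* load, add, store - not atomic */
    }

    /* Another thread: reset writes zeroes to the same memory. */
    static void racy_reset(struct q_stats *s)
    {
            memset(s, 0, sizeof(*s));
    }

If the reset lands between the load and the store of an in-flight
increment, the subsequent store brings back the pre-reset value, so
the reset is effectively lost.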

The patch provides an approach based on reference values: the
actual counters are written within the Rx/Tx burst threads only,
while a statistics reset merely snapshots them into the reference
values, and the reported statistics are the difference between the
two.
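
A minimal sketch of that scheme, again with hypothetical names:

    #include <stdint.h>

    struct q_stats { uint64_t ipackets; };

    struct queue {
            struct q_stats stats;       /* written by the burst thread only */
            struct q_stats stats_reset; /* reference values */
    };

    /* Reset: snapshot the running counters instead of zeroing them. */
    static void ref_reset(struct queue *q)
    {
            q->stats_reset = q->stats;
    }

    /* Get: report the difference since the last reset. */
    static uint64_t ref_ipackets(const struct queue *q)
    {
            return q->stats.ipackets - q->stats_reset.ipackets;
    }

Since the burst thread remains the only writer of the running
counters, no write-write race remains, and unsigned subtraction
keeps the deltas correct across counter wraparound.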

Fixes: 87011737b7 ("mlx5: add software counters")
Cc: stable@dpdk.org

Signed-off-by: Raja Zidane <rzidane@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

--- a/drivers/net/mlx5/mlx5_rx.h
+++ b/drivers/net/mlx5/mlx5_rx.h

@@ -126,6 +126,7 @@ struct mlx5_rxq_data {
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	uint16_t idx; /* Queue index. */
 	struct mlx5_rxq_stats stats;
+	struct mlx5_rxq_stats stats_reset; /* stats on last reset. */
 	rte_xmm_t mbuf_initializer; /* Default rearm/flags for vectorized Rx. */
 	struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
 	struct mlx5_uar_data uar_data; /* CQ doorbell. */

--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c

@@ -114,18 +114,23 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = rxq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_ipackets[idx] += rxq->stats.ipackets;
-			tmp.q_ibytes[idx] += rxq->stats.ibytes;
+			tmp.q_ipackets[idx] += rxq->stats.ipackets -
+						rxq->stats_reset.ipackets;
+			tmp.q_ibytes[idx] += rxq->stats.ibytes -
+						rxq->stats_reset.ibytes;
 #endif
 			tmp.q_errors[idx] += (rxq->stats.idropped +
-					      rxq->stats.rx_nombuf);
+					      rxq->stats.rx_nombuf) -
+					     (rxq->stats_reset.idropped +
+					      rxq->stats_reset.rx_nombuf);
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.ipackets += rxq->stats.ipackets;
-		tmp.ibytes += rxq->stats.ibytes;
+		tmp.ipackets += rxq->stats.ipackets - rxq->stats_reset.ipackets;
+		tmp.ibytes += rxq->stats.ibytes - rxq->stats_reset.ibytes;
 #endif
-		tmp.ierrors += rxq->stats.idropped;
-		tmp.rx_nombuf += rxq->stats.rx_nombuf;
+		tmp.ierrors += rxq->stats.idropped - rxq->stats_reset.idropped;
+		tmp.rx_nombuf += rxq->stats.rx_nombuf -
+					rxq->stats_reset.rx_nombuf;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
 		struct mlx5_txq_data *txq = (*priv->txqs)[i];
@@ -135,15 +140,17 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		idx = txq->idx;
 		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
 #ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_opackets[idx] += txq->stats.opackets;
-			tmp.q_obytes[idx] += txq->stats.obytes;
+			tmp.q_opackets[idx] += txq->stats.opackets -
+						txq->stats_reset.opackets;
+			tmp.q_obytes[idx] += txq->stats.obytes -
+						txq->stats_reset.obytes;
 #endif
 		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.opackets += txq->stats.opackets;
-		tmp.obytes += txq->stats.obytes;
+		tmp.opackets += txq->stats.opackets - txq->stats_reset.opackets;
+		tmp.obytes += txq->stats.obytes - txq->stats_reset.obytes;
 #endif
-		tmp.oerrors += txq->stats.oerrors;
+		tmp.oerrors += txq->stats.oerrors - txq->stats_reset.oerrors;
 	}
 	ret = mlx5_os_read_dev_stat(priv, "out_of_buffer", &tmp.imissed);
 	if (ret == 0) {
@@ -185,13 +192,14 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
 		if (rxq_data == NULL)
 			continue;
-		memset(&rxq_data->stats, 0, sizeof(struct mlx5_rxq_stats));
+		rxq_data->stats_reset = rxq_data->stats;
 	}
 	for (i = 0; (i != priv->txqs_n); ++i) {
-		if ((*priv->txqs)[i] == NULL)
+		struct mlx5_txq_data *txq_data = (*priv->txqs)[i];
+
+		if (txq_data == NULL)
 			continue;
-		memset(&(*priv->txqs)[i]->stats, 0,
-		       sizeof(struct mlx5_txq_stats));
+		txq_data->stats_reset = txq_data->stats;
 	}
 	mlx5_os_read_dev_stat(priv, "out_of_buffer", &stats_ctrl->imissed_base);
 	stats_ctrl->imissed = 0;
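
For context, the user-visible contract this reset path serves, via
the standard ethdev API (usage sketch, not part of the patch):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static int show_delta(uint16_t port_id)
    {
            struct rte_eth_stats st;
            int ret;

            ret = rte_eth_stats_reset(port_id);
            if (ret != 0)
                    return ret;
            /* ... traffic flows ... */
            ret = rte_eth_stats_get(port_id, &st);
            if (ret != 0)
                    return ret;
            /* Counters reflect only traffic since the reset above. */
            printf("ipackets: %" PRIu64 "\n", st.ipackets);
            return 0;
    }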

--- a/drivers/net/mlx5/mlx5_tx.h
+++ b/drivers/net/mlx5/mlx5_tx.h

@@ -164,6 +164,7 @@ struct mlx5_txq_data {
 	int32_t ts_offset; /* Timestamp field dynamic offset. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared context. */
 	struct mlx5_txq_stats stats; /* TX queue counters. */
+	struct mlx5_txq_stats stats_reset; /* stats on last reset. */
 	struct mlx5_uar_data uar_data;
 	struct rte_mbuf *elts[0];
 	/* Storage for queued packets, must be the last field. */