vdpa/mlx5: make statistics counter persistent

In order to speed up device suspend and resume, make the statistics
counters persistent across reconfigurations until the device gets removed.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Author:    Xueming Li <xuemingl@nvidia.com>
Committer: Maxime Coquelin
Date:      2022-05-08 17:25:54 +03:00
Commit:    476048d546 (parent d7e5d5a7e5)

4 changed files with 26 additions and 32 deletions

@@ -109,3 +109,9 @@ Upon potential hardware errors, mlx5 PMD try to recover, give up if failed 3
 times in 3 seconds, virtq will be put in disable state. User should check log
 to get error information, or query vdpa statistics counter to know error type
 and count report.
+
+Statistics
+^^^^^^^^^^
+
+The device statistics counter persists in reconfiguration until the device gets
+removed. User can reset counters by calling function rte_vdpa_reset_stats().
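
For illustration, a minimal application-side sketch of the query/reset flow the documentation above describes, using only the generic statistics API from rte_vdpa.h (rte_vdpa_get_stats_names(), rte_vdpa_get_stats(), rte_vdpa_reset_stats()); the device name, queue id and the array size of 16 are placeholders, not values taken from this patch:

/* Hypothetical helper: print the persistent per-virtq counters of a vDPA
 * device and start a new measurement interval. Minimal error handling.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_vdpa.h>

static void
dump_and_reset_virtq_stats(const char *vdev_name, uint16_t qid)
{
	struct rte_vdpa_device *dev = rte_vdpa_find_device_by_name(vdev_name);
	struct rte_vdpa_stat_name names[16];
	struct rte_vdpa_stat stats[16];
	int i, n, got;

	if (dev == NULL)
		return;
	/* The counter set and its names are driver specific. */
	n = rte_vdpa_get_stats_names(dev, names, 16);
	if (n <= 0)
		return;
	if (n > 16)
		n = 16;
	/* Values keep accumulating across reconfiguration until removal. */
	got = rte_vdpa_get_stats(dev, qid, stats, n);
	if (got <= 0)
		return;
	for (i = 0; i < got; i++)
		printf("virtq %u %s: %" PRIu64 "\n", (unsigned int)qid,
		       names[stats[i].id].name, stats[i].value);
	/* Snapshot the current values as the new zero baseline. */
	rte_vdpa_reset_stats(dev, qid);
}

Called e.g. as dump_and_reset_virtq_stats("0000:3b:00.2", 0) after EAL initialization; the PCI address is a placeholder.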

@@ -388,12 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (priv->state == MLX5_VDPA_STATE_PROBED) {
-		DRV_LOG(ERR, "Device %s was not configured.",
-			vdev->device->name);
-		return -ENODATA;
-	}
-	if (qid >= (int)priv->nr_virtqs) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 			vdev->device->name);
 		return -E2BIG;
@@ -416,12 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (priv->state == MLX5_VDPA_STATE_PROBED) {
-		DRV_LOG(ERR, "Device %s was not configured.",
-			vdev->device->name);
-		return -ENODATA;
-	}
-	if (qid >= (int)priv->nr_virtqs) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 			vdev->device->name);
 		return -E2BIG;
@@ -695,6 +685,11 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
+	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+		if (!priv->virtqs[i].counters)
+			continue;
+		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
+	}
 	mlx5_vdpa_event_qp_global_release(priv);
 	mlx5_vdpa_err_event_unset(priv);
 	if (priv->steer.tbl)
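
Together with the mlx5_vdpa_virtqs_release() change further below, the hunk above moves counter teardown from the reconfiguration path to device removal. A simplified, self-contained model of that lifecycle in plain C; the types and helpers are hypothetical stand-ins, not the driver's DevX objects:

/* Hypothetical model of the counter lifecycle: the object is created lazily
 * when a virtq is first set up, survives reconfiguration (and thus
 * suspend/resume), and is destroyed only when the device is removed.
 */
#include <stdlib.h>

struct hw_counter { unsigned long long value; };  /* stand-in for a DevX counter object */

struct virtq_state {
	struct hw_counter *counters;  /* persists across reconfiguration */
};

static void
virtq_setup(struct virtq_state *vq)
{
	if (vq->counters == NULL)  /* create once, reuse afterwards */
		vq->counters = calloc(1, sizeof(*vq->counters));
}

static void
virtq_release(struct virtq_state *vq)
{
	/* Reconfiguration path: keep vq->counters so the history is preserved. */
	(void)vq;
}

static void
device_remove(struct virtq_state *vqs, unsigned int nb_vqs)
{
	unsigned int i;

	/* Removal path: the only place where counters are freed. */
	for (i = 0; i < nb_vqs; i++) {
		free(vqs[i].counters);
		vqs[i].counters = NULL;
	}
}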

@@ -92,6 +92,7 @@ struct mlx5_vdpa_virtq {
 	struct rte_intr_handle *intr_handle;
 	uint64_t err_time[3]; /* RDTSC time of recent errors. */
 	uint32_t n_retry;
+	struct mlx5_devx_virtio_q_couners_attr stats;
 	struct mlx5_devx_virtio_q_couners_attr reset;
 };

@@ -127,14 +127,9 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
-	struct mlx5_vdpa_virtq *virtq;
 
-	for (i = 0; i < priv->nr_virtqs; i++) {
-		virtq = &priv->virtqs[i];
-		mlx5_vdpa_virtq_unset(virtq);
-		if (virtq->counters)
-			claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
-	}
+	for (i = 0; i < priv->nr_virtqs; i++)
+		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
 	priv->features = 0;
 	priv->nr_virtqs = 0;
 }
@@ -590,7 +585,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 			  struct rte_vdpa_stat *stats, unsigned int n)
 {
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
-	struct mlx5_devx_virtio_q_couners_attr attr = {0};
+	struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
 	int ret;
 
 	if (!virtq->counters) {
@@ -598,7 +593,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 			"is invalid.", qid);
 		return -EINVAL;
 	}
-	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
 		return ret;
@@ -608,37 +603,37 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
 		return ret;
 	stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
-		.value = attr.received_desc - virtq->reset.received_desc,
+		.value = attr->received_desc - virtq->reset.received_desc,
 	};
 	if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
-		.value = attr.completed_desc - virtq->reset.completed_desc,
+		.value = attr->completed_desc - virtq->reset.completed_desc,
 	};
 	if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
-		.value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+		.value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
 	};
 	if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
 		return ret;
 	stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
-		.value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+		.value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
 	};
 	if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
 		return ret;
 	stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_INVALID_BUFFER,
-		.value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+		.value = attr->invalid_buffer - virtq->reset.invalid_buffer,
 	};
 	if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
 		return ret;
 	stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
 		.id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
-		.value = attr.error_cqes - virtq->reset.error_cqes,
+		.value = attr->error_cqes - virtq->reset.error_cqes,
 	};
 	return ret;
 }
@@ -649,11 +644,8 @@ mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
 	int ret;
 
-	if (!virtq->counters) {
-		DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
-			"is invalid.", qid);
-		return -EINVAL;
-	}
+	if (virtq->counters == NULL) /* VQ not enabled. */
+		return 0;
 	ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
 						    &virtq->reset);
 	if (ret)
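
The reset path above never clears the hardware counters; it only records a new baseline, and mlx5_vdpa_virtq_stats_get() reports the difference between the latest query (now cached in virtq->stats) and that baseline. A compact, self-contained model of this read-minus-baseline scheme in plain C; the struct and function names are hypothetical, the real driver fills struct mlx5_devx_virtio_q_couners_attr through a DevX query:

#include <stdint.h>

/* Hypothetical model of two monotonically increasing HW counters. */
struct q_counters {
	uint64_t received_desc;
	uint64_t completed_desc;
};

struct virtq_model {
	struct q_counters stats;  /* last raw values read from HW (persistent) */
	struct q_counters reset;  /* snapshot taken at the last reset */
};

static void
stats_get(struct virtq_model *vq, const struct q_counters *hw,
	  uint64_t *received, uint64_t *completed)
{
	vq->stats = *hw;  /* cache the raw HW read, as the patch now does */
	/* Reported value = current HW value minus the last-reset baseline. */
	*received = vq->stats.received_desc - vq->reset.received_desc;
	*completed = vq->stats.completed_desc - vq->reset.completed_desc;
}

static void
stats_reset(struct virtq_model *vq, const struct q_counters *hw)
{
	vq->reset = *hw;  /* new baseline; the HW counters keep running */
}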