vdpa/mlx5: make statistics counter persistent
In order to speed up device suspend and resume, make the statistics counters
persistent across reconfiguration until the device gets removed.

Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
commit 476048d546 (parent d7e5d5a7e5)
@@ -109,3 +109,9 @@ Upon potential hardware errors, mlx5 PMD try to recover, give up if failed 3
 times in 3 seconds, virtq will be put in disable state. User should check log
 to get error information, or query vdpa statistics counter to know error type
 and count report.
+
+Statistics
+^^^^^^^^^^
+
+The device statistics counter persists in reconfiguration until the device gets
+removed. User can reset counters by calling function rte_vdpa_reset_stats().
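A minimal sketch of the application side described by this documentation, assuming the device is already probed as a vDPA device; the device name, queue id and array sizes below are placeholders, not part of this patch:

        #include <inttypes.h>
        #include <stdio.h>
        #include <rte_common.h>
        #include <rte_vdpa.h>

        /* Dump the per-virtq counters of one queue, then reset them. */
        static void
        dump_and_reset_virtq_stats(const char *dev_name, uint16_t qid)
        {
                struct rte_vdpa_device *dev = rte_vdpa_find_device_by_name(dev_name);
                struct rte_vdpa_stat_name names[16];
                struct rte_vdpa_stat stats[16];
                int n, i;

                if (dev == NULL)
                        return;
                /* Counter names are driver specific, so query them first. */
                n = rte_vdpa_get_stats_names(dev, names, RTE_DIM(names));
                if (n <= 0 || n > (int)RTE_DIM(names))
                        return;
                if (rte_vdpa_get_stats(dev, qid, stats, n) <= 0)
                        return;
                for (i = 0; i < n; i++)
                        printf("virtq %u %s: %" PRIu64 "\n", qid,
                               names[stats[i].id].name, stats[i].value);
                /* With this patch the counters persist across reconfiguration,
                 * so an explicit reset is the only way to zero them. */
                rte_vdpa_reset_stats(dev, qid);
        }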
@@ -388,12 +388,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
                 DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                 return -ENODEV;
         }
-        if (priv->state == MLX5_VDPA_STATE_PROBED) {
-                DRV_LOG(ERR, "Device %s was not configured.",
-                                vdev->device->name);
-                return -ENODATA;
-        }
-        if (qid >= (int)priv->nr_virtqs) {
+        if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
                 DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                 vdev->device->name);
                 return -E2BIG;
@@ -416,12 +411,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
                 DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
                 return -ENODEV;
         }
-        if (priv->state == MLX5_VDPA_STATE_PROBED) {
-                DRV_LOG(ERR, "Device %s was not configured.",
-                                vdev->device->name);
-                return -ENODATA;
-        }
-        if (qid >= (int)priv->nr_virtqs) {
+        if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
                 DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
                                 vdev->device->name);
                 return -E2BIG;
@@ -695,6 +685,11 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
         uint32_t i;
 
         mlx5_vdpa_dev_cache_clean(priv);
+        for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+                if (!priv->virtqs[i].counters)
+                        continue;
+                claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
+        }
         mlx5_vdpa_event_qp_global_release(priv);
         mlx5_vdpa_err_event_unset(priv);
         if (priv->steer.tbl)
@@ -92,6 +92,7 @@ struct mlx5_vdpa_virtq {
         struct rte_intr_handle *intr_handle;
         uint64_t err_time[3]; /* RDTSC time of recent errors. */
         uint32_t n_retry;
+        struct mlx5_devx_virtio_q_couners_attr stats;
         struct mlx5_devx_virtio_q_couners_attr reset;
 };
 
@@ -127,14 +127,9 @@ void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
         int i;
-        struct mlx5_vdpa_virtq *virtq;
 
-        for (i = 0; i < priv->nr_virtqs; i++) {
-                virtq = &priv->virtqs[i];
-                mlx5_vdpa_virtq_unset(virtq);
-                if (virtq->counters)
-                        claim_zero(mlx5_devx_cmd_destroy(virtq->counters));
-        }
+        for (i = 0; i < priv->nr_virtqs; i++)
+                mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
         priv->features = 0;
         priv->nr_virtqs = 0;
 }
@@ -590,7 +585,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                 struct rte_vdpa_stat *stats, unsigned int n)
 {
         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
-        struct mlx5_devx_virtio_q_couners_attr attr = {0};
+        struct mlx5_devx_virtio_q_couners_attr *attr = &virtq->stats;
         int ret;
 
         if (!virtq->counters) {
@@ -598,7 +593,7 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                         "is invalid.", qid);
                 return -EINVAL;
         }
-        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, &attr);
+        ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters, attr);
         if (ret) {
                 DRV_LOG(ERR, "Failed to read virtq %d stats from HW.", qid);
                 return ret;
@@ -608,37 +603,37 @@ mlx5_vdpa_virtq_stats_get(struct mlx5_vdpa_priv *priv, int qid,
                 return ret;
         stats[MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_RECEIVED_DESCRIPTORS,
-                .value = attr.received_desc - virtq->reset.received_desc,
+                .value = attr->received_desc - virtq->reset.received_desc,
         };
         if (ret == MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS)
                 return ret;
         stats[MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_COMPLETED_DESCRIPTORS,
-                .value = attr.completed_desc - virtq->reset.completed_desc,
+                .value = attr->completed_desc - virtq->reset.completed_desc,
         };
         if (ret == MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS)
                 return ret;
         stats[MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_BAD_DESCRIPTOR_ERRORS,
-                .value = attr.bad_desc_errors - virtq->reset.bad_desc_errors,
+                .value = attr->bad_desc_errors - virtq->reset.bad_desc_errors,
         };
         if (ret == MLX5_VDPA_STATS_EXCEED_MAX_CHAIN)
                 return ret;
         stats[MLX5_VDPA_STATS_EXCEED_MAX_CHAIN] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_EXCEED_MAX_CHAIN,
-                .value = attr.exceed_max_chain - virtq->reset.exceed_max_chain,
+                .value = attr->exceed_max_chain - virtq->reset.exceed_max_chain,
         };
         if (ret == MLX5_VDPA_STATS_INVALID_BUFFER)
                 return ret;
         stats[MLX5_VDPA_STATS_INVALID_BUFFER] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_INVALID_BUFFER,
-                .value = attr.invalid_buffer - virtq->reset.invalid_buffer,
+                .value = attr->invalid_buffer - virtq->reset.invalid_buffer,
         };
         if (ret == MLX5_VDPA_STATS_COMPLETION_ERRORS)
                 return ret;
         stats[MLX5_VDPA_STATS_COMPLETION_ERRORS] = (struct rte_vdpa_stat) {
                 .id = MLX5_VDPA_STATS_COMPLETION_ERRORS,
-                .value = attr.error_cqes - virtq->reset.error_cqes,
+                .value = attr->error_cqes - virtq->reset.error_cqes,
         };
         return ret;
 }
@@ -649,11 +644,8 @@ mlx5_vdpa_virtq_stats_reset(struct mlx5_vdpa_priv *priv, int qid)
         struct mlx5_vdpa_virtq *virtq = &priv->virtqs[qid];
         int ret;
 
-        if (!virtq->counters) {
-                DRV_LOG(ERR, "Failed to read virtq %d statistics - virtq "
-                        "is invalid.", qid);
-                return -EINVAL;
-        }
+        if (virtq->counters == NULL) /* VQ not enabled. */
+                return 0;
         ret = mlx5_devx_cmd_query_virtio_q_counters(virtq->counters,
                                                 &virtq->reset);
         if (ret)
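After this change, resetting statistics no longer destroys and recreates the hardware counter object; it only snapshots the current values into virtq->reset, and queries report the delta against that snapshot from the persistent counter cached in virtq->stats. An illustrative, driver-independent sketch of that pattern (all names below are hypothetical, not from the patch):

        #include <stdint.h>

        /* Hypothetical counter set, mirroring the "query minus reset snapshot" idea. */
        struct q_counters {
                uint64_t received_desc;
                uint64_t completed_desc;
        };

        /* "Reset" keeps the hardware counter alive and only records a snapshot. */
        static void
        counters_reset(struct q_counters *reset, const struct q_counters *hw)
        {
                *reset = *hw;
        }

        /* A query reports values relative to the last snapshot. */
        static uint64_t
        received_since_reset(const struct q_counters *hw, const struct q_counters *reset)
        {
                return hw->received_desc - reset->received_desc;
        }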