vdpa/mlx5: fix maximum number of virtqs
The driver wrongly interpreted the capability value as the number of
virtq pairs instead of the number of virtqs it actually reports.
Adjust all usages of it to be the number of virtqs.
Fixes: c2eb33aaf9 ("vdpa/mlx5: manage virtqs by array")
Cc: stable@dpdk.org
Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
commit 6f065d1539 (parent ad0eeb83ea)
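The whole fix is a factor of two. Below is a minimal standalone sketch of the arithmetic, assuming an illustrative capability value of 16 (the real value comes from the device capabilities, not from this example):

    /* Standalone sketch of the off-by-2x, for illustration only:
     * max_num_virtio_queues counts virtqs, not queue pairs. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int max_num_virtio_queues = 16; /* assumed capability value */

        /* Old, wrong reading: the value was taken as pairs, so array sizes
         * and bounds checks were doubled. */
        printf("old virtq count: %u\n", max_num_virtio_queues * 2); /* 32 */
        /* Correct reading: the value already counts virtqs; only the
         * get_queue_num() callback converts to pairs for vhost. */
        printf("queue pairs reported: %u\n", max_num_virtio_queues / 2); /* 8 */
        return 0;
    }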
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -85,7 +85,7 @@ mlx5_vdpa_get_queue_num(struct rte_vdpa_device *vdev, uint32_t *queue_num)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -1;
 	}
-	*queue_num = priv->caps.max_num_virtio_queues;
+	*queue_num = priv->caps.max_num_virtio_queues / 2;
 	return 0;
 }
 
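This is the one place where a division is introduced rather than a multiplication removed: the vDPA get_queue_num callback reports queue pairs to the vhost layer, while the capability counts individual virtqs. A hedged sketch of the conventional virtio-net mapping (even vring index = RX, odd = TX of the same pair); these helpers are illustrative, not part of the driver:

    /* Illustrative only: pair p owns vrings 2*p (RX) and 2*p + 1 (TX),
     * so N virtqs back N / 2 queue pairs. */
    static inline unsigned int vring_to_pair(unsigned int vring)
    {
        return vring / 2;
    }

    static inline unsigned int pairs_from_virtqs(unsigned int nr_virtqs)
    {
        return nr_virtqs / 2;
    }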
@@ -142,7 +142,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid vDPA device: %s.", vdev->device->name);
 		return -EINVAL;
 	}
-	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (vring >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
@@ -389,7 +389,7 @@ mlx5_vdpa_get_stats(struct rte_vdpa_device *vdev, int qid,
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 			vdev->device->name);
 		return -E2BIG;
@@ -412,7 +412,7 @@ mlx5_vdpa_reset_stats(struct rte_vdpa_device *vdev, int qid)
 		DRV_LOG(ERR, "Invalid device: %s.", vdev->device->name);
 		return -ENODEV;
 	}
-	if (qid >= (int)priv->caps.max_num_virtio_queues * 2) {
+	if (qid >= (int)priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Too big vring id: %d for device %s.", qid,
 			vdev->device->name);
 		return -E2BIG;
@@ -628,7 +628,7 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 		DRV_LOG(DEBUG, "No capability to support virtq statistics.");
 	priv = rte_zmalloc("mlx5 vDPA device private", sizeof(*priv) +
 			   sizeof(struct mlx5_vdpa_virtq) *
-			   attr->vdpa.max_num_virtio_queues * 2,
+			   attr->vdpa.max_num_virtio_queues,
 			   RTE_CACHE_LINE_SIZE);
 	if (!priv) {
 		DRV_LOG(ERR, "Failed to allocate private memory.");
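The virtq array lives in the same allocation as the private structure, so the doubled count also doubled the per-device footprint. A sketch of the corrected sizing with an assumed capability of 16; sz is a local name introduced here for illustration:

    /* Sketch: with max_num_virtio_queues = 16 (assumed), the old code
     * reserved room for 32 struct mlx5_vdpa_virtq entries after *priv,
     * twice what the device can ever index. */
    size_t sz = sizeof(*priv) +
                sizeof(struct mlx5_vdpa_virtq) *
                attr->vdpa.max_num_virtio_queues;
    priv = rte_zmalloc("mlx5 vDPA device private", sz, RTE_CACHE_LINE_SIZE);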
@@ -689,7 +689,7 @@ mlx5_vdpa_release_dev_resources(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 
 	mlx5_vdpa_dev_cache_clean(priv);
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		if (!priv->virtqs[i].counters)
			continue;
 		claim_zero(mlx5_devx_cmd_destroy(priv->virtqs[i].counters));
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -72,7 +72,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i, j;
 
-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
@@ -492,9 +492,9 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(INFO, "TSO is enabled without CSUM, force CSUM.");
 		priv->features |= (1ULL << VIRTIO_NET_F_CSUM);
 	}
-	if (nr_vring > priv->caps.max_num_virtio_queues * 2) {
+	if (nr_vring > priv->caps.max_num_virtio_queues) {
 		DRV_LOG(ERR, "Do not support more than %d virtqs(%d).",
-			(int)priv->caps.max_num_virtio_queues * 2,
+			(int)priv->caps.max_num_virtio_queues,
 			(int)nr_vring);
 		return -1;
 	}
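For context, nr_vring here counts individual vrings as reported by the vhost layer (via rte_vhost_get_vring_num()), so once the capability is read as virtqs the two quantities compare one-to-one; a hedged sketch under that assumption:

    /* Sketch, assuming the surrounding driver names: vhost reports the
     * number of individual vrings, which must not exceed the virtq
     * capability. */
    uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);

    if (nr_vring > priv->caps.max_num_virtio_queues)
        return -1; /* more vrings than the device exposes */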