diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c
index 70327dfda5..9f7353d3b0 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c
@@ -121,13 +121,11 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid device id: %d.", did);
 		return -EINVAL;
 	}
-	if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
-	    (int)priv->caps.max_num_virtio_queues * 2) ||
-	    !priv->virtqs[vring].virtq) {
-		DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
-		return -EINVAL;
+	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+		DRV_LOG(ERR, "Too big vring id: %d.", vring);
+		return -E2BIG;
 	}
-	return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
+	return mlx5_vdpa_virtq_enable(priv, vring, state);
 }
 
 static int
@@ -206,7 +204,7 @@ mlx5_vdpa_dev_close(int vid)
 	if (priv->configured)
 		ret |= mlx5_vdpa_lm_log(priv);
 	mlx5_vdpa_cqe_event_unset(priv);
-	ret |= mlx5_vdpa_steer_unset(priv);
+	mlx5_vdpa_steer_unset(priv);
 	mlx5_vdpa_virtqs_release(priv);
 	mlx5_vdpa_event_qp_global_release(priv);
 	mlx5_vdpa_mem_dereg(priv);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.h b/drivers/vdpa/mlx5/mlx5_vdpa.h
index 0edd68800f..fcc216ac78 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h
@@ -127,6 +127,24 @@ struct mlx5_vdpa_priv {
 	struct mlx5_vdpa_virtq virtqs[];
 };
 
+/*
+ * Check whether the virtq is a receive queue.
+ * According to the VIRTIO_NET spec, the virtqueue index identifies its type:
+ * 0 receiveq1
+ * 1 transmitq1
+ * ...
+ * 2(N-1) receiveqN
+ * 2(N-1)+1 transmitqN
+ * 2N controlq
+ */
+static inline uint8_t
+is_virtq_recvq(int virtq_index, int nr_vring)
+{
+	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
+		return 1;
+	return 0;
+}
+
 /**
  * Release all the prepared memory regions and all their related resources.
  *
@@ -223,15 +241,17 @@ int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 /**
  * Enable\Disable virtq..
  *
- * @param[in] virtq
- *   The vdpa driver private virtq structure.
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] index
+ *   The virtq index.
  * @param[in] enable
  *   Set to enable, otherwise disable.
  *
  * @return
  *   0 on success, a negative value otherwise.
  */
-int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
 
 /**
  * Unset steering and release all its related resources- stop traffic.
@@ -239,7 +259,18 @@ int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
 * @param[in] priv
 *   The vdpa driver private structure.
 */
-int mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Update steering according to the receive queues status.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
 
 /**
  * Setup steering and all its related resources to enable RSS traffic from the
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 26b7ce1c8b..460e01d800 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -19,7 +19,8 @@ mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
 
 	for (i = 0; i < priv->nr_virtqs; ++i) {
 		attr.queue_index = i;
-		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+		if (!priv->virtqs[i].virtq ||
+		    mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
 			DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
 			return -1;
 		}
@@ -68,7 +69,8 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 	attr.dirty_bitmap_mkey = mr->mkey->id;
 	for (i = 0; i < priv->nr_virtqs; ++i) {
 		attr.queue_index = i;
-		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+		if (!priv->virtqs[i].virtq ||
+		    mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
 			DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
 			goto err;
 		}
@@ -102,9 +104,14 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 	if (!RTE_VHOST_NEED_LOG(features))
 		return 0;
 	for (i = 0; i < priv->nr_virtqs; ++i) {
-		ret = mlx5_vdpa_virtq_stop(priv, i);
-		if (ret) {
-			DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+		if (priv->virtqs[i].virtq) {
+			ret = mlx5_vdpa_virtq_stop(priv, i);
+			if (ret) {
+				DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+				return -1;
+			}
+		} else {
+			DRV_LOG(ERR, "virtq %d is not created.", i);
 			return -1;
 		}
 		rte_vhost_log_used_vring(priv->vid, i, 0,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
index 96ffc21c43..406c7be17f 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
@@ -12,10 +12,9 @@
 #include "mlx5_vdpa_utils.h"
 #include "mlx5_vdpa.h"
 
-int
-mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+static void
+mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
 {
-	int ret __rte_unused;
 	unsigned i;
 
 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
@@ -40,6 +39,12 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
 			priv->steer.rss[i].matcher = NULL;
 		}
 	}
+}
+
+void
+mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+{
+	mlx5_vdpa_rss_flows_destroy(priv);
 	if (priv->steer.tbl) {
 		claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
 		priv->steer.tbl = NULL;
@@ -52,27 +57,13 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
 		claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
 		priv->steer.rqt = NULL;
 	}
-	return 0;
-}
-
-/*
- * According to VIRTIO_NET Spec the virtqueues index identity its type by:
- * 0 receiveq1
- * 1 transmitq1
- * ...
- * 2(N-1) receiveqN
- * 2(N-1)+1 transmitqN
- * 2N controlq
- */
-static uint8_t
-is_virtq_recvq(int virtq_index, int nr_vring)
-{
-	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
-		return 1;
-	return 0;
 }
 
 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
+/*
+ * Return the number of queues configured for the table on success, otherwise
+ * -1 on error.
+ */
 static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
@@ -83,7 +74,7 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 						      + rqt_n * sizeof(uint32_t), 0);
 	uint32_t k = 0, j;
-	int ret = 0;
+	int ret = 0, num;
 
 	if (!attr) {
 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
@@ -92,11 +83,15 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 	}
 	for (i = 0; i < priv->nr_virtqs; i++) {
 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
-		    priv->virtqs[i].enable) {
+		    priv->virtqs[i].enable && priv->virtqs[i].virtq) {
 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
 			k++;
 		}
 	}
+	if (k == 0)
+		/* No enabled RQ to configure for RSS. */
+		return 0;
+	num = (int)k;
 	for (j = 0; k != rqt_n; ++k, ++j)
 		attr->rq_list[k] = attr->rq_list[j];
 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
@@ -114,26 +109,7 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 			DRV_LOG(ERR, "Failed to modify RQT.");
 	}
 	rte_free(attr);
-	return ret;
-}
-
-int
-mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
-{
-	struct mlx5_vdpa_priv *priv = virtq->priv;
-	int ret = 0;
-
-	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index,
-		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
-	if (virtq->enable == !!enable)
-		return 0;
-	virtq->enable = !!enable;
-	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-		ret = mlx5_vdpa_rqt_prepare(priv);
-		if (ret)
-			virtq->enable = !enable;
-	}
-	return ret;
+	return ret ? -1 : num;
 }
 
 static int __rte_unused
@@ -261,12 +237,33 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 #endif /* HAVE_MLX5DV_DR */
 }
 
+int
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+{
+	int ret = mlx5_vdpa_rqt_prepare(priv);
+
+	if (ret == 0) {
+		mlx5_vdpa_rss_flows_destroy(priv);
+		if (priv->steer.rqt) {
+			claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
+			priv->steer.rqt = NULL;
+		}
+	} else if (ret < 0) {
+		return ret;
+	} else if (!priv->steer.rss[0].flow) {
+		ret = mlx5_vdpa_rss_flows_create(priv);
+		if (ret) {
+			DRV_LOG(ERR, "Cannot create RSS flows.");
+			return -1;
+		}
+	}
+	return 0;
+}
+
 int
 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 {
 #ifdef HAVE_MLX5DV_DR
-	if (mlx5_vdpa_rqt_prepare(priv))
-		return -1;
 	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
 						  MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
 	if (!priv->steer.domain) {
@@ -278,7 +275,7 @@ mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
 		goto error;
 	}
-	if (mlx5_vdpa_rss_flows_create(priv))
+	if (mlx5_vdpa_steer_update(priv))
 		goto error;
 	return 0;
 error:
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
index 9c0284c671..bd48460b5b 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
@@ -59,9 +59,11 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
 			}
 		}
+		virtq->intr_handle.fd = -1;
 	}
 	if (virtq->virtq)
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+	virtq->virtq = NULL;
 	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		if (virtq->umems[i].obj)
 			claim_zero(mlx5_glue->devx_umem_dereg
@@ -69,10 +71,9 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 		if (virtq->umems[i].buf)
 			rte_free(virtq->umems[i].buf);
 	}
+	memset(&virtq->umems, 0, sizeof(virtq->umems));
 	if (virtq->eqp.fw_qp)
 		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
-	memset(virtq, 0, sizeof(*virtq));
-	virtq->intr_handle.fd = -1;
 	return 0;
 }
 
@@ -81,8 +82,10 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->nr_virtqs; i++)
+	for (i = 0; i < priv->nr_virtqs; i++) {
 		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+		priv->virtqs[i].enable = 0;
+	}
 	if (priv->tis) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
 		priv->tis = NULL;
@@ -272,10 +275,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 		goto error;
 	if (mlx5_vdpa_virtq_modify(virtq, 1))
 		goto error;
-	virtq->enable = 1;
 	virtq->priv = priv;
-	/* Be sure notifications are not missed during configuration. */
-	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
 	rte_write32(virtq->index, priv->virtq_db_addr);
 	/* Setup doorbell mapping. */
 	virtq->intr_handle.fd = vq.kickfd;
@@ -402,11 +402,56 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		goto error;
 	}
 	priv->nr_virtqs = nr_vring;
-	for (i = 0; i < nr_vring; i++)
+	for (i = 0; i < nr_vring; i++) {
+		claim_zero(rte_vhost_enable_guest_notification(priv->vid, i,
+							       1));
 		if (mlx5_vdpa_virtq_setup(priv, i))
 			goto error;
+	}
 	return 0;
error:
 	mlx5_vdpa_virtqs_release(priv);
 	return -1;
 }
+
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
+{
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+	int ret;
+
+	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
+		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
+	if (virtq->enable == !!enable)
+		return 0;
+	if (!priv->configured) {
+		virtq->enable = !!enable;
+		return 0;
+	}
+	if (enable) {
+		/* Configuration might have been updated - reconfigure virtq. */
+		if (virtq->virtq) {
+			ret = mlx5_vdpa_virtq_stop(priv, index);
+			if (ret)
+				DRV_LOG(WARNING, "Failed to stop virtq %d.",
+					index);
+			mlx5_vdpa_virtq_unset(virtq);
+		}
+		ret = mlx5_vdpa_virtq_setup(priv, index);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
+			return ret;
+			/* The only case virtq can stay invalid. */
+		}
+	}
+	virtq->enable = !!enable;
+	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		/* Need to add the receive virtq to the RQT table of the TIRs. */
+		ret = mlx5_vdpa_steer_update(priv);
+		if (ret) {
+			virtq->enable = !enable;
+			return ret;
+		}
+	}
+	return 0;
+}
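
For reference, a minimal standalone sketch (not part of the patch) of how the is_virtq_recvq() helper moved into mlx5_vdpa.h classifies vring indices according to the VIRTIO_NET layout described in its comment; the 5-vring device (two queue pairs plus a control queue) is an assumed example.

#include <stdint.h>
#include <stdio.h>

/* Same body as the helper this patch adds to mlx5_vdpa.h. */
static inline uint8_t
is_virtq_recvq(int virtq_index, int nr_vring)
{
	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
		return 1;
	return 0;
}

int
main(void)
{
	int nr_vring = 5; /* Assumed example: 2 queue pairs + 1 control queue. */
	int i;

	for (i = 0; i < nr_vring; i++)
		printf("vring %d -> %s\n", i,
		       is_virtq_recvq(i, nr_vring) ? "receiveq" : "transmitq/controlq");
	return 0;
}

Only the even, non-last indices are receive queues, which is why mlx5_vdpa_virtq_enable() calls mlx5_vdpa_steer_update() to rebuild the RQT only when such a queue changes state.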