vdpa/mlx5: recreate a virtq becoming enabled

The virtq configuration may change while a virtq moves from the
disabled state to the enabled state.

Listen to the vring state callback even if the device is not configured.
Recreate the virtq when it moves from the disabled state to the enabled
state while the device is configured.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>

Matan Azrad <matan@mellanox.com>, 2020-04-26 12:07:36 +00:00 (committed by Ferruh Yigit)
parent 7497873f23
commit 9f09b1ca15
5 changed files with 147 additions and 69 deletions
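
In outline, the enable path introduced by this patch behaves as sketched below. This is a simplified, self-contained illustration of the logic only; the structure and helper names are stand-ins rather than the driver's real ones (the actual implementation is the mlx5_vdpa_virtq_enable() hunk at the end of this diff):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver's per-queue and per-device state. */
struct virtq_state {
	bool enabled;   /* last state requested by the guest */
	bool created;   /* HW virtq object currently exists  */
};

struct dev_state {
	bool configured;          /* device-level configuration done */
	struct virtq_state vq[4]; /* toy fixed-size queue array      */
};

/* Sketch of the disabled -> enabled handling described above. */
static int
virtq_set_state(struct dev_state *dev, int idx, bool enable)
{
	struct virtq_state *vq = &dev->vq[idx];

	if (vq->enabled == enable)
		return 0;               /* nothing to do */
	if (!dev->configured) {
		vq->enabled = enable;   /* just record the request */
		return 0;
	}
	if (enable) {
		/* Config may have changed while disabled: recreate. */
		if (vq->created) {
			printf("destroy virtq %d\n", idx);
			vq->created = false;
		}
		printf("create virtq %d with current config\n", idx);
		vq->created = true;
	}
	vq->enabled = enable;
	/*
	 * Even-indexed queues are RX per virtio-net (the real driver also
	 * excludes the control queue); an RX change needs a steering update.
	 */
	if (idx % 2 == 0)
		printf("update RSS steering (RQT)\n");
	return 0;
}

int
main(void)
{
	struct dev_state dev = { .configured = true };

	virtq_set_state(&dev, 0, true);  /* enable RX queue 0        */
	virtq_set_state(&dev, 0, false); /* disable it again         */
	virtq_set_state(&dev, 0, true);  /* re-enable: recreated     */
	return 0;
}

The key design point is that a disable request only records the new state, while an enable request on a configured device rebuilds the hardware queue, so any ring configuration changed by the guest while the queue was disabled is picked up.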

--- a/drivers/vdpa/mlx5/mlx5_vdpa.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.c

@@ -121,13 +121,11 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Invalid device id: %d.", did);
 		return -EINVAL;
 	}
-	if (!priv->configured || vring >= RTE_MIN((int)priv->nr_virtqs,
-	    (int)priv->caps.max_num_virtio_queues * 2) ||
-	    !priv->virtqs[vring].virtq) {
-		DRV_LOG(ERR, "Invalid or unconfigured vring id: %d.", vring);
-		return -EINVAL;
+	if (vring >= (int)priv->caps.max_num_virtio_queues * 2) {
+		DRV_LOG(ERR, "Too big vring id: %d.", vring);
+		return -E2BIG;
 	}
-	return mlx5_vdpa_virtq_enable(&priv->virtqs[vring], state);
+	return mlx5_vdpa_virtq_enable(priv, vring, state);
 }
 
 static int
@@ -206,7 +204,7 @@ mlx5_vdpa_dev_close(int vid)
 	if (priv->configured)
 		ret |= mlx5_vdpa_lm_log(priv);
 	mlx5_vdpa_cqe_event_unset(priv);
-	ret |= mlx5_vdpa_steer_unset(priv);
+	mlx5_vdpa_steer_unset(priv);
 	mlx5_vdpa_virtqs_release(priv);
 	mlx5_vdpa_event_qp_global_release(priv);
 	mlx5_vdpa_mem_dereg(priv);

--- a/drivers/vdpa/mlx5/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/mlx5_vdpa.h

@@ -127,6 +127,24 @@ struct mlx5_vdpa_priv {
 	struct mlx5_vdpa_virtq virtqs[];
 };
 
+/*
+ * Check whether virtq is for traffic receive.
+ * According to VIRTIO_NET Spec the virtqueues index identity its type by:
+ * 0 receiveq1
+ * 1 transmitq1
+ * ...
+ * 2(N-1) receiveqN
+ * 2(N-1)+1 transmitqN
+ * 2N controlq
+ */
+static inline uint8_t
+is_virtq_recvq(int virtq_index, int nr_vring)
+{
+	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
+		return 1;
+	return 0;
+}
+
 /**
  * Release all the prepared memory regions and all their related resources.
  *
@@ -223,15 +241,17 @@ int mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv);
 /**
  * Enable\Disable virtq..
  *
- * @param[in] virtq
- *   The vdpa driver private virtq structure.
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ * @param[in] index
+ *   The virtq index.
  * @param[in] enable
  *   Set to enable, otherwise disable.
  *
 * @return
 *   0 on success, a negative value otherwise.
 */
-int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
+int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable);
 
 /**
  * Unset steering and release all its related resources- stop traffic.
@@ -239,7 +259,18 @@ int mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable);
  * @param[in] priv
  *   The vdpa driver private structure.
  */
-int mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
+
+/**
+ * Update steering according to the received queues status.
+ *
+ * @param[in] priv
+ *   The vdpa driver private structure.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
 
 /**
  * Setup steering and all its related resources to enable RSS traffic from the

--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c

@@ -19,7 +19,8 @@ mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
 	for (i = 0; i < priv->nr_virtqs; ++i) {
 		attr.queue_index = i;
-		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+		if (!priv->virtqs[i].virtq ||
+		    mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
 			DRV_LOG(ERR, "Failed to modify virtq %d logging.", i);
 			return -1;
 		}
 	}
@@ -68,7 +69,8 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 	attr.dirty_bitmap_mkey = mr->mkey->id;
 	for (i = 0; i < priv->nr_virtqs; ++i) {
 		attr.queue_index = i;
-		if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
+		if (!priv->virtqs[i].virtq ||
+		    mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq, &attr)) {
 			DRV_LOG(ERR, "Failed to modify virtq %d for lm.", i);
 			goto err;
 		}
@@ -102,9 +104,14 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 	if (!RTE_VHOST_NEED_LOG(features))
 		return 0;
 	for (i = 0; i < priv->nr_virtqs; ++i) {
-		ret = mlx5_vdpa_virtq_stop(priv, i);
-		if (ret) {
-			DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+		if (priv->virtqs[i].virtq) {
+			ret = mlx5_vdpa_virtq_stop(priv, i);
+			if (ret) {
+				DRV_LOG(ERR, "Failed to stop virtq %d.", i);
+				return -1;
+			}
+		} else {
+			DRV_LOG(ERR, "virtq %d is not created.", i);
 			return -1;
 		}
 		rte_vhost_log_used_vring(priv->vid, i, 0,

--- a/drivers/vdpa/mlx5/mlx5_vdpa_steer.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_steer.c

@@ -12,10 +12,9 @@
 #include "mlx5_vdpa_utils.h"
 #include "mlx5_vdpa.h"
 
-int
-mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+static void
+mlx5_vdpa_rss_flows_destroy(struct mlx5_vdpa_priv *priv)
 {
-	int ret __rte_unused;
 	unsigned i;
 
 	for (i = 0; i < RTE_DIM(priv->steer.rss); ++i) {
@@ -40,6 +39,12 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
 			priv->steer.rss[i].matcher = NULL;
 		}
 	}
+}
+
+void
+mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
+{
+	mlx5_vdpa_rss_flows_destroy(priv);
 	if (priv->steer.tbl) {
 		claim_zero(mlx5_glue->dr_destroy_flow_tbl(priv->steer.tbl));
 		priv->steer.tbl = NULL;
@@ -52,27 +57,13 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
 		claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
 		priv->steer.rqt = NULL;
 	}
-	return 0;
 }
 
-/*
- * According to VIRTIO_NET Spec the virtqueues index identity its type by:
- * 0 receiveq1
- * 1 transmitq1
- * ...
- * 2(N-1) receiveqN
- * 2(N-1)+1 transmitqN
- * 2N controlq
- */
-static uint8_t
-is_virtq_recvq(int virtq_index, int nr_vring)
-{
-	if (virtq_index % 2 == 0 && virtq_index != nr_vring - 1)
-		return 1;
-	return 0;
-}
-
 #define MLX5_VDPA_DEFAULT_RQT_SIZE 512
+/*
+ * Return the number of queues configured to the table on success, otherwise
+ * -1 on error.
+ */
 static int
 mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 {
@@ -83,7 +74,7 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 						      + rqt_n *
 						      sizeof(uint32_t), 0);
 	uint32_t k = 0, j;
-	int ret = 0;
+	int ret = 0, num;
 
 	if (!attr) {
 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
@@ -92,11 +83,15 @@
 	}
 	for (i = 0; i < priv->nr_virtqs; i++) {
 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
-		    priv->virtqs[i].enable) {
+		    priv->virtqs[i].enable && priv->virtqs[i].virtq) {
 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
 			k++;
 		}
 	}
+	if (k == 0)
+		/* No enabled RQ to configure for RSS. */
+		return 0;
+	num = (int)k;
 	for (j = 0; k != rqt_n; ++k, ++j)
 		attr->rq_list[k] = attr->rq_list[j];
 	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
@@ -114,26 +109,7 @@
 			DRV_LOG(ERR, "Failed to modify RQT.");
 	}
 	rte_free(attr);
-	return ret;
-}
-
-int
-mlx5_vdpa_virtq_enable(struct mlx5_vdpa_virtq *virtq, int enable)
-{
-	struct mlx5_vdpa_priv *priv = virtq->priv;
-	int ret = 0;
-
-	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", virtq->index,
-		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
-	if (virtq->enable == !!enable)
-		return 0;
-	virtq->enable = !!enable;
-	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-		ret = mlx5_vdpa_rqt_prepare(priv);
-		if (ret)
-			virtq->enable = !enable;
-	}
-	return ret;
+	return ret ? -1 : num;
 }
 
 static int __rte_unused
@@ -261,12 +237,33 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 #endif /* HAVE_MLX5DV_DR */
 }
 
+int
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+{
+	int ret = mlx5_vdpa_rqt_prepare(priv);
+
+	if (ret == 0) {
+		mlx5_vdpa_rss_flows_destroy(priv);
+		if (priv->steer.rqt) {
+			claim_zero(mlx5_devx_cmd_destroy(priv->steer.rqt));
+			priv->steer.rqt = NULL;
+		}
+	} else if (ret < 0) {
+		return ret;
+	} else if (!priv->steer.rss[0].flow) {
+		ret = mlx5_vdpa_rss_flows_create(priv);
+		if (ret) {
+			DRV_LOG(ERR, "Cannot create RSS flows.");
+			return -1;
+		}
+	}
+	return 0;
+}
+
 int
 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 {
 #ifdef HAVE_MLX5DV_DR
-	if (mlx5_vdpa_rqt_prepare(priv))
-		return -1;
 	priv->steer.domain = mlx5_glue->dr_create_domain(priv->ctx,
 					      MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
 	if (!priv->steer.domain) {
@@ -278,7 +275,7 @@ mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(ERR, "Failed to create table 0 with Rx domain.");
 		goto error;
 	}
-	if (mlx5_vdpa_rss_flows_create(priv))
+	if (mlx5_vdpa_steer_update(priv))
 		goto error;
 	return 0;
 error:

--- a/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_virtq.c

@@ -59,9 +59,11 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
 			}
 		}
+		virtq->intr_handle.fd = -1;
 	}
 	if (virtq->virtq)
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+	virtq->virtq = NULL;
 	for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 		if (virtq->umems[i].obj)
 			claim_zero(mlx5_glue->devx_umem_dereg
@@ -69,10 +71,9 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 		if (virtq->umems[i].buf)
 			rte_free(virtq->umems[i].buf);
 	}
+	memset(&virtq->umems, 0, sizeof(virtq->umems));
 	if (virtq->eqp.fw_qp)
 		mlx5_vdpa_event_qp_destroy(&virtq->eqp);
-	memset(virtq, 0, sizeof(*virtq));
-	virtq->intr_handle.fd = -1;
 	return 0;
 }
 
@@ -81,8 +82,10 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < priv->nr_virtqs; i++)
+	for (i = 0; i < priv->nr_virtqs; i++) {
 		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+		priv->virtqs[i].enable = 0;
+	}
 	if (priv->tis) {
 		claim_zero(mlx5_devx_cmd_destroy(priv->tis));
 		priv->tis = NULL;
@@ -272,10 +275,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 		goto error;
 	if (mlx5_vdpa_virtq_modify(virtq, 1))
 		goto error;
-	virtq->enable = 1;
 	virtq->priv = priv;
-	/* Be sure notifications are not missed during configuration. */
-	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
 	rte_write32(virtq->index, priv->virtq_db_addr);
 	/* Setup doorbell mapping. */
 	virtq->intr_handle.fd = vq.kickfd;
@@ -402,11 +402,56 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		goto error;
 	}
 	priv->nr_virtqs = nr_vring;
-	for (i = 0; i < nr_vring; i++)
+	for (i = 0; i < nr_vring; i++) {
+		claim_zero(rte_vhost_enable_guest_notification(priv->vid, i,
+							       1));
 		if (mlx5_vdpa_virtq_setup(priv, i))
 			goto error;
+	}
 	return 0;
 error:
 	mlx5_vdpa_virtqs_release(priv);
 	return -1;
 }
+
+int
+mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
+{
+	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+	int ret;
+
+	DRV_LOG(INFO, "Update virtq %d status %sable -> %sable.", index,
+		virtq->enable ? "en" : "dis", enable ? "en" : "dis");
+	if (virtq->enable == !!enable)
+		return 0;
+	if (!priv->configured) {
+		virtq->enable = !!enable;
+		return 0;
+	}
+	if (enable) {
+		/* Configuration might have been updated - reconfigure virtq. */
+		if (virtq->virtq) {
+			ret = mlx5_vdpa_virtq_stop(priv, index);
+			if (ret)
+				DRV_LOG(WARNING, "Failed to stop virtq %d.",
+					index);
+			mlx5_vdpa_virtq_unset(virtq);
+		}
+		ret = mlx5_vdpa_virtq_setup(priv, index);
+		if (ret) {
+			DRV_LOG(ERR, "Failed to setup virtq %d.", index);
+			return ret;
+			/* The only case virtq can stay invalid. */
+		}
+	}
+	virtq->enable = !!enable;
+	if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
+		/* Need to add received virtq to the RQT table of the TIRs. */
+		ret = mlx5_vdpa_steer_update(priv);
+		if (ret) {
+			virtq->enable = !enable;
+			return ret;
+		}
+	}
+	return 0;
+}