vdpa/mlx5: add virtq sub-resources creation
Pre-create virtqueue sub-resources in the device probe stage, and only modify the virtqueue in the device config stage. The steering table also needs to support dummy virtqueues. This accelerates the LM (live migration) process and reduces its time by 40%.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Signed-off-by: Yajun Wu <yajunw@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
parent 6ebb02b44b
commit 91edbbfbb4
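In short, the patch splits virtq bring-up across two stages: the heavyweight sub-resources (event QPs, counters, umems and, when the device supports modifying a virtq, a dummy virtq object) are created once at probe time, and device config only modifies the pre-created objects. The sketch below condenses that flow; it is not part of the patch, the wrapper names are hypothetical, but the mlx5_vdpa_* functions are the ones changed in the hunks that follow.

/* Condensed two-stage flow (hypothetical wrappers around the patched
 * driver functions; see the hunks below for the real call sites). */
static int
probe_stage(struct mlx5_vdpa_priv *priv)
{
	/* Pre-create per-virtq sub-resources and steer to dummy queues. */
	return mlx5_vdpa_virtq_resource_prepare(priv);
}

static int
config_stage(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)
{
	/* Modify (not recreate) the pre-created virtq, then switch
	 * steering from the dummy queues to the real ones. */
	if (mlx5_vdpa_virtq_setup(priv, index, reg_kick))
		return -1;
	return mlx5_vdpa_steer_update(priv, false);
}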
@@ -628,65 +628,39 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
-	struct mlx5_vdpa_virtq *virtq;
+	uint32_t max_queues;
 	uint32_t index;
-	uint32_t i;
+	struct mlx5_vdpa_virtq *virtq;

-	for (index = 0; index < priv->caps.max_num_virtio_queues * 2;
+	for (index = 0; index < priv->caps.max_num_virtio_queues;
 		index++) {
 		virtq = &priv->virtqs[index];
 		pthread_mutex_init(&virtq->virtq_lock, NULL);
 	}
-	if (!priv->queues)
+	if (!priv->queues || !priv->queue_size)
 		return 0;
-	for (index = 0; index < (priv->queues * 2); ++index) {
-		virtq = &priv->virtqs[index];
-		int ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,
-					-1, virtq);
-
-		if (ret) {
-			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
-				index);
-			return -1;
-		}
-		if (priv->caps.queue_counters_valid) {
-			if (!virtq->counters)
-				virtq->counters =
-					mlx5_devx_cmd_create_virtio_q_counters
-						(priv->cdev->ctx);
-			if (!virtq->counters) {
-				DRV_LOG(ERR, "Failed to create virtq couners for virtq"
-					" %d.", index);
-				return -1;
-			}
-		}
-		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
-			uint32_t size;
-			void *buf;
-			struct mlx5dv_devx_umem *obj;
-
-			size = priv->caps.umems[i].a * priv->queue_size +
-					priv->caps.umems[i].b;
-			buf = rte_zmalloc(__func__, size, 4096);
-			if (buf == NULL) {
-				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
-						" %u.", i, index);
-				return -1;
-			}
-			obj = mlx5_glue->devx_umem_reg(priv->cdev->ctx, buf,
-					size, IBV_ACCESS_LOCAL_WRITE);
-			if (obj == NULL) {
-				rte_free(buf);
-				DRV_LOG(ERR, "Failed to register umem %d for virtq %u.",
-						i, index);
-				return -1;
-			}
-			virtq->umems[i].size = size;
-			virtq->umems[i].buf = buf;
-			virtq->umems[i].obj = obj;
-		}
-	}
-	return 0;
+	max_queues = (priv->queues < priv->caps.max_num_virtio_queues) ?
+		(priv->queues * 2) : (priv->caps.max_num_virtio_queues);
+	for (index = 0; index < max_queues; ++index)
+		if (mlx5_vdpa_virtq_single_resource_prepare(priv,
+			index))
+			goto error;
+	if (mlx5_vdpa_is_modify_virtq_supported(priv))
+		if (mlx5_vdpa_steer_update(priv, true))
+			goto error;
+	return 0;
+error:
+	for (index = 0; index < max_queues; ++index) {
+		virtq = &priv->virtqs[index];
+		if (virtq->virtq) {
+			pthread_mutex_lock(&virtq->virtq_lock);
+			mlx5_vdpa_virtq_unset(virtq);
+			pthread_mutex_unlock(&virtq->virtq_lock);
+		}
+	}
+	if (mlx5_vdpa_is_modify_virtq_supported(priv))
+		mlx5_vdpa_steer_unset(priv);
+	return -1;
 }

 static int
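The umem sizing removed above (it reappears in mlx5_vdpa_virtq.c via mlx5_vdpa_virtq_sub_objs_prepare()) is linear in the queue depth: roughly a per-descriptor cost caps.umems[i].a plus a fixed overhead caps.umems[i].b. A worked example with hypothetical capability values:

/* size = a * queue_size + b; with hypothetical caps a = 128, b = 4096
 * and queue_size = 256, each umem is 128 * 256 + 4096 = 36864 bytes. */
static uint32_t
umem_size(uint32_t a, uint32_t b, uint16_t queue_size)
{
	return a * (uint32_t)queue_size + b;
}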
@@ -277,13 +277,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
  *   The guest notification file descriptor.
  * @param[in/out] virtq
  *   Pointer to the virt-queue structure.
+ * @param[in] reset
+ *   If true, it will reset event qp.
  *
  * @return
  *   0 on success, -1 otherwise and rte_errno is set.
  */
 int
 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-		int callfd, struct mlx5_vdpa_virtq *virtq);
+		int callfd, struct mlx5_vdpa_virtq *virtq, bool reset);

 /**
  * Destroy an event QP and all its related resources.
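The new reset flag exists because firmware moves an event QP to the error state only when its virtq is destroyed. If the pre-created virtq object survives into device config (modify-virtq supported), the QP can be reused as-is; only a queue whose virtq was destroyed needs the RST->RTS bounce. The call site added later in this patch passes !virtq->virtq accordingly. A minimal illustration (hypothetical caller):

/* Hypothetical caller: reuse the probe-time event QP; request the
 * RST->RTS bounce only when no virtq object survived (its destroy
 * put the QP into the error state). */
static int
reuse_event_qp(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_virtq *virtq,
		struct rte_vhost_vring *vq)
{
	bool reset = !virtq->virtq; /* matches the call site in this patch */

	return mlx5_vdpa_event_qp_prepare(priv, vq->size, vq->callfd,
			virtq, reset);
}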
@@ -403,11 +405,13 @@ void mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv);
  *
  * @param[in] priv
  *   The vdpa driver private structure.
+ * @param[in] is_dummy
+ *   If set, it is updated with dummy queue for prepare resource.
  *
  * @return
  *   0 on success, a negative value otherwise.
  */
-int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv);
+int mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy);

 /**
  * Setup steering and all its related resources to enable RSS traffic from the
@@ -581,9 +585,14 @@ mlx5_vdpa_c_thread_wait_bulk_tasks_done(uint32_t *remaining_cnt,
 int
 mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick);
 void
 mlx5_vdpa_vq_destroy(struct mlx5_vdpa_virtq *virtq);
 void
 mlx5_vdpa_dev_cache_clean(struct mlx5_vdpa_priv *priv);
 void
 mlx5_vdpa_virtq_unreg_intr_handle_all(struct mlx5_vdpa_priv *priv);
+bool
+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
+		int index);
+int
+mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp);
+void
+mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq);
 #endif /* RTE_PMD_MLX5_VDPA_H_ */
@@ -249,7 +249,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
 {
 	unsigned int i;

-	for (i = 0; i < priv->caps.max_num_virtio_queues * 2; i++) {
+	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;

 		mlx5_vdpa_queue_complete(cq);
@@ -618,7 +618,7 @@ mlx5_vdpa_qps2rts(struct mlx5_vdpa_event_qp *eqp)
 	return 0;
 }

-static int
+int
 mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
 {
 	if (mlx5_devx_cmd_modify_qp_state(eqp->fw_qp, MLX5_CMD_OP_QP_2RST,
@@ -638,7 +638,7 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)

 int
 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-	int callfd, struct mlx5_vdpa_virtq *virtq)
+	int callfd, struct mlx5_vdpa_virtq *virtq, bool reset)
 {
 	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
 	struct mlx5_devx_qp_attr attr = {0};
@@ -649,11 +649,10 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 		/* Reuse existing resources. */
 		eqp->cq.callfd = callfd;
 		/* FW will set event qp to error state in q destroy. */
-		if (!mlx5_vdpa_qps2rst2rts(eqp)) {
+		if (reset && !mlx5_vdpa_qps2rst2rts(eqp))
 			rte_write32(rte_cpu_to_be_32(RTE_BIT32(log_desc_n)),
 					&eqp->sw_qp.db_rec[0]);
-			return 0;
-		}
+		return 0;
 	}
 	if (eqp->fw_qp)
 		mlx5_vdpa_event_qp_destroy(eqp);
@@ -57,7 +57,7 @@ mlx5_vdpa_steer_unset(struct mlx5_vdpa_priv *priv)
  *   -1 on error.
  */
 static int
-mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
+mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv, bool is_dummy)
 {
 	int i;
 	uint32_t rqt_n = RTE_MIN(MLX5_VDPA_DEFAULT_RQT_SIZE,
@@ -67,15 +67,20 @@ mlx5_vdpa_rqt_prepare(struct mlx5_vdpa_priv *priv)
 			      sizeof(uint32_t), 0);
 	uint32_t k = 0, j;
 	int ret = 0, num;
+	uint16_t nr_vring = is_dummy ?
+	(((priv->queues * 2) < priv->caps.max_num_virtio_queues) ?
+	(priv->queues * 2) : priv->caps.max_num_virtio_queues) : priv->nr_virtqs;

 	if (!attr) {
 		DRV_LOG(ERR, "Failed to allocate RQT attributes memory.");
 		rte_errno = ENOMEM;
 		return -ENOMEM;
 	}
-	for (i = 0; i < priv->nr_virtqs; i++) {
+	for (i = 0; i < nr_vring; i++) {
 		if (is_virtq_recvq(i, priv->nr_virtqs) &&
-		    priv->virtqs[i].enable && priv->virtqs[i].virtq) {
+			(is_dummy || (priv->virtqs[i].enable &&
+			priv->virtqs[i].configured)) &&
+			priv->virtqs[i].virtq) {
 			attr->rq_list[k] = priv->virtqs[i].virtq->id;
 			k++;
 		}
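With is_dummy set (the probe-stage path), the RQT is populated from every prepared queue rather than only the vhost-enabled, configured ones, so the ring count falls back to the configured queue pairs capped by the device maximum. A hypothetical restatement of that selection:

/* Hypothetical restatement of the nr_vring selection above. */
static uint16_t
rqt_nr_vring(struct mlx5_vdpa_priv *priv, bool is_dummy)
{
	uint16_t prepared = priv->queues * 2; /* queue pairs -> virtqs */

	if (!is_dummy)
		return priv->nr_virtqs; /* vhost-negotiated count */
	return RTE_MIN(prepared, (uint16_t)priv->caps.max_num_virtio_queues);
}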
@@ -235,12 +240,12 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 }

 int
-mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
+mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv, bool is_dummy)
 {
 	int ret;

 	pthread_mutex_lock(&priv->steer_update_lock);
-	ret = mlx5_vdpa_rqt_prepare(priv);
+	ret = mlx5_vdpa_rqt_prepare(priv, is_dummy);
 	if (ret == 0) {
 		mlx5_vdpa_steer_unset(priv);
 	} else if (ret < 0) {
@@ -261,7 +266,7 @@ mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
 int
 mlx5_vdpa_steer_setup(struct mlx5_vdpa_priv *priv)
 {
-	if (mlx5_vdpa_steer_update(priv))
+	if (mlx5_vdpa_steer_update(priv, false))
 		goto error;
 	return 0;
 error:
@@ -146,10 +146,10 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 	}
 }

-static int
+void
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
-	int ret = -EAGAIN;
+	int ret;

 	mlx5_vdpa_virtq_unregister_intr_handle(virtq);
 	if (virtq->configured) {
@@ -157,12 +157,12 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 		if (ret)
 			DRV_LOG(WARNING, "Failed to stop virtq %d.",
 				virtq->index);
-		virtq->configured = 0;
 		claim_zero(mlx5_devx_cmd_destroy(virtq->virtq));
+		virtq->index = 0;
+		virtq->virtq = NULL;
+		virtq->configured = 0;
 	}
-	virtq->virtq = NULL;
 	virtq->notifier_state = MLX5_VDPA_NOTIFIER_STATE_DISABLED;
-	return 0;
 }

 void
@@ -175,6 +175,9 @@ mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 		virtq = &priv->virtqs[i];
 		pthread_mutex_lock(&virtq->virtq_lock);
 		mlx5_vdpa_virtq_unset(virtq);
+		if (i < (priv->queues * 2))
+			mlx5_vdpa_virtq_single_resource_prepare(
+					priv, i);
 		pthread_mutex_unlock(&virtq->virtq_lock);
 	}
 	priv->features = 0;
@@ -258,7 +261,8 @@ mlx5_vdpa_hva_to_gpa(struct rte_vhost_memory *mem, uint64_t hva)
 static int
 mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		struct mlx5_devx_virtq_attr *attr,
-		struct rte_vhost_vring *vq, int index)
+		struct rte_vhost_vring *vq,
+		int index, bool is_prepare)
 {
 	struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
 	uint64_t gpa;
@@ -277,11 +281,15 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			MLX5_VIRTQ_MODIFY_TYPE_Q_MKEY |
 			MLX5_VIRTQ_MODIFY_TYPE_QUEUE_FEATURE_BIT_MASK |
 			MLX5_VIRTQ_MODIFY_TYPE_EVENT_MODE;
-	attr->tso_ipv4 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
-	attr->tso_ipv6 = !!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
-	attr->tx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
-	attr->rx_csum = !!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
-	attr->virtio_version_1_0 =
+	attr->tso_ipv4 = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO4));
+	attr->tso_ipv6 = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_HOST_TSO6));
+	attr->tx_csum = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_CSUM));
+	attr->rx_csum = is_prepare ? 1 :
+		!!(priv->features & (1ULL << VIRTIO_NET_F_GUEST_CSUM));
+	attr->virtio_version_1_0 = is_prepare ? 1 :
 		!!(priv->features & (1ULL << VIRTIO_F_VERSION_1));
 	attr->q_type =
 		(priv->features & (1ULL << VIRTIO_F_RING_PACKED)) ?
@@ -290,12 +298,12 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 	 * No need event QPs creation when the guest in poll mode or when the
 	 * capability allows it.
 	 */
-	attr->event_mode = vq->callfd != -1 ||
+	attr->event_mode = is_prepare || vq->callfd != -1 ||
 	!(priv->caps.event_mode & (1 << MLX5_VIRTQ_EVENT_MODE_NO_MSIX)) ?
 		MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
 	if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
-		ret = mlx5_vdpa_event_qp_prepare(priv,
-				vq->size, vq->callfd, virtq);
+		ret = mlx5_vdpa_event_qp_prepare(priv, vq->size,
+				vq->callfd, virtq, !virtq->virtq);
 		if (ret) {
 			DRV_LOG(ERR,
 				"Failed to create event QPs for virtq %d.",
@@ -320,7 +328,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		attr->counters_obj_id = virtq->counters->id;
 	}
 	/* Setup 3 UMEMs for each virtq. */
-	if (virtq->virtq) {
+	if (!virtq->virtq) {
 		for (i = 0; i < RTE_DIM(virtq->umems); ++i) {
 			uint32_t size;
 			void *buf;
@@ -345,7 +353,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			buf = rte_zmalloc(__func__,
 					size, 4096);
 			if (buf == NULL) {
-				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq"
+				DRV_LOG(ERR, "Cannot allocate umem %d memory for virtq."
 						" %u.", i, index);
 				return -1;
 			}
@@ -366,7 +374,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			attr->umems[i].size = virtq->umems[i].size;
 		}
 	}
-	if (attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
+	if (!is_prepare && attr->q_type == MLX5_VIRTQ_TYPE_SPLIT) {
 		gpa = mlx5_vdpa_hva_to_gpa(priv->vmem_info.vmem,
 				(uint64_t)(uintptr_t)vq->desc);
 		if (!gpa) {
@@ -389,21 +397,23 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 		}
 		attr->available_addr = gpa;
 	}
-	ret = rte_vhost_get_vring_base(priv->vid,
+	if (!is_prepare) {
+		ret = rte_vhost_get_vring_base(priv->vid,
 			index, &last_avail_idx, &last_used_idx);
-	if (ret) {
-		last_avail_idx = 0;
-		last_used_idx = 0;
-		DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0.");
-	} else {
-		DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
+		if (ret) {
+			last_avail_idx = 0;
+			last_used_idx = 0;
+			DRV_LOG(WARNING, "Couldn't get vring base, idx are set to 0.");
+		} else {
+			DRV_LOG(INFO, "vid %d: Init last_avail_idx=%d, last_used_idx=%d for "
 				"virtq %d.", priv->vid, last_avail_idx,
 				last_used_idx, index);
+		}
 	}
 	attr->hw_available_index = last_avail_idx;
 	attr->hw_used_index = last_used_idx;
 	attr->q_size = vq->size;
-	attr->mkey = priv->gpa_mkey_index;
+	attr->mkey = is_prepare ? 0 : priv->gpa_mkey_index;
 	attr->tis_id = priv->tiss[(index / 2) % priv->num_lag_ports]->id;
 	attr->queue_index = index;
 	attr->pd = priv->cdev->pdn;
@@ -416,6 +426,39 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 	return 0;
 }

+bool
+mlx5_vdpa_virtq_single_resource_prepare(struct mlx5_vdpa_priv *priv,
+		int index)
+{
+	struct mlx5_devx_virtq_attr attr = {0};
+	struct mlx5_vdpa_virtq *virtq;
+	struct rte_vhost_vring vq = {
+		.size = priv->queue_size,
+		.callfd = -1,
+	};
+	int ret;
+
+	virtq = &priv->virtqs[index];
+	virtq->index = index;
+	virtq->vq_size = vq.size;
+	virtq->configured = 0;
+	virtq->virtq = NULL;
+	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr, &vq, index, true);
+	if (ret) {
+		DRV_LOG(ERR,
+			"Cannot prepare setup resource for virtq %d.", index);
+		return true;
+	}
+	if (mlx5_vdpa_is_modify_virtq_supported(priv)) {
+		virtq->virtq =
+			mlx5_devx_cmd_create_virtq(priv->cdev->ctx, &attr);
+		virtq->priv = priv;
+		if (!virtq->virtq)
+			return true;
+	}
+	return false;
+}
+
 bool
 mlx5_vdpa_is_modify_virtq_supported(struct mlx5_vdpa_priv *priv)
 {
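Note the return convention: mlx5_vdpa_virtq_single_resource_prepare() returns true on failure, which is what the goto error test in the probe loop (first hunk) relies on. A minimal probe-side sketch (hypothetical wrapper; the real loop lives in mlx5_vdpa_virtq_resource_prepare()):

/* Hypothetical wrapper: prepare the first max_queues virtqs;
 * a true return value from the driver helper means failure. */
static int
prepare_all_virtqs(struct mlx5_vdpa_priv *priv, uint32_t max_queues)
{
	uint32_t index;

	for (index = 0; index < max_queues; ++index)
		if (mlx5_vdpa_virtq_single_resource_prepare(priv, index))
			return -1; /* caller unwinds via mlx5_vdpa_virtq_unset() */
	return 0;
}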
@@ -473,7 +516,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index, bool reg_kick)
 	virtq->priv = priv;
 	virtq->stopped = 0;
 	ret = mlx5_vdpa_virtq_sub_objs_prepare(priv, &attr,
-				&vq, index);
+				&vq, index, false);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to setup update virtq attr %d.",
 			index);
@@ -746,7 +789,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
 	if (virtq->configured) {
 		virtq->enable = 0;
 		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-			ret = mlx5_vdpa_steer_update(priv);
+			ret = mlx5_vdpa_steer_update(priv, false);
 			if (ret)
 				DRV_LOG(WARNING, "Failed to disable steering "
 					"for virtq %d.", index);
@@ -761,7 +804,7 @@ mlx5_vdpa_virtq_enable(struct mlx5_vdpa_priv *priv, int index, int enable)
 		}
 		virtq->enable = 1;
 		if (is_virtq_recvq(virtq->index, priv->nr_virtqs)) {
-			ret = mlx5_vdpa_steer_update(priv);
+			ret = mlx5_vdpa_steer_update(priv, false);
 			if (ret)
 				DRV_LOG(WARNING, "Failed to enable steering "
 					"for virtq %d.", index);