vdpa/mlx5: optimize datapath-control synchronization
The driver used a single global lock for any synchronization needed for the datapath and control path. It is better to group the critical sections with the other ones that should be synchronized.

Replace the global lock with the following locks:

1. virtq locks (per virtq) synchronize datapath polling and parallel configurations on the same virtq.
2. A doorbell lock synchronizes doorbell updates; the doorbell is shared by all the virtqs in the device.
3. A steering lock for the shared steering objects updates.

Signed-off-by: Li Zhang <lizh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
parent 7f2de21244
commit 057f7d2084
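Before the file-by-file diff, here is a minimal, self-contained sketch of the locking scheme this commit introduces. It is a simplified model, not the driver code: plain pthread primitives stand in for DPDK's rte_spinlock_t, an ordinary integer stands in for the mapped doorbell register, and all structure and function names below (vdpa_priv, virtq_kick, and so on) are illustrative only.

/* Sketch of the three-lock scheme (simplified model, not the driver). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VIRTQS 16

struct virtq {
	pthread_mutex_t virtq_lock; /* datapath polling vs. reconfiguration */
	int enable;
	uint16_t index;
};

struct vdpa_priv {
	struct virtq virtqs[MAX_VIRTQS];
	pthread_spinlock_t db_lock;        /* one doorbell shared by all virtqs */
	pthread_mutex_t steer_update_lock; /* shared steering objects */
	volatile uint32_t virtq_db_addr;   /* stand-in for the mapped doorbell */
};

/* Datapath kick: only the kicked virtq and the doorbell are locked. */
static void
virtq_kick(struct vdpa_priv *priv, uint16_t idx)
{
	struct virtq *vq = &priv->virtqs[idx];

	pthread_mutex_lock(&vq->virtq_lock);
	pthread_spin_lock(&priv->db_lock);
	priv->virtq_db_addr = vq->index; /* models the rte_write32() doorbell */
	pthread_spin_unlock(&priv->db_lock);
	pthread_mutex_unlock(&vq->virtq_lock);
}

/* Control path: reconfiguring virtq N blocks only virtq N's datapath. */
static void
virtq_enable(struct vdpa_priv *priv, uint16_t idx, int state)
{
	struct virtq *vq = &priv->virtqs[idx];

	pthread_mutex_lock(&vq->virtq_lock);
	vq->enable = state;
	pthread_mutex_unlock(&vq->virtq_lock);
}

/* Steering path: updates of the shared steering objects are serialized. */
static void
steer_update(struct vdpa_priv *priv)
{
	pthread_mutex_lock(&priv->steer_update_lock);
	/* ...rebuild RQT/RSS flow objects here... */
	pthread_mutex_unlock(&priv->steer_update_lock);
}

int
main(void)
{
	struct vdpa_priv priv = {0};
	uint16_t i;

	pthread_spin_init(&priv.db_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_mutex_init(&priv.steer_update_lock, NULL);
	for (i = 0; i < MAX_VIRTQS; i++) {
		pthread_mutex_init(&priv.virtqs[i].virtq_lock, NULL);
		priv.virtqs[i].index = i;
	}
	virtq_enable(&priv, 0, 1);
	steer_update(&priv);
	virtq_kick(&priv, 0);
	printf("virtq 0: enable=%d, last doorbell=%u\n",
	       priv.virtqs[0].enable, priv.virtq_db_addr);
	return 0;
}

The payoff over the old single vq_config_lock is that a slow control operation on one virtq no longer stalls completion polling or doorbell kicks on every other virtq; only the doorbell write itself still funnels through one spinlock.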
drivers/vdpa/mlx5/mlx5_vdpa.c

@@ -136,6 +136,7 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
 	struct mlx5_vdpa_priv *priv =
 		mlx5_vdpa_find_priv_resource_by_vdev(vdev);
+	struct mlx5_vdpa_virtq *virtq;
 	int ret;
 
 	if (priv == NULL) {
@@ -146,9 +147,10 @@ mlx5_vdpa_set_vring_state(int vid, int vring, int state)
 		DRV_LOG(ERR, "Too big vring id: %d.", vring);
 		return -E2BIG;
 	}
-	pthread_mutex_lock(&priv->vq_config_lock);
+	virtq = &priv->virtqs[vring];
+	pthread_mutex_lock(&virtq->virtq_lock);
 	ret = mlx5_vdpa_virtq_enable(priv, vring, state);
-	pthread_mutex_unlock(&priv->vq_config_lock);
+	pthread_mutex_unlock(&virtq->virtq_lock);
 	return ret;
 }
 
@@ -268,7 +270,9 @@ mlx5_vdpa_dev_close(int vid)
 		ret |= mlx5_vdpa_lm_log(priv);
 		priv->state = MLX5_VDPA_STATE_IN_PROGRESS;
 	}
+	pthread_mutex_lock(&priv->steer_update_lock);
 	mlx5_vdpa_steer_unset(priv);
+	pthread_mutex_unlock(&priv->steer_update_lock);
 	mlx5_vdpa_virtqs_release(priv);
 	mlx5_vdpa_drain_cq(priv);
 	if (priv->lm_mr.addr)
@@ -277,8 +281,6 @@ mlx5_vdpa_dev_close(int vid)
 	if (!priv->connected)
 		mlx5_vdpa_dev_cache_clean(priv);
 	priv->vid = 0;
-	/* The mutex may stay locked after event thread cancel - initiate it. */
-	pthread_mutex_init(&priv->vq_config_lock, NULL);
 	DRV_LOG(INFO, "vDPA device %d was closed.", vid);
 	return ret;
 }
@@ -550,15 +552,21 @@ mlx5_vdpa_config_get(struct mlx5_kvargs_ctrl *mkvlist,
 static int
 mlx5_vdpa_virtq_resource_prepare(struct mlx5_vdpa_priv *priv)
 {
+	struct mlx5_vdpa_virtq *virtq;
 	uint32_t index;
 	uint32_t i;
 
+	for (index = 0; index < priv->caps.max_num_virtio_queues * 2;
+		index++) {
+		virtq = &priv->virtqs[index];
+		pthread_mutex_init(&virtq->virtq_lock, NULL);
+	}
 	if (!priv->queues)
 		return 0;
 	for (index = 0; index < (priv->queues * 2); ++index) {
-		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[index];
+		virtq = &priv->virtqs[index];
 		int ret = mlx5_vdpa_event_qp_prepare(priv, priv->queue_size,
-					-1, &virtq->eqp);
+					-1, virtq);
 
 		if (ret) {
 			DRV_LOG(ERR, "Failed to create event QPs for virtq %d.",
@@ -717,7 +725,8 @@ mlx5_vdpa_dev_probe(struct mlx5_common_device *cdev,
 	priv->num_lag_ports = attr->num_lag_ports;
 	if (attr->num_lag_ports == 0)
 		priv->num_lag_ports = 1;
-	pthread_mutex_init(&priv->vq_config_lock, NULL);
+	rte_spinlock_init(&priv->db_lock);
+	pthread_mutex_init(&priv->steer_update_lock, NULL);
 	priv->cdev = cdev;
 	mlx5_vdpa_config_get(mkvlist, priv);
 	if (mlx5_vdpa_create_dev_resources(priv))
@@ -803,7 +812,6 @@ mlx5_vdpa_dev_release(struct mlx5_vdpa_priv *priv)
 	mlx5_vdpa_release_dev_resources(priv);
 	if (priv->vdev)
 		rte_vdpa_unregister_device(priv->vdev);
-	pthread_mutex_destroy(&priv->vq_config_lock);
 	rte_free(priv);
 }
drivers/vdpa/mlx5/mlx5_vdpa.h

@@ -82,6 +82,7 @@ struct mlx5_vdpa_virtq {
 	bool stopped;
 	uint32_t configured:1;
 	uint32_t version;
+	pthread_mutex_t virtq_lock;
 	struct mlx5_vdpa_priv *priv;
 	struct mlx5_devx_obj *virtq;
 	struct mlx5_devx_obj *counters;
@@ -126,7 +127,8 @@ struct mlx5_vdpa_priv {
 	TAILQ_ENTRY(mlx5_vdpa_priv) next;
 	bool connected;
 	enum mlx5_dev_state state;
-	pthread_mutex_t vq_config_lock;
+	rte_spinlock_t db_lock;
+	pthread_mutex_t steer_update_lock;
 	uint64_t no_traffic_counter;
 	pthread_t timer_tid;
 	int event_mode;
@@ -222,14 +224,15 @@ int mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv);
  *   Number of descriptors.
  * @param[in] callfd
  *   The guest notification file descriptor.
- * @param[in/out] eqp
- *   Pointer to the event QP structure.
+ * @param[in/out] virtq
+ *   Pointer to the virt-queue structure.
  *
  * @return
  *   0 on success, -1 otherwise and rte_errno is set.
  */
-int mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-		int callfd, struct mlx5_vdpa_event_qp *eqp);
+int
+mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
+		int callfd, struct mlx5_vdpa_virtq *virtq);
 
 /**
  * Destroy an event QP and all its related resources.
drivers/vdpa/mlx5/mlx5_vdpa_event.c

@@ -85,12 +85,13 @@ mlx5_vdpa_cq_arm(struct mlx5_vdpa_priv *priv, struct mlx5_vdpa_cq *cq)
 
 static int
 mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
-		    int callfd, struct mlx5_vdpa_cq *cq)
+		int callfd, struct mlx5_vdpa_virtq *virtq)
 {
 	struct mlx5_devx_cq_attr attr = {
 		.use_first_only = 1,
 		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar.obj),
 	};
+	struct mlx5_vdpa_cq *cq = &virtq->eqp.cq;
 	uint16_t event_nums[1] = {0};
 	int ret;
 
@@ -102,10 +103,11 @@ mlx5_vdpa_cq_create(struct mlx5_vdpa_priv *priv, uint16_t log_desc_n,
 	cq->log_desc_n = log_desc_n;
 	rte_spinlock_init(&cq->sl);
 	/* Subscribe CQ event to the event channel controlled by the driver. */
-	ret = mlx5_os_devx_subscribe_devx_event(priv->eventc,
-						cq->cq_obj.cq->obj,
-						sizeof(event_nums), event_nums,
-						(uint64_t)(uintptr_t)cq);
+	ret = mlx5_glue->devx_subscribe_devx_event(priv->eventc,
+						cq->cq_obj.cq->obj,
+						sizeof(event_nums),
+						event_nums,
+						(uint64_t)(uintptr_t)virtq);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to subscribe CQE event.");
 		rte_errno = errno;
@@ -167,13 +169,17 @@ mlx5_vdpa_cq_poll(struct mlx5_vdpa_cq *cq)
 static void
 mlx5_vdpa_arm_all_cqs(struct mlx5_vdpa_priv *priv)
 {
+	struct mlx5_vdpa_virtq *virtq;
 	struct mlx5_vdpa_cq *cq;
 	int i;
 
 	for (i = 0; i < priv->nr_virtqs; i++) {
+		virtq = &priv->virtqs[i];
+		pthread_mutex_lock(&virtq->virtq_lock);
 		cq = &priv->virtqs[i].eqp.cq;
 		if (cq->cq_obj.cq && !cq->armed)
 			mlx5_vdpa_cq_arm(priv, cq);
+		pthread_mutex_unlock(&virtq->virtq_lock);
 	}
 }
 
@@ -220,13 +226,18 @@ mlx5_vdpa_queue_complete(struct mlx5_vdpa_cq *cq)
 static uint32_t
 mlx5_vdpa_queues_complete(struct mlx5_vdpa_priv *priv)
 {
-	int i;
+	struct mlx5_vdpa_virtq *virtq;
+	struct mlx5_vdpa_cq *cq;
 	uint32_t max = 0;
+	uint32_t comp;
+	int i;
 
 	for (i = 0; i < priv->nr_virtqs; i++) {
-		struct mlx5_vdpa_cq *cq = &priv->virtqs[i].eqp.cq;
-		uint32_t comp = mlx5_vdpa_queue_complete(cq);
-
+		virtq = &priv->virtqs[i];
+		pthread_mutex_lock(&virtq->virtq_lock);
+		cq = &virtq->eqp.cq;
+		comp = mlx5_vdpa_queue_complete(cq);
+		pthread_mutex_unlock(&virtq->virtq_lock);
 		if (comp > max)
 			max = comp;
 	}
@@ -253,7 +264,7 @@ mlx5_vdpa_drain_cq(struct mlx5_vdpa_priv *priv)
 }
 
 /* Wait on all CQs channel for completion event. */
-static struct mlx5_vdpa_cq *
+static struct mlx5_vdpa_virtq *
 mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
 {
 #ifdef HAVE_IBV_DEVX_EVENT
@@ -265,7 +276,8 @@ mlx5_vdpa_event_wait(struct mlx5_vdpa_priv *priv __rte_unused)
 					    sizeof(out.buf));
 
 	if (ret >= 0)
-		return (struct mlx5_vdpa_cq *)(uintptr_t)out.event_resp.cookie;
+		return (struct mlx5_vdpa_virtq *)
+				(uintptr_t)out.event_resp.cookie;
 	DRV_LOG(INFO, "Got error in devx_get_event, ret = %d, errno = %d.",
 		ret, errno);
 #endif
@@ -276,7 +288,7 @@ static void *
 mlx5_vdpa_event_handle(void *arg)
 {
 	struct mlx5_vdpa_priv *priv = arg;
-	struct mlx5_vdpa_cq *cq;
+	struct mlx5_vdpa_virtq *virtq;
 	uint32_t max;
 
 	switch (priv->event_mode) {
@@ -284,7 +296,6 @@ mlx5_vdpa_event_handle(void *arg)
 	case MLX5_VDPA_EVENT_MODE_FIXED_TIMER:
 		priv->timer_delay_us = priv->event_us;
 		while (1) {
-			pthread_mutex_lock(&priv->vq_config_lock);
 			max = mlx5_vdpa_queues_complete(priv);
 			if (max == 0 && priv->no_traffic_counter++ >=
 			    priv->no_traffic_max) {
@@ -292,32 +303,37 @@ mlx5_vdpa_event_handle(void *arg)
 					priv->vdev->device->name);
 				mlx5_vdpa_arm_all_cqs(priv);
 				do {
-					pthread_mutex_unlock
-							(&priv->vq_config_lock);
-					cq = mlx5_vdpa_event_wait(priv);
-					pthread_mutex_lock
-							(&priv->vq_config_lock);
-					if (cq == NULL ||
-					       mlx5_vdpa_queue_complete(cq) > 0)
+					virtq = mlx5_vdpa_event_wait(priv);
+					if (virtq == NULL)
 						break;
+					pthread_mutex_lock(
+						&virtq->virtq_lock);
+					if (mlx5_vdpa_queue_complete(
+						&virtq->eqp.cq) > 0) {
+						pthread_mutex_unlock(
+							&virtq->virtq_lock);
+						break;
+					}
+					pthread_mutex_unlock(
+						&virtq->virtq_lock);
 				} while (1);
 				priv->timer_delay_us = priv->event_us;
 				priv->no_traffic_counter = 0;
 			} else if (max != 0) {
 				priv->no_traffic_counter = 0;
 			}
-			pthread_mutex_unlock(&priv->vq_config_lock);
 			mlx5_vdpa_timer_sleep(priv, max);
 		}
 		return NULL;
 	case MLX5_VDPA_EVENT_MODE_ONLY_INTERRUPT:
 		do {
-			cq = mlx5_vdpa_event_wait(priv);
-			if (cq != NULL) {
-				pthread_mutex_lock(&priv->vq_config_lock);
-				if (mlx5_vdpa_queue_complete(cq) > 0)
-					mlx5_vdpa_cq_arm(priv, cq);
-				pthread_mutex_unlock(&priv->vq_config_lock);
+			virtq = mlx5_vdpa_event_wait(priv);
+			if (virtq != NULL) {
+				pthread_mutex_lock(&virtq->virtq_lock);
+				if (mlx5_vdpa_queue_complete(
+					&virtq->eqp.cq) > 0)
+					mlx5_vdpa_cq_arm(priv, &virtq->eqp.cq);
+				pthread_mutex_unlock(&virtq->virtq_lock);
 			}
 		} while (1);
 		return NULL;
@@ -339,7 +355,6 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
 	struct mlx5_vdpa_virtq *virtq;
 	uint64_t sec;
 
-	pthread_mutex_lock(&priv->vq_config_lock);
 	while (mlx5_glue->devx_get_event(priv->err_chnl, &out.event_resp,
 					 sizeof(out.buf)) >=
 				       (ssize_t)sizeof(out.event_resp.cookie)) {
@@ -351,10 +366,11 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
 			continue;
 		}
 		virtq = &priv->virtqs[vq_index];
+		pthread_mutex_lock(&virtq->virtq_lock);
 		if (!virtq->enable || virtq->version != version)
-			continue;
+			goto unlock;
 		if (rte_rdtsc() / rte_get_tsc_hz() < MLX5_VDPA_ERROR_TIME_SEC)
-			continue;
+			goto unlock;
 		virtq->stopped = true;
 		/* Query error info. */
 		if (mlx5_vdpa_virtq_query(priv, vq_index))
@@ -384,8 +400,9 @@ mlx5_vdpa_err_interrupt_handler(void *cb_arg __rte_unused)
 		for (i = 1; i < RTE_DIM(virtq->err_time); i++)
 			virtq->err_time[i - 1] = virtq->err_time[i];
 		virtq->err_time[RTE_DIM(virtq->err_time) - 1] = rte_rdtsc();
+unlock:
+		pthread_mutex_unlock(&virtq->virtq_lock);
 	}
-	pthread_mutex_unlock(&priv->vq_config_lock);
 #endif
 }
 
@@ -533,11 +550,18 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 void
 mlx5_vdpa_cqe_event_unset(struct mlx5_vdpa_priv *priv)
 {
+	struct mlx5_vdpa_virtq *virtq;
 	void *status;
+	int i;
 
 	if (priv->timer_tid) {
 		pthread_cancel(priv->timer_tid);
 		pthread_join(priv->timer_tid, &status);
+		/* The mutex may stay locked after event thread cancel, initiate it. */
+		for (i = 0; i < priv->nr_virtqs; i++) {
+			virtq = &priv->virtqs[i];
+			pthread_mutex_init(&virtq->virtq_lock, NULL);
+		}
 	}
 	priv->timer_tid = 0;
 }
@@ -614,8 +638,9 @@ mlx5_vdpa_qps2rst2rts(struct mlx5_vdpa_event_qp *eqp)
 
 int
 mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
-			  int callfd, struct mlx5_vdpa_event_qp *eqp)
+	int callfd, struct mlx5_vdpa_virtq *virtq)
 {
+	struct mlx5_vdpa_event_qp *eqp = &virtq->eqp;
 	struct mlx5_devx_qp_attr attr = {0};
 	uint16_t log_desc_n = rte_log2_u32(desc_n);
 	uint32_t ret;
@@ -632,7 +657,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 	}
 	if (eqp->fw_qp)
 		mlx5_vdpa_event_qp_destroy(eqp);
-	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, &eqp->cq))
+	if (mlx5_vdpa_cq_create(priv, log_desc_n, callfd, virtq) ||
+		!eqp->cq.cq_obj.cq)
 		return -1;
 	attr.pd = priv->cdev->pdn;
 	attr.ts_format =
@@ -650,8 +676,8 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 	attr.ts_format =
 		mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format);
 	ret = mlx5_devx_qp_create(priv->cdev->ctx, &(eqp->sw_qp),
-				  attr.num_of_receive_wqes *
-				  MLX5_WSEG_SIZE, &attr, SOCKET_ID_ANY);
+			attr.num_of_receive_wqes * MLX5_WSEG_SIZE,
+			&attr, SOCKET_ID_ANY);
 	if (ret) {
 		DRV_LOG(ERR, "Failed to create SW QP(%u).", rte_errno);
 		goto error;
@@ -668,3 +694,4 @@ mlx5_vdpa_event_qp_prepare(struct mlx5_vdpa_priv *priv, uint16_t desc_n,
 	mlx5_vdpa_event_qp_destroy(eqp);
 	return -1;
 }
drivers/vdpa/mlx5/mlx5_vdpa_lm.c

@@ -24,10 +24,19 @@ mlx5_vdpa_logging_enable(struct mlx5_vdpa_priv *priv, int enable)
 		virtq = &priv->virtqs[i];
 		if (!virtq->configured) {
 			DRV_LOG(DEBUG, "virtq %d is invalid for dirty bitmap enabling.", i);
-		} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
-			   &attr)) {
-			DRV_LOG(ERR, "Failed to modify virtq %d for dirty bitmap enabling.", i);
-			return -1;
+		} else {
+			struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+			pthread_mutex_lock(&virtq->virtq_lock);
+			if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
+					&attr)) {
+				pthread_mutex_unlock(&virtq->virtq_lock);
+				DRV_LOG(ERR,
+					"Failed to modify virtq %d for dirty bitmap enabling.",
+					i);
+				return -1;
+			}
+			pthread_mutex_unlock(&virtq->virtq_lock);
 		}
 	}
 	return 0;
@@ -59,10 +68,19 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 		virtq = &priv->virtqs[i];
 		if (!virtq->configured) {
 			DRV_LOG(DEBUG, "virtq %d is invalid for LM.", i);
-		} else if (mlx5_devx_cmd_modify_virtq(priv->virtqs[i].virtq,
-					&attr)) {
-			DRV_LOG(ERR, "Failed to modify virtq %d for LM.", i);
-			goto err;
+		} else {
+			struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
+
+			pthread_mutex_lock(&virtq->virtq_lock);
+			if (mlx5_devx_cmd_modify_virtq(
+					priv->virtqs[i].virtq,
+					&attr)) {
+				pthread_mutex_unlock(&virtq->virtq_lock);
+				DRV_LOG(ERR,
+					"Failed to modify virtq %d for LM.", i);
+				goto err;
+			}
+			pthread_mutex_unlock(&virtq->virtq_lock);
 		}
 	}
 	return 0;
@@ -77,6 +95,7 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
 int
 mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 {
+	struct mlx5_vdpa_virtq *virtq;
 	uint64_t features;
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &features);
 	int i;
@@ -88,10 +107,13 @@ mlx5_vdpa_lm_log(struct mlx5_vdpa_priv *priv)
 	if (!RTE_VHOST_NEED_LOG(features))
 		return 0;
 	for (i = 0; i < priv->nr_virtqs; ++i) {
+		virtq = &priv->virtqs[i];
 		if (!priv->virtqs[i].virtq) {
 			DRV_LOG(DEBUG, "virtq %d is invalid for LM log.", i);
 		} else {
+			pthread_mutex_lock(&virtq->virtq_lock);
 			ret = mlx5_vdpa_virtq_stop(priv, i);
+			pthread_mutex_unlock(&virtq->virtq_lock);
 			if (ret) {
 				DRV_LOG(ERR, "Failed to stop virtq %d for LM "
 					"log.", i);
drivers/vdpa/mlx5/mlx5_vdpa_steer.c

@@ -237,19 +237,24 @@ mlx5_vdpa_rss_flows_create(struct mlx5_vdpa_priv *priv)
 int
 mlx5_vdpa_steer_update(struct mlx5_vdpa_priv *priv)
 {
-	int ret = mlx5_vdpa_rqt_prepare(priv);
+	int ret;
 
+	pthread_mutex_lock(&priv->steer_update_lock);
+	ret = mlx5_vdpa_rqt_prepare(priv);
 	if (ret == 0) {
 		mlx5_vdpa_steer_unset(priv);
 	} else if (ret < 0) {
+		pthread_mutex_unlock(&priv->steer_update_lock);
 		return ret;
 	} else if (!priv->steer.rss[0].flow) {
 		ret = mlx5_vdpa_rss_flows_create(priv);
 		if (ret) {
 			DRV_LOG(ERR, "Cannot create RSS flows.");
+			pthread_mutex_unlock(&priv->steer_update_lock);
 			return -1;
 		}
 	}
+	pthread_mutex_unlock(&priv->steer_update_lock);
 	return 0;
 }
drivers/vdpa/mlx5/mlx5_vdpa_virtq.c

@@ -24,13 +24,17 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
 	int nbytes;
 	int retry;
 
+	pthread_mutex_lock(&virtq->virtq_lock);
 	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
+		pthread_mutex_unlock(&virtq->virtq_lock);
 		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
 			priv->vid, virtq->index);
 		return;
 	}
-	if (rte_intr_fd_get(virtq->intr_handle) < 0)
+	if (rte_intr_fd_get(virtq->intr_handle) < 0) {
+		pthread_mutex_unlock(&virtq->virtq_lock);
 		return;
+	}
 	for (retry = 0; retry < 3; ++retry) {
 		nbytes = read(rte_intr_fd_get(virtq->intr_handle), &buf,
 			      8);
@@ -44,9 +48,14 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
 		}
 		break;
 	}
-	if (nbytes < 0)
+	if (nbytes < 0) {
+		pthread_mutex_unlock(&virtq->virtq_lock);
 		return;
+	}
+	rte_spinlock_lock(&priv->db_lock);
 	rte_write32(virtq->index, priv->virtq_db_addr);
+	rte_spinlock_unlock(&priv->db_lock);
+	pthread_mutex_unlock(&virtq->virtq_lock);
 	if (priv->state != MLX5_VDPA_STATE_CONFIGURED && !virtq->enable) {
 		DRV_LOG(ERR, "device %d queue %d down, skip kick handling",
 			priv->vid, virtq->index);
@@ -66,6 +75,33 @@ mlx5_vdpa_virtq_kick_handler(void *cb_arg)
 	DRV_LOG(DEBUG, "Ring virtq %u doorbell.", virtq->index);
 }
 
+/* Virtq must be locked before calling this function. */
+static void
+mlx5_vdpa_virtq_unregister_intr_handle(struct mlx5_vdpa_virtq *virtq)
+{
+	int ret = -EAGAIN;
+
+	if (!virtq->intr_handle)
+		return;
+	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
+		while (ret == -EAGAIN) {
+			ret = rte_intr_callback_unregister(virtq->intr_handle,
+					mlx5_vdpa_virtq_kick_handler, virtq);
+			if (ret == -EAGAIN) {
+				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
+					rte_intr_fd_get(virtq->intr_handle),
+					virtq->index);
+				pthread_mutex_unlock(&virtq->virtq_lock);
+				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
+				pthread_mutex_lock(&virtq->virtq_lock);
+			}
+		}
+		(void)rte_intr_fd_set(virtq->intr_handle, -1);
+	}
+	rte_intr_instance_free(virtq->intr_handle);
+	virtq->intr_handle = NULL;
+}
+
 /* Release cached VQ resources. */
 void
 mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
@@ -75,6 +111,7 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 	for (i = 0; i < priv->caps.max_num_virtio_queues; i++) {
 		struct mlx5_vdpa_virtq *virtq = &priv->virtqs[i];
 
+		pthread_mutex_lock(&virtq->virtq_lock);
 		virtq->configured = 0;
 		for (j = 0; j < RTE_DIM(virtq->umems); ++j) {
 			if (virtq->umems[j].obj) {
@@ -90,28 +127,17 @@ mlx5_vdpa_virtqs_cleanup(struct mlx5_vdpa_priv *priv)
 		}
 		if (virtq->eqp.fw_qp)
 			mlx5_vdpa_event_qp_destroy(&virtq->eqp);
+		pthread_mutex_unlock(&virtq->virtq_lock);
 	}
 }
 
-
 static int
 mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 {
 	int ret = -EAGAIN;
 
-	if (rte_intr_fd_get(virtq->intr_handle) >= 0) {
-		while (ret == -EAGAIN) {
-			ret = rte_intr_callback_unregister(virtq->intr_handle,
-					mlx5_vdpa_virtq_kick_handler, virtq);
-			if (ret == -EAGAIN) {
-				DRV_LOG(DEBUG, "Try again to unregister fd %d of virtq %hu interrupt",
-					rte_intr_fd_get(virtq->intr_handle),
-					virtq->index);
-				usleep(MLX5_VDPA_INTR_RETRIES_USEC);
-			}
-		}
-		rte_intr_fd_set(virtq->intr_handle, -1);
-	}
-	rte_intr_instance_free(virtq->intr_handle);
+	mlx5_vdpa_virtq_unregister_intr_handle(virtq);
 	if (virtq->configured) {
 		ret = mlx5_vdpa_virtq_stop(virtq->priv, virtq->index);
 		if (ret)
@@ -128,10 +154,15 @@ mlx5_vdpa_virtq_unset(struct mlx5_vdpa_virtq *virtq)
 void
 mlx5_vdpa_virtqs_release(struct mlx5_vdpa_priv *priv)
 {
+	struct mlx5_vdpa_virtq *virtq;
 	int i;
 
-	for (i = 0; i < priv->nr_virtqs; i++)
-		mlx5_vdpa_virtq_unset(&priv->virtqs[i]);
+	for (i = 0; i < priv->nr_virtqs; i++) {
+		virtq = &priv->virtqs[i];
+		pthread_mutex_lock(&virtq->virtq_lock);
+		mlx5_vdpa_virtq_unset(virtq);
+		pthread_mutex_unlock(&virtq->virtq_lock);
+	}
 	priv->features = 0;
 	priv->nr_virtqs = 0;
 }
@@ -250,7 +281,7 @@ mlx5_vdpa_virtq_sub_objs_prepare(struct mlx5_vdpa_priv *priv,
 			MLX5_VIRTQ_EVENT_MODE_QP : MLX5_VIRTQ_EVENT_MODE_NO_MSIX;
 	if (attr->event_mode == MLX5_VIRTQ_EVENT_MODE_QP) {
 		ret = mlx5_vdpa_event_qp_prepare(priv,
-				vq->size, vq->callfd, &virtq->eqp);
+				vq->size, vq->callfd, virtq);
 		if (ret) {
 			DRV_LOG(ERR,
 				"Failed to create event QPs for virtq %d.",
@@ -420,7 +451,9 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	}
 	claim_zero(rte_vhost_enable_guest_notification(priv->vid, index, 1));
 	virtq->configured = 1;
+	rte_spinlock_lock(&priv->db_lock);
 	rte_write32(virtq->index, priv->virtq_db_addr);
+	rte_spinlock_unlock(&priv->db_lock);
 	/* Setup doorbell mapping. */
 	virtq->intr_handle =
 		rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
@@ -441,7 +474,7 @@ mlx5_vdpa_virtq_setup(struct mlx5_vdpa_priv *priv, int index)
 	if (rte_intr_callback_register(virtq->intr_handle,
 				       mlx5_vdpa_virtq_kick_handler,
 				       virtq)) {
-		rte_intr_fd_set(virtq->intr_handle, -1);
+		(void)rte_intr_fd_set(virtq->intr_handle, -1);
 		DRV_LOG(ERR, "Failed to register virtq %d interrupt.",
 			index);
 		goto error;
@@ -537,6 +570,7 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 	uint32_t i;
 	uint16_t nr_vring = rte_vhost_get_vring_num(priv->vid);
 	int ret = rte_vhost_get_negotiated_features(priv->vid, &priv->features);
+	struct mlx5_vdpa_virtq *virtq;
 
 	if (ret || mlx5_vdpa_features_validate(priv)) {
 		DRV_LOG(ERR, "Failed to configure negotiated features.");
@@ -556,9 +590,17 @@ mlx5_vdpa_virtqs_prepare(struct mlx5_vdpa_priv *priv)
 		return -1;
 	}
 	priv->nr_virtqs = nr_vring;
-	for (i = 0; i < nr_vring; i++)
-		if (priv->virtqs[i].enable && mlx5_vdpa_virtq_setup(priv, i))
-			goto error;
+	for (i = 0; i < nr_vring; i++) {
+		virtq = &priv->virtqs[i];
+		if (virtq->enable) {
+			pthread_mutex_lock(&virtq->virtq_lock);
+			if (mlx5_vdpa_virtq_setup(priv, i)) {
+				pthread_mutex_unlock(&virtq->virtq_lock);
+				goto error;
+			}
+			pthread_mutex_unlock(&virtq->virtq_lock);
+		}
+	}
 	return 0;
 error:
 	mlx5_vdpa_virtqs_release(priv);