vdpa/mlx5: fix polling threads scheduling
When the event mode is used with a fixed delay of 0, the polling thread never gives up the CPU. So when multiple polling threads are active, the context switching between them is left to the system scheduler, which may hurt latency depending on the time slice the system chooses.

To fix the scheduling of multiple devices' polling threads, force a reschedule on each CQ poll iteration. Also move the polling threads to the SCHED_RR policy with maximum priority to complete the fairness.

Fixes: 6956a48cabbb ("vdpa/mlx5: set polling mode default delay to zero")

Signed-off-by: Matan Azrad <matan@nvidia.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Xueming Li <xuemingl@nvidia.com>
parent 894028ace2
commit b7fa0bf4d5
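For context, a minimal standalone sketch of the pattern the patch adopts, not driver code: the struct name, the stub poll_cq_once(), and the thread body are all hypothetical. Each polling thread either sleeps for its configured delay or, when the delay is zero, explicitly yields the CPU so sibling polling threads that are ready to run get scheduled promptly instead of waiting for an involuntary context switch.

#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <unistd.h>

/* Hypothetical per-device polling context, for illustration only. */
struct poll_ctx {
	uint32_t timer_delay_us; /* 0 means pure busy-polling mode */
};

static int
poll_cq_once(struct poll_ctx *ctx)
{
	(void)ctx;
	return 0; /* stub: a real implementation would drain CQ entries */
}

static void *
poll_thread(void *arg)
{
	struct poll_ctx *ctx = arg;

	for (;;) {
		poll_cq_once(ctx);
		if (ctx->timer_delay_us)
			usleep(ctx->timer_delay_us);
		else
			/* Zero delay: yield after every iteration so other
			 * ready polling threads can run right away.
			 */
			sched_yield();
	}
	return NULL;
}

int
main(void)
{
	pthread_t tid;
	struct poll_ctx ctx = { .timer_delay_us = 0 };

	pthread_create(&tid, NULL, poll_thread, &ctx);
	pthread_join(tid, NULL); /* the sketch loops forever by design */
	return 0;
}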
@@ -232,6 +232,9 @@ mlx5_vdpa_timer_sleep(struct mlx5_vdpa_priv *priv, uint32_t max)
 	}
 	if (priv->timer_delay_us)
 		usleep(priv->timer_delay_us);
+	else
+		/* Give-up CPU to improve polling threads scheduling. */
+		pthread_yield();
 }
 
 static void *
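Note, as a general portability aside and not something this patch changes: pthread_yield() is a non-standard extension that has been deprecated in recent glibc releases; the POSIX-standard equivalent is sched_yield() from <sched.h>, which the sketch above uses for that reason.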
@@ -500,6 +503,9 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 	rte_cpuset_t cpuset;
 	pthread_attr_t attr;
 	char name[16];
+	const struct sched_param sp = {
+		.sched_priority = sched_get_priority_max(SCHED_RR),
+	};
 
 	if (!priv->eventc)
 		/* All virtqs are in poll mode. */
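For reference: sched_get_priority_max(SCHED_RR) returns the highest real-time priority the policy supports (99 on typical Linux systems), and the sched_param initialized here is consumed by the pthread_attr_setschedparam() call added in the next hunk.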
@@ -520,6 +526,16 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
 		DRV_LOG(ERR, "Failed to set thread affinity.");
 		return -1;
 	}
+	ret = pthread_attr_setschedpolicy(&attr, SCHED_RR);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to set thread sched policy = RR.");
+		return -1;
+	}
+	ret = pthread_attr_setschedparam(&attr, &sp);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to set thread priority.");
+		return -1;
+	}
 	ret = pthread_create(&priv->timer_tid, &attr,
 			     mlx5_vdpa_poll_handle, (void *)priv);
 	if (ret) {
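One general POSIX caveat worth keeping in mind alongside this hunk, stated as an observation about the API rather than a claim about the driver: scheduling settings stored in a pthread attribute only take effect if the attribute's inherit-sched flag is set to PTHREAD_EXPLICIT_SCHED; by default a new thread inherits its creator's policy and the setschedpolicy/setschedparam values are ignored. A minimal self-contained sketch, with a hypothetical worker() function:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *
worker(void *arg) /* hypothetical thread body */
{
	(void)arg;
	return NULL;
}

int
main(void)
{
	pthread_t tid;
	pthread_attr_t attr;
	const struct sched_param sp = {
		.sched_priority = sched_get_priority_max(SCHED_RR),
	};

	pthread_attr_init(&attr);
	/* Without this call, the policy/priority set below are ignored and
	 * the new thread inherits the creator's scheduling attributes.
	 */
	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy(&attr, SCHED_RR);
	pthread_attr_setschedparam(&attr, &sp);

	/* Fails with EPERM unless the process may use real-time priorities
	 * (e.g. CAP_SYS_NICE or a sufficient RLIMIT_RTPRIO).
	 */
	if (pthread_create(&tid, &attr, worker, NULL) != 0)
		perror("pthread_create");
	else
		pthread_join(tid, NULL);
	pthread_attr_destroy(&attr);
	return 0;
}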