vdpa/mlx5: add CPU core parameter to bind polling thread
This patch adds a new device argument that specifies the CPU core affinity of the event polling thread, for better latency and throughput. The thread can also be located by the name "vDPA-mlx5-<id>". Signed-off-by: Xueming Li <xuemingl@nvidia.com> Acked-by: Matan Azrad <matan@nvidia.com> Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
This commit is contained in:
parent
c9a189f4ea
commit
5cf3fd3af4
@ -134,6 +134,11 @@ Driver options
|
||||
interrupts are configured to the device in order to notify traffic for the
|
||||
driver. Default value is 2s.
|
||||
|
||||
- ``event_core`` parameter [int]
|
||||
|
||||
CPU core number to set the polling thread affinity to; defaults to the control plane
|
||||
CPU.
|
||||
|
||||
Error handling
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
|
@ -612,6 +612,7 @@ mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
|
||||
{
|
||||
struct mlx5_vdpa_priv *priv = opaque;
|
||||
unsigned long tmp;
|
||||
int n_cores = sysconf(_SC_NPROCESSORS_ONLN);
|
||||
|
||||
if (strcmp(key, "class") == 0)
|
||||
return 0;
|
||||
@ -630,6 +631,11 @@ mlx5_vdpa_args_check_handler(const char *key, const char *val, void *opaque)
|
||||
priv->event_us = (uint32_t)tmp;
|
||||
} else if (strcmp(key, "no_traffic_time") == 0) {
|
||||
priv->no_traffic_time_s = (uint32_t)tmp;
|
||||
} else if (strcmp(key, "event_core") == 0) {
|
||||
if (tmp >= (unsigned long)n_cores)
|
||||
DRV_LOG(WARNING, "Invalid event_core %s.", val);
|
||||
else
|
||||
priv->event_core = tmp;
|
||||
} else {
|
||||
DRV_LOG(WARNING, "Invalid key %s.", key);
|
||||
}
|
||||
@ -643,6 +649,7 @@ mlx5_vdpa_config_get(struct rte_devargs *devargs, struct mlx5_vdpa_priv *priv)
|
||||
|
||||
priv->event_mode = MLX5_VDPA_EVENT_MODE_DYNAMIC_TIMER;
|
||||
priv->event_us = 0;
|
||||
priv->event_core = -1;
|
||||
priv->no_traffic_time_s = MLX5_VDPA_DEFAULT_NO_TRAFFIC_TIME_S;
|
||||
if (devargs == NULL)
|
||||
return;
|
||||
|
@ -131,6 +131,7 @@ struct mlx5_vdpa_priv {
|
||||
pthread_cond_t timer_cond;
|
||||
volatile uint8_t timer_on;
|
||||
int event_mode;
|
||||
int event_core; /* Event thread cpu affinity core. */
|
||||
uint32_t event_us;
|
||||
uint32_t timer_delay_us;
|
||||
uint32_t no_traffic_time_s;
|
||||
|
@ -533,6 +533,9 @@ int
|
||||
mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
|
||||
{
|
||||
int ret;
|
||||
rte_cpuset_t cpuset;
|
||||
pthread_attr_t attr;
|
||||
char name[16];
|
||||
|
||||
if (!priv->eventc)
|
||||
/* All virtqs are in poll mode. */
|
||||
@ -541,12 +544,30 @@ mlx5_vdpa_cqe_event_setup(struct mlx5_vdpa_priv *priv)
|
||||
pthread_mutex_init(&priv->timer_lock, NULL);
|
||||
pthread_cond_init(&priv->timer_cond, NULL);
|
||||
priv->timer_on = 0;
|
||||
ret = pthread_create(&priv->timer_tid, NULL,
|
||||
pthread_attr_init(&attr);
|
||||
CPU_ZERO(&cpuset);
|
||||
if (priv->event_core != -1)
|
||||
CPU_SET(priv->event_core, &cpuset);
|
||||
else
|
||||
cpuset = rte_lcore_cpuset(rte_get_main_lcore());
|
||||
ret = pthread_attr_setaffinity_np(&attr, sizeof(cpuset),
|
||||
&cpuset);
|
||||
if (ret) {
|
||||
DRV_LOG(ERR, "Failed to set thread affinity.");
|
||||
return -1;
|
||||
}
|
||||
ret = pthread_create(&priv->timer_tid, &attr,
|
||||
mlx5_vdpa_poll_handle, (void *)priv);
|
||||
if (ret) {
|
||||
DRV_LOG(ERR, "Failed to create timer thread.");
|
||||
return -1;
|
||||
}
|
||||
snprintf(name, sizeof(name), "vDPA-mlx5-%d", priv->vid);
|
||||
ret = pthread_setname_np(priv->timer_tid, name);
|
||||
if (ret) {
|
||||
DRV_LOG(ERR, "Failed to set timer thread name.");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
priv->intr_handle.fd = priv->eventc->fd;
|
||||
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
|
||||
|
Loading…
x
Reference in New Issue
Block a user