eal: add thread lifetime API

The *rte_thread_create()* function can optionally receive an
rte_thread_attr_t object that will cause the thread to be created with
the affinity and priority described by the attributes object. If
no rte_thread_attr_t is passed (parameter is NULL), the default
affinity and priority are used.

On Windows, the function executed by a thread when the thread starts is
represented by a function pointer of type DWORD (*func) (void*).
On other platforms, the function pointer is a void* (*func) (void*).

Casting between these two function pointer types in order to present a
uniform API on all platforms may result in undefined behavior.
To fix this issue, a wrapper that matches the signature required by
CreateThread() has been added on Windows.

Signed-off-by: Narcisa Vasile <navasile@linux.microsoft.com>
Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
Acked-by: Dmitry Kozlyuk <dmitry.kozliuk@gmail.com>
This commit is contained in:
Tyler Retzlaff 2022-10-05 10:07:27 -07:00 committed by David Marchand
parent 51e6608aa6
commit ce6e911d20
5 changed files with 387 additions and 41 deletions

View File

@ -30,6 +30,18 @@ typedef struct {
uintptr_t opaque_id; /**< thread identifier */
} rte_thread_t;
/**
* Thread function
*
* Function pointer to thread start routine.
*
* @param arg
* Argument passed to rte_thread_create().
* @return
* Thread function exit value.
*/
typedef uint32_t (*rte_thread_func) (void *arg);
/**
* Thread priority values.
*/
@ -55,6 +67,69 @@ typedef struct {
*/
typedef struct eal_tls_key *rte_thread_key;
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
* Create a new thread that will invoke the 'thread_func' routine.
*
* @param thread_id
* A pointer that will store the id of the newly created thread.
*
* @param thread_attr
* Attributes that are used at the creation of the new thread.
*
* @param thread_func
* The routine that the new thread will invoke when starting execution.
*
* @param arg
* Argument to be passed to the 'thread_func' routine.
*
* @return
* On success, return 0.
* On failure, return a positive errno-style error number.
*/
__rte_experimental
int rte_thread_create(rte_thread_t *thread_id,
const rte_thread_attr_t *thread_attr,
rte_thread_func thread_func, void *arg);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
* Wait for the thread identified by 'thread_id' to terminate.
*
* @param thread_id
* The identifier of the thread.
*
* @param value_ptr
* Stores the exit status of the thread.
*
* @return
* On success, return 0.
* On failure, return a positive errno-style error number.
*/
__rte_experimental
int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
* Indicate that the return value of the thread is not needed and
* all thread resources should be released when the thread terminates.
*
* @param thread_id
* The id of the thread to be detached.
*
* @return
* On success, return 0.
* On failure, return a positive errno-style error number.
*/
__rte_experimental
int rte_thread_detach(rte_thread_t thread_id);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.

View File

@ -16,6 +16,11 @@ struct eal_tls_key {
pthread_key_t thread_index;
};
struct thread_routine_ctx {
rte_thread_func thread_func;
void *routine_args;
};
static int
thread_map_priority_to_os_value(enum rte_thread_priority eal_pri, int *os_pri,
int *pol)
@ -75,6 +80,136 @@ thread_map_os_priority_to_eal_priority(int policy, int os_pri,
return 0;
}
static void *
thread_func_wrapper(void *arg)
{
struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;
free(arg);
return (void *)(uintptr_t)ctx.thread_func(ctx.routine_args);
}
/*
 * Create a new thread that invokes 'thread_func', optionally applying
 * the priority and affinity described by 'thread_attr'.
 *
 * @param thread_id
 *   Receives the identifier of the newly created thread.
 * @param thread_attr
 *   Optional attributes (priority, cpuset); NULL selects the defaults.
 * @param thread_func
 *   Routine executed by the new thread.
 * @param args
 *   Argument passed to 'thread_func'.
 * @return
 *   0 on success, a positive errno-style error number on failure.
 */
int
rte_thread_create(rte_thread_t *thread_id,
		const rte_thread_attr_t *thread_attr,
		rte_thread_func thread_func, void *args)
{
	int ret = 0;
	pthread_attr_t attr;
	pthread_attr_t *attrp = NULL;
	struct thread_routine_ctx *ctx;
	struct sched_param param = {
		.sched_priority = 0,
	};
	int policy = SCHED_OTHER;

	/* Freed by thread_func_wrapper() once the thread runs, or at
	 * 'cleanup' below if thread creation fails.
	 */
	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
		ret = ENOMEM;
		goto cleanup;
	}
	ctx->routine_args = args;
	ctx->thread_func = thread_func;

	if (thread_attr != NULL) {
		ret = pthread_attr_init(&attr);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "pthread_attr_init failed\n");
			goto cleanup;
		}

		attrp = &attr;

		/*
		 * Set the inherit scheduler parameter to explicit,
		 * otherwise the priority attribute is ignored.
		 */
		ret = pthread_attr_setinheritsched(attrp,
				PTHREAD_EXPLICIT_SCHED);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "pthread_attr_setinheritsched failed\n");
			goto cleanup;
		}

		/* Realtime-critical priority is not supported here. */
		if (thread_attr->priority ==
				RTE_THREAD_PRIORITY_REALTIME_CRITICAL) {
			ret = ENOTSUP;
			goto cleanup;
		}
		ret = thread_map_priority_to_os_value(thread_attr->priority,
				&param.sched_priority, &policy);
		if (ret != 0)
			goto cleanup;

		ret = pthread_attr_setschedpolicy(attrp, policy);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "pthread_attr_setschedpolicy failed\n");
			goto cleanup;
		}

		ret = pthread_attr_setschedparam(attrp, &param);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "pthread_attr_setschedparam failed\n");
			goto cleanup;
		}
	}

	ret = pthread_create((pthread_t *)&thread_id->opaque_id, attrp,
		thread_func_wrapper, ctx);
	if (ret != 0) {
		RTE_LOG(DEBUG, EAL, "pthread_create failed\n");
		goto cleanup;
	}

	/*
	 * Ownership of 'ctx' has passed to the new thread, which frees it
	 * in thread_func_wrapper(). Clear the local pointer NOW so that a
	 * failure below cannot reach free(ctx) at 'cleanup' — previously
	 * it was cleared only after the affinity call, so an affinity
	 * failure freed memory the running thread still reads and frees
	 * itself (use-after-free / double free).
	 */
	ctx = NULL;

	if (thread_attr != NULL && CPU_COUNT(&thread_attr->cpuset) > 0) {
		/* The thread is already running; if this fails, an error is
		 * reported but the thread keeps running with the default
		 * affinity.
		 */
		ret = rte_thread_set_affinity_by_id(*thread_id,
			&thread_attr->cpuset);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "rte_thread_set_affinity_by_id failed\n");
			goto cleanup;
		}
	}

cleanup:
	free(ctx);
	if (attrp != NULL)
		pthread_attr_destroy(&attr);

	return ret;
}
/*
 * Wait for 'thread_id' to terminate; optionally return its exit value
 * through 'value_ptr'. Returns 0 on success, a positive errno-style
 * error number on failure.
 */
int
rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
{
	void *exit_value = (void *)(uintptr_t)0;
	int err;

	/* Only request the return value when the caller wants it. */
	err = pthread_join((pthread_t)thread_id.opaque_id,
		value_ptr != NULL ? &exit_value : NULL);
	if (err != 0) {
		RTE_LOG(DEBUG, EAL, "pthread_join failed\n");
		return err;
	}

	if (value_ptr != NULL)
		*value_ptr = (uint32_t)(uintptr_t)exit_value;

	return 0;
}
/*
 * Mark 'thread_id' as detached: its resources are reclaimed by the OS
 * on termination and it can no longer be joined. pthread_detach()
 * already returns a positive errno-style value on failure, matching
 * this API's error convention.
 */
int
rte_thread_detach(rte_thread_t thread_id)
{
	const pthread_t os_id = (pthread_t)thread_id.opaque_id;

	return pthread_detach(os_id);
}
rte_thread_t
rte_thread_self(void)
{

View File

@ -436,6 +436,9 @@ EXPERIMENTAL {
rte_thread_attr_init;
rte_thread_attr_set_affinity;
rte_thread_attr_set_priority;
rte_thread_create;
rte_thread_detach;
rte_thread_join;
};
INTERNAL {

View File

@ -44,7 +44,7 @@ typedef struct _rte_cpuset_s {
(1LL << _WHICH_BIT(b))) != 0LL)
static inline int
count_cpu(rte_cpuset_t *s)
count_cpu(const rte_cpuset_t *s)
{
unsigned int _i;
int count = 0;

View File

@ -15,6 +15,11 @@ struct eal_tls_key {
DWORD thread_index;
};
struct thread_routine_ctx {
rte_thread_func thread_func;
void *routine_args;
};
/* Translates the most common error codes related to threads */
static int
thread_translate_win32_error(DWORD error)
@ -116,6 +121,174 @@ thread_map_os_priority_to_eal_value(int os_pri, DWORD pri_class,
return 0;
}
/*
 * Translate an rte_cpuset_t into a Windows GROUP_AFFINITY.
 * All selected CPUs must belong to a single processor group.
 *
 * Returns 0 on success, ENOTSUP if the set spans processor groups,
 * EINVAL if the set selects no known CPU.
 */
static int
convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
		PGROUP_AFFINITY affinity)
{
	unsigned int idx;

	memset(affinity, 0, sizeof(GROUP_AFFINITY));
	affinity->Group = (USHORT)-1; /* sentinel: no group seen yet */

	/* Check that all cpus of the set belong to the same processor group
	 * and accumulate the thread affinity mask to be applied.
	 */
	for (idx = 0; idx < CPU_SETSIZE; idx++) {
		PGROUP_AFFINITY cpu;

		if (!CPU_ISSET(idx, cpuset))
			continue;

		cpu = eal_get_cpu_affinity(idx);

		if (affinity->Group == (USHORT)-1) {
			affinity->Group = cpu->Group;
		} else if (affinity->Group != cpu->Group) {
			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
			return ENOTSUP;
		}

		affinity->Mask |= cpu->Mask;
	}

	/* No bit set: the cpuset named no CPU known to EAL. */
	if (affinity->Mask == 0)
		return EINVAL;

	return 0;
}
/*
 * Thread entry wrapper with the signature CreateThread() requires,
 * bridging to the cross-platform rte_thread_func contract (uint32_t
 * return value).
 *
 * NOTE(review): CreateThread() documents an LPTHREAD_START_ROUTINE,
 * i.e. DWORD WINAPI (*)(LPVOID). On x64 all calling conventions
 * coincide, so the missing WINAPI qualifier is harmless there — confirm
 * no 32-bit Windows target is supported.
 */
static DWORD
thread_func_wrapper(void *arg)
{
	/* Copy the context to the stack, then free the heap allocation made
	 * by rte_thread_create() before invoking the user routine.
	 */
	struct thread_routine_ctx ctx = *(struct thread_routine_ctx *)arg;

	free(arg);

	return (DWORD)ctx.thread_func(ctx.routine_args);
}
/*
 * Create a new thread that invokes 'thread_func', optionally applying
 * the priority and affinity described by 'thread_attr'.
 *
 * The thread is created suspended so that affinity and priority can be
 * applied before it executes a single instruction, then resumed.
 *
 * @return
 *   0 on success, a positive errno-style error number on failure.
 */
int
rte_thread_create(rte_thread_t *thread_id,
	const rte_thread_attr_t *thread_attr,
	rte_thread_func thread_func, void *args)
{
	int ret = 0;
	DWORD tid;
	HANDLE thread_handle = NULL;
	GROUP_AFFINITY thread_affinity;
	struct thread_routine_ctx *ctx;

	/* Freed by thread_func_wrapper() once the thread runs, or at
	 * 'cleanup' below on failure.
	 */
	ctx = calloc(1, sizeof(*ctx));
	if (ctx == NULL) {
		RTE_LOG(DEBUG, EAL, "Insufficient memory for thread context allocations\n");
		ret = ENOMEM;
		goto cleanup;
	}
	ctx->routine_args = args;
	ctx->thread_func = thread_func;

	/* CREATE_SUSPENDED: keep the thread parked until its attributes
	 * have been applied below.
	 */
	thread_handle = CreateThread(NULL, 0, thread_func_wrapper, ctx,
		CREATE_SUSPENDED, &tid);
	if (thread_handle == NULL) {
		ret = thread_log_last_error("CreateThread()");
		goto cleanup;
	}
	/* The thread id (not the handle) is the public identifier;
	 * rte_thread_join() re-opens a handle from it via OpenThread().
	 */
	thread_id->opaque_id = tid;

	if (thread_attr != NULL) {
		if (CPU_COUNT(&thread_attr->cpuset) > 0) {
			ret = convert_cpuset_to_affinity(
					&thread_attr->cpuset,
					&thread_affinity
					);
			if (ret != 0) {
				RTE_LOG(DEBUG, EAL, "Unable to convert cpuset to thread affinity\n");
				goto cleanup;
			}

			if (!SetThreadGroupAffinity(thread_handle,
						    &thread_affinity, NULL)) {
				ret = thread_log_last_error("SetThreadGroupAffinity()");
				goto cleanup;
			}
		}
		ret = rte_thread_set_priority(*thread_id,
				thread_attr->priority);
		if (ret != 0) {
			RTE_LOG(DEBUG, EAL, "Unable to set thread priority\n");
			goto cleanup;
		}
	}

	if (ResumeThread(thread_handle) == (DWORD)-1) {
		ret = thread_log_last_error("ResumeThread()");
		goto cleanup;
	}

	/* The resumed thread now owns and frees 'ctx'; do not free it at
	 * 'cleanup'.
	 *
	 * NOTE(review): on the failure paths between CreateThread() and
	 * ResumeThread(), the suspended thread is neither resumed nor
	 * terminated — its kernel object lingers until process exit, and
	 * 'ctx' is freed below while the (never-running) thread still
	 * holds the pointer. Confirm this leak is acceptable or resume
	 * and join the thread on error.
	 */
	ctx = NULL;

cleanup:
	free(ctx);
	if (thread_handle != NULL) {
		/* Safe to close unconditionally: join/detach operate on the
		 * thread id, not this handle.
		 */
		CloseHandle(thread_handle);
		thread_handle = NULL;
	}

	return ret;
}
/*
 * Wait for 'thread_id' to terminate; optionally return its exit code
 * through 'value_ptr'. Returns 0 on success, a positive errno-style
 * error number on failure.
 */
int
rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr)
{
	HANDLE handle;
	int ret = 0;

	/* A fresh handle is opened from the id; rte_thread_create() does
	 * not keep one.
	 */
	handle = OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
		FALSE, thread_id.opaque_id);
	if (handle == NULL)
		return thread_log_last_error("OpenThread()");

	if (WaitForSingleObject(handle, INFINITE) != WAIT_OBJECT_0) {
		ret = thread_log_last_error("WaitForSingleObject()");
	} else if (value_ptr != NULL) {
		DWORD exit_code = 0;

		if (GetExitCodeThread(handle, &exit_code) == 0)
			ret = thread_log_last_error("GetExitCodeThread()");
		else
			*value_ptr = exit_code;
	}

	CloseHandle(handle);

	return ret;
}
/*
 * Detach 'thread_id'. On Windows this is a no-op: a thread's kernel
 * object is released once the last handle to it is closed, and this
 * module does not hold any handle open past rte_thread_create().
 * Always succeeds.
 */
int
rte_thread_detach(rte_thread_t thread_id)
{
	RTE_SET_USED(thread_id);

	return 0;
}
rte_thread_t
rte_thread_self(void)
{
@ -280,46 +453,6 @@ rte_thread_value_get(rte_thread_key key)
return output;
}
/*
 * Translate an rte_cpuset_t into a Windows GROUP_AFFINITY.
 * All selected CPUs must belong to a single processor group.
 *
 * Returns 0 on success, ENOTSUP if the set spans processor groups,
 * EINVAL if the set selects no known CPU.
 *
 * NOTE(review): this definition duplicates the one earlier in this
 * view — likely the removed side of a diff that moved the function;
 * confirm only one copy exists in the actual source file.
 */
static int
convert_cpuset_to_affinity(const rte_cpuset_t *cpuset,
		PGROUP_AFFINITY affinity)
{
	int ret = 0;
	PGROUP_AFFINITY cpu_affinity = NULL;
	unsigned int cpu_idx;

	memset(affinity, 0, sizeof(GROUP_AFFINITY));
	affinity->Group = (USHORT)-1; /* sentinel: no group seen yet */

	/* Check that all cpus of the set belong to the same processor group and
	 * accumulate thread affinity to be applied.
	 */
	for (cpu_idx = 0; cpu_idx < CPU_SETSIZE; cpu_idx++) {
		if (!CPU_ISSET(cpu_idx, cpuset))
			continue;

		cpu_affinity = eal_get_cpu_affinity(cpu_idx);

		if (affinity->Group == (USHORT)-1) {
			affinity->Group = cpu_affinity->Group;
		} else if (affinity->Group != cpu_affinity->Group) {
			RTE_LOG(DEBUG, EAL, "All processors must belong to the same processor group\n");
			ret = ENOTSUP;
			goto cleanup;
		}

		affinity->Mask |= cpu_affinity->Mask;
	}

	/* No bit set: the cpuset named no CPU known to EAL. */
	if (affinity->Mask == 0) {
		ret = EINVAL;
		goto cleanup;
	}

cleanup:
	return ret;
}
int
rte_thread_set_affinity_by_id(rte_thread_t thread_id,
const rte_cpuset_t *cpuset)