Merge commit cde8f4c16 from llvm git (by me):
Move special va_list handling to kmp_os.h

Instead of copying and pasting the same #ifdef expressions in multiple
places, define a type and a pair of macros in kmp_os.h to handle whether
va_list is pointer-like or not:

* kmp_va_list is the type to use for __kmp_fork_call()
* kmp_va_deref() dereferences a va_list, if necessary
* kmp_va_addr_of() takes the address of a va_list, if necessary

Also add FreeBSD to the list of OSes that have a non-pointer-like
va_list. This can now be easily extended to other OSes too.

Reviewed By:	AndreyChurbanov
Differential Revision:	https://reviews.llvm.org/D86397

This should enable building of LLVM's OpenMP on AArch64. An addition to
share/mk will follow in a subsequent commit.

PR:		248864
MFC after:	2 weeks
parent 6808083af4
commit d2c85e90f2
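Before the per-file diffs, here is a minimal, self-contained sketch of the calling pattern the new helpers enable. It is an illustration only, not code from this commit: the my_* names and the print_ints()/entry() functions are made up, while the real definitions are the kmp_va_list / kmp_va_deref() / kmp_va_addr_of() ones added to kmp_os.h below.

    #include <cstdarg>
    #include <cstdio>

    // Same idea as kmp_os.h: on ABIs where va_list is an array/struct type
    // (e.g. x86-64, AArch64), pass a pointer to it; elsewhere pass it by value.
    #if defined(__x86_64__) || defined(__aarch64__)
    typedef va_list *my_va_list;
    #define my_va_deref(ap) (*(ap))
    #define my_va_addr_of(ap) (&(ap))
    #else
    typedef va_list my_va_list;
    #define my_va_deref(ap) (ap)
    #define my_va_addr_of(ap) (ap)
    #endif

    // Callee mirrors __kmp_fork_call(): it takes the abstracted type and
    // always reads arguments through my_va_deref().
    static void print_ints(int argc, my_va_list ap) {
      for (int i = 0; i < argc; ++i)
        std::printf("%d\n", va_arg(my_va_deref(ap), int));
    }

    // Varargs entry point mirrors __kmpc_fork_call(): it hands its va_list
    // to the callee through my_va_addr_of().
    void entry(int argc, ...) {
      va_list ap;
      va_start(ap, argc);
      print_ints(argc, my_va_addr_of(ap));
      va_end(ap);
    }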
@@ -3459,13 +3459,7 @@ enum fork_context_e {
 extern int __kmp_fork_call(ident_t *loc, int gtid,
                            enum fork_context_e fork_context, kmp_int32 argc,
                            microtask_t microtask, launch_t invoker,
-/* TODO: revert workaround for Intel(R) 64 tracker #96 */
-#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-                           va_list *ap
-#else
-                           va_list ap
-#endif
-                           );
+                           kmp_va_list ap);
 
 extern void __kmp_join_call(ident_t *loc, int gtid
 #if OMPT_SUPPORT
@@ -308,13 +308,7 @@ void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) {
     __kmp_fork_call(loc, gtid, fork_context_intel, argc,
                     VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
                     VOLATILE_CAST(launch_t) __kmp_invoke_task_func,
-/* TODO: revert workaround for Intel(R) 64 tracker #96 */
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-                    &ap
-#else
-                    ap
-#endif
-                    );
+                    kmp_va_addr_of(ap));
 #if INCLUDE_SSC_MARKS
     SSC_MARK_JOINING();
 #endif
@@ -408,16 +402,10 @@ void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask,
   KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
   KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);
 
-  __kmp_fork_call(loc, gtid, fork_context_intel, argc,
-                  VOLATILE_CAST(microtask_t)
-                      __kmp_teams_master, // "wrapped" task
-                  VOLATILE_CAST(launch_t) __kmp_invoke_teams_master,
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-                  &ap
-#else
-                  ap
-#endif
-                  );
+  __kmp_fork_call(
+      loc, gtid, fork_context_intel, argc,
+      VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task
+      VOLATILE_CAST(launch_t) __kmp_invoke_teams_master, kmp_va_addr_of(ap));
   __kmp_join_call(loc, gtid
 #if OMPT_SUPPORT
                   ,
@@ -376,13 +376,7 @@ static
   va_start(ap, argc);
 
   rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
-                       __kmp_invoke_task_func,
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-                       &ap
-#else
-                       ap
-#endif
-                       );
+                       __kmp_invoke_task_func, kmp_va_addr_of(ap));
 
   va_end(ap);
 
@@ -200,6 +200,18 @@ typedef kmp_uint32 kmp_uint;
 #define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
 #define KMP_INT_MIN ((kmp_int32)0x80000000)
 
+// stdarg handling
+#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && \
+    (KMP_OS_FREEBSD || KMP_OS_LINUX)
+typedef va_list *kmp_va_list;
+#define kmp_va_deref(ap) (*(ap))
+#define kmp_va_addr_of(ap) (&(ap))
+#else
+typedef va_list kmp_va_list;
+#define kmp_va_deref(ap) (ap)
+#define kmp_va_addr_of(ap) (ap)
+#endif
+
 #ifdef __cplusplus
 // macros to cast out qualifiers and to re-interpret types
 #define CCAST(type, var) const_cast<type>(var)
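The kmp_os.h hunk above is now the single point that would need to change to cover additional systems. As a purely hypothetical illustration (not part of this commit, and assuming kmp_platform.h defines KMP_OS_NETBSD for that target), extending the handling to one more OS would only mean growing the OS clause:

    // Hypothetical follow-up: only the condition in kmp_os.h changes.
    #if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) && \
        (KMP_OS_FREEBSD || KMP_OS_LINUX || KMP_OS_NETBSD)

Every call site keeps using kmp_va_list, kmp_va_deref() and kmp_va_addr_of() unchanged.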
@@ -1389,13 +1389,7 @@ void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
 int __kmp_fork_call(ident_t *loc, int gtid,
                     enum fork_context_e call_context, // Intel, GNU, ...
                     kmp_int32 argc, microtask_t microtask, launch_t invoker,
-/* TODO: revert workaround for Intel(R) 64 tracker #96 */
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-                    va_list *ap
-#else
-                    va_list ap
-#endif
-                    ) {
+                    kmp_va_list ap) {
   void **argv;
   int i;
   int master_tid;
@@ -1505,12 +1499,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
       parent_team->t.t_argc = argc;
       argv = (void **)parent_team->t.t_argv;
       for (i = argc - 1; i >= 0; --i)
-/* TODO: revert workaround for Intel(R) 64 tracker #96 */
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-        *argv++ = va_arg(*ap, void *);
-#else
-        *argv++ = va_arg(ap, void *);
-#endif
+        *argv++ = va_arg(kmp_va_deref(ap), void *);
       // Increment our nested depth levels, but not increase the serialization
       if (parent_team == master_th->th.th_serial_team) {
         // AC: we are in serialized parallel
@@ -1804,12 +1793,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
       argv = (void **)team->t.t_argv;
       if (ap) {
         for (i = argc - 1; i >= 0; --i)
-// TODO: revert workaround for Intel(R) 64 tracker #96
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-          *argv++ = va_arg(*ap, void *);
-#else
-          *argv++ = va_arg(ap, void *);
-#endif
+          *argv++ = va_arg(kmp_va_deref(ap), void *);
       } else {
         for (i = 0; i < argc; ++i)
           // Get args from parent team for teams construct
@@ -1840,12 +1824,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
     } else {
       argv = args;
       for (i = argc - 1; i >= 0; --i)
-// TODO: revert workaround for Intel(R) 64 tracker #96
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-        *argv++ = va_arg(*ap, void *);
-#else
-        *argv++ = va_arg(ap, void *);
-#endif
+        *argv++ = va_arg(kmp_va_deref(ap), void *);
     KMP_MB();
 
 #if OMPT_SUPPORT
@@ -2130,12 +2109,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
     argv = (void **)team->t.t_argv;
     if (ap) {
       for (i = argc - 1; i >= 0; --i) {
-// TODO: revert workaround for Intel(R) 64 tracker #96
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
-        void *new_argv = va_arg(*ap, void *);
-#else
-        void *new_argv = va_arg(ap, void *);
-#endif
+        void *new_argv = va_arg(kmp_va_deref(ap), void *);
         KMP_CHECK_UPDATE(*argv, new_argv);
         argv++;
       }