Merge llvm, clang, lld, lldb, compiler-rt and libc++ r304149, and update build glue.
Dimitry Andric 2017-05-29 22:09:23 +00:00
commit 302affcb04
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang500-import/; revision=319164
578 changed files with 14494 additions and 9864 deletions


@@ -357,28 +357,22 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
 #if SANITIZER_ANDROID
 INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
-  if (!IsHandledDeadlySignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
     return REAL(bsd_signal)(signum, handler);
-  }
   return 0;
 }
 #endif
 INTERCEPTOR(void*, signal, int signum, void *handler) {
-  if (!IsHandledDeadlySignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
     return REAL(signal)(signum, handler);
-  }
   return nullptr;
 }
 INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
             struct sigaction *oldact) {
-  if (!IsHandledDeadlySignal(signum) ||
-      common_flags()->allow_user_segv_handler) {
+  if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
     return REAL(sigaction)(signum, act, oldact);
-  }
   return 0;
 }


@@ -80,7 +80,7 @@ static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
 INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
                    LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
   CHECK(REAL(SetUnhandledExceptionFilter));
-  if (ExceptionFilter == &SEHHandler || common_flags()->allow_user_segv_handler)
+  if (ExceptionFilter == &SEHHandler)
     return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
   // We record the user provided exception handler to be called for all the
   // exceptions unhandled by asan.


@@ -57,8 +57,8 @@ si_int __popcountsi2(si_int a); // bit population
 si_int __popcountdi2(di_int a);  // bit population
 si_int __popcountti2(ti_int a);  // bit population
-uint32_t __bswapsi2(uint32_t a);   // a byteswapped, arm/mips only
-uint64_t __bswapdi2(uint64_t a);   // a byteswapped, arm/mips only
+uint32_t __bswapsi2(uint32_t a);   // a byteswapped
+uint64_t __bswapdi2(uint64_t a);   // a byteswapped
 // Integral arithmetic


@ -48,7 +48,12 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
// NaN has been ruled out, so __aeabi_cdcmple can't trap // NaN has been ruled out, so __aeabi_cdcmple can't trap
bne __aeabi_cdcmple bne __aeabi_cdcmple
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
msr CPSR_f, #APSR_C msr CPSR_f, #APSR_C
#endif
JMP(lr) JMP(lr)
#endif #endif
END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq) END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
@ -95,17 +100,23 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
lsls r0, r0, #31 lsls r0, r0, #31
pop {r0-r3, pc} pop {r0-r3, pc}
#else #else
ITT(eq)
moveq ip, #0 moveq ip, #0
beq 1f beq 1f
ldm sp, {r0-r3} ldm sp, {r0-r3}
bl __aeabi_dcmpeq bl __aeabi_dcmpeq
cmp r0, #1 cmp r0, #1
ITE(eq)
moveq ip, #(APSR_C | APSR_Z) moveq ip, #(APSR_C | APSR_Z)
movne ip, #(APSR_C) movne ip, #(APSR_C)
1: 1:
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
msr APSR_nzcvq, ip
#else
msr CPSR_f, ip msr CPSR_f, ip
#endif
pop {r0-r3} pop {r0-r3}
POP_PC() POP_PC()
#endif #endif


@ -48,7 +48,12 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
// NaN has been ruled out, so __aeabi_cfcmple can't trap // NaN has been ruled out, so __aeabi_cfcmple can't trap
bne __aeabi_cfcmple bne __aeabi_cfcmple
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
msr CPSR_f, #APSR_C msr CPSR_f, #APSR_C
#endif
JMP(lr) JMP(lr)
#endif #endif
END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq) END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
@ -95,17 +100,23 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
lsls r0, r0, #31 lsls r0, r0, #31
pop {r0-r3, pc} pop {r0-r3, pc}
#else #else
ITT(eq)
moveq ip, #0 moveq ip, #0
beq 1f beq 1f
ldm sp, {r0-r3} ldm sp, {r0-r3}
bl __aeabi_fcmpeq bl __aeabi_fcmpeq
cmp r0, #1 cmp r0, #1
ITE(eq)
moveq ip, #(APSR_C | APSR_Z) moveq ip, #(APSR_C | APSR_Z)
movne ip, #(APSR_C) movne ip, #(APSR_C)
1: 1:
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
msr APSR_nzcvq, ip
#else
msr CPSR_f, ip msr CPSR_f, ip
#endif
pop {r0-r3} pop {r0-r3}
POP_PC() POP_PC()
#endif #endif


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(eq)
moveq r0, #1 // set result register to 1 if equal moveq r0, #1 // set result register to 1 if equal
movne r0, #0 movne r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(eq)
moveq r0, #1 // set result register to 1 if equal moveq r0, #1 // set result register to 1 if equal
movne r0, #0 movne r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ge)
movge r0, #1 // set result register to 1 if greater than or equal movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0 movlt r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ge)
movge r0, #1 // set result register to 1 if greater than or equal movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0 movlt r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(gt)
movgt r0, #1 // set result register to 1 if equal movgt r0, #1 // set result register to 1 if equal
movle r0, #0 movle r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(gt)
movgt r0, #1 // set result register to 1 if equal movgt r0, #1 // set result register to 1 if equal
movle r0, #0 movle r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ls)
movls r0, #1 // set result register to 1 if equal movls r0, #1 // set result register to 1 if equal
movhi r0, #0 movhi r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ls)
movls r0, #1 // set result register to 1 if equal movls r0, #1 // set result register to 1 if equal
movhi r0, #0 movhi r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(mi)
movmi r0, #1 // set result register to 1 if equal movmi r0, #1 // set result register to 1 if equal
movpl r0, #0 movpl r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(mi)
movmi r0, #1 // set result register to 1 if equal movmi r0, #1 // set result register to 1 if equal
movpl r0, #0 movpl r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ne)
movne r0, #1 // set result register to 0 if unequal movne r0, #1 // set result register to 0 if unequal
moveq r0, #0 moveq r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(ne)
movne r0, #1 // set result register to 1 if unequal movne r0, #1 // set result register to 1 if unequal
moveq r0, #0 moveq r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
vcmp.f64 d6, d7 vcmp.f64 d6, d7
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(vs)
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs) movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0 movvc r0, #0
bx lr bx lr


@ -27,6 +27,7 @@ DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
vcmp.f32 s14, s15 vcmp.f32 s14, s15
#endif #endif
vmrs apsr_nzcv, fpscr vmrs apsr_nzcv, fpscr
ITE(vs)
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs) movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0 movvc r0, #0
bx lr bx lr


@ -115,10 +115,12 @@
#if defined(USE_THUMB_2) #if defined(USE_THUMB_2)
#define IT(cond) it cond #define IT(cond) it cond
#define ITT(cond) itt cond #define ITT(cond) itt cond
#define ITE(cond) ite cond
#define WIDE(op) op.w #define WIDE(op) op.w
#else #else
#define IT(cond) #define IT(cond)
#define ITT(cond) #define ITT(cond)
#define ITE(cond)
#define WIDE(op) op #define WIDE(op) op
#endif #endif
#endif /* defined(__arm__) */ #endif /* defined(__arm__) */


@@ -14,15 +14,14 @@
 #include "int_lib.h"
-COMPILER_RT_ABI uint64_t
-__bswapdi2 (uint64_t u)
-{
-  return ((((u) & 0xff00000000000000ULL) >> 56)
-        | (((u) & 0x00ff000000000000ULL) >> 40)
-        | (((u) & 0x0000ff0000000000ULL) >> 24)
-        | (((u) & 0x000000ff00000000ULL) >> 8)
-        | (((u) & 0x00000000ff000000ULL) << 8)
-        | (((u) & 0x0000000000ff0000ULL) << 24)
-        | (((u) & 0x000000000000ff00ULL) << 40)
-        | (((u) & 0x00000000000000ffULL) << 56));
+COMPILER_RT_ABI uint64_t __bswapdi2(uint64_t u) {
+  return ((((u)&0xff00000000000000ULL) >> 56) |
+          (((u)&0x00ff000000000000ULL) >> 40) |
+          (((u)&0x0000ff0000000000ULL) >> 24) |
+          (((u)&0x000000ff00000000ULL) >> 8) |
+          (((u)&0x00000000ff000000ULL) << 8) |
+          (((u)&0x0000000000ff0000ULL) << 24) |
+          (((u)&0x000000000000ff00ULL) << 40) |
+          (((u)&0x00000000000000ffULL) << 56));
 }


@@ -14,12 +14,10 @@
 #include "int_lib.h"
-COMPILER_RT_ABI uint32_t
-__bswapsi2 (uint32_t u)
-{
-  return ((((u) & 0xff000000) >> 24)
-        | (((u) & 0x00ff0000) >> 8)
-        | (((u) & 0x0000ff00) << 8)
-        | (((u) & 0x000000ff) << 24));
+COMPILER_RT_ABI uint32_t __bswapsi2(uint32_t u) {
+  return ((((u)&0xff000000) >> 24) |
+          (((u)&0x00ff0000) >> 8) |
+          (((u)&0x0000ff00) << 8) |
+          (((u)&0x000000ff) << 24));
 }
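For reference, the byte reversal these helpers perform matches the compiler builtins __builtin_bswap32/__builtin_bswap64. A minimal sketch (not part of this commit; it assumes the program can resolve the __bswapsi2 symbol, e.g. by linking the compiler-rt builtins library):

#include <cassert>
#include <cstdint>

// Declaration of the compiler-rt helper shown above; the compiler normally
// emits calls to it on targets without a native byte-swap instruction.
extern "C" uint32_t __bswapsi2(uint32_t u);

int main() {
  // 0x11223344 byte-swapped is 0x44332211.
  assert(__bswapsi2(0x11223344u) == 0x44332211u);
  // The builtin computes the same value.
  assert(__builtin_bswap32(0x11223344u) == 0x44332211u);
  return 0;
}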


@@ -265,19 +265,21 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
     }
     if (flags()->use_tls) {
-      LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
-      if (cache_begin == cache_end) {
-        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
-      } else {
-        // Because LSan should not be loaded with dlopen(), we can assume
-        // that allocator cache will be part of static TLS image.
-        CHECK_LE(tls_begin, cache_begin);
-        CHECK_GE(tls_end, cache_end);
-        if (tls_begin < cache_begin)
-          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
-                               kReachable);
-        if (tls_end > cache_end)
-          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
+      if (tls_begin) {
+        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+        // If the tls and cache ranges don't overlap, scan full tls range,
+        // otherwise, only scan the non-overlapping portions
+        if (cache_begin == cache_end || tls_end < cache_begin ||
+            tls_begin > cache_end) {
+          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+        } else {
+          if (tls_begin < cache_begin)
+            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+                                 kReachable);
+          if (tls_end > cache_end)
+            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+                                 kReachable);
+        }
       }
       if (dtls && !DTLSInDestruction(dtls)) {
         for (uptr j = 0; j < dtls->dtv_size; ++j) {


@@ -91,12 +91,7 @@ LoadedModule *GetLinker() { return nullptr; }
 // Required on Linux for initialization of TLS behavior, but should not be
 // required on Darwin.
-void InitializePlatformSpecificModules() {
-  if (flags()->use_tls) {
-    Report("use_tls=1 is not supported on Darwin.\n");
-    Die();
-  }
-}
+void InitializePlatformSpecificModules() {}
 // Scans global variables for heap pointers.
 void ProcessGlobalRegions(Frontier *frontier) {


@@ -30,7 +30,7 @@ LSAN_FLAG(bool, use_globals, true,
           "Root set: include global variables (.data and .bss)")
 LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
 LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
-LSAN_FLAG(bool, use_tls, !SANITIZER_MAC,
+LSAN_FLAG(bool, use_tls, true,
           "Root set: include TLS and thread-specific storage")
 LSAN_FLAG(bool, use_root_regions, true,
           "Root set: include regions added via __lsan_register_root_region().")


@ -319,5 +319,3 @@ class SizeClassAllocator32 {
ByteMap possible_regions; ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses]; SizeClassInfo size_class_info_array[kNumClasses];
}; };


@@ -380,7 +380,7 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
 // Functions related to signal handling.
 typedef void (*SignalHandlerType)(int, void *, void *);
-bool IsHandledDeadlySignal(int signum);
+HandleSignalMode GetHandleSignalMode(int signum);
 void InstallDeadlySignalHandlers(SignalHandlerType handler);
 const char *DescribeSignalOrException(int signo);
 // Alternative signal stack (POSIX-only).


@ -64,6 +64,11 @@ inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
*t_ = b ? kHandleSignalYes : kHandleSignalNo; *t_ = b ? kHandleSignalYes : kHandleSignalNo;
return true; return true;
} }
if (internal_strcmp(value, "2") == 0 ||
internal_strcmp(value, "exclusive") == 0) {
*t_ = kHandleSignalExclusive;
return true;
}
Printf("ERROR: Invalid value for signal handler option: '%s'\n", value); Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
return false; return false;
} }


@ -21,6 +21,7 @@ namespace __sanitizer {
enum HandleSignalMode { enum HandleSignalMode {
kHandleSignalNo, kHandleSignalNo,
kHandleSignalYes, kHandleSignalYes,
kHandleSignalExclusive,
}; };
struct CommonFlags { struct CommonFlags {


@@ -75,12 +75,13 @@ COMMON_FLAG(bool, print_summary, true,
             "If false, disable printing error summaries in addition to error "
             "reports.")
 COMMON_FLAG(int, print_module_map, 0,
-            "OS X only. 0 = don't print, 1 = print only once before process "
-            "exits, 2 = print after each report.")
+            "OS X only (0 - don't print, 1 - print only once before process "
+            "exits, 2 - print after each report).")
 COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
 #define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
   "Controls custom tool's " #signal " handler (0 - do not registers the " \
-  "handler, 1 - register the handler). "
+  "handler, 1 - register the handler and allow user to set own, " \
+  "2 - registers the handler and block user from changing it). "
 COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
             COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
 COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
@@ -92,9 +93,6 @@ COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
 COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
             COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
 #undef COMMON_FLAG_HANDLE_SIGNAL_HELP
-COMMON_FLAG(bool, allow_user_segv_handler, false,
-            "If set, allows user to register a SEGV handler even if the tool "
-            "registers one.")
 COMMON_FLAG(bool, use_sigaltstack, true,
             "If set, uses alternate stack for signal handling.")
 COMMON_FLAG(bool, detect_deadlocks, false,
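The removed allow_user_segv_handler flag is folded into the handler-mode flags above: 0 installs no handler, 1 installs the tool's handler but lets the program later replace it, and 2 installs it and ignores later registrations (the kHandleSignalExclusive case checked by the intercepted signal()/sigaction() calls). A minimal sketch of the difference from the program's side (the handler name is illustrative, not part of this commit):

#include <csignal>
#include <cstdio>

// Hypothetical user handler. Run under ASan as, for example:
//   ASAN_OPTIONS=handle_segv=1 ./a.out  -> this handler replaces the tool's
//   ASAN_OPTIONS=handle_segv=2 ./a.out  -> the intercepted signal() call is
//                                          dropped and the tool keeps its own
static void user_segv_handler(int) { std::puts("user handler ran"); }

int main() {
  std::signal(SIGSEGV, user_segv_handler);
  return 0;
}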


@@ -62,8 +62,6 @@
 #if SANITIZER_FREEBSD
 #include <sys/exec.h>
 #include <sys/sysctl.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
 #include <machine/atomic.h>
 extern "C" {
 // <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
@@ -263,7 +261,7 @@ uptr internal_stat(const char *path, void *buf) {
 uptr internal_lstat(const char *path, void *buf) {
 #if SANITIZER_FREEBSD
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path,
                           (uptr)buf, AT_SYMLINK_NOFOLLOW);
 #elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
                           (uptr)buf, AT_SYMLINK_NOFOLLOW);
@@ -551,7 +549,7 @@ void BlockingMutex::Lock() {
 void BlockingMutex::Unlock() {
   atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
   CHECK_NE(v, MtxUnlocked);
   if (v == MtxSleeping) {
 #if SANITIZER_FREEBSD
@@ -1398,7 +1396,7 @@ AndroidApiLevel AndroidGetApiLevel() {
 #endif
-bool IsHandledDeadlySignal(int signum) {
+HandleSignalMode GetHandleSignalMode(int signum) {
   switch (signum) {
     case SIGABRT:
       return common_flags()->handle_abort;
@@ -1411,7 +1409,7 @@ bool IsHandledDeadlySignal(int signum) {
     case SIGBUS:
       return common_flags()->handle_sigbus;
   }
-  return false;
+  return kHandleSignalNo;
 }
 #if !SANITIZER_GO


@ -178,6 +178,13 @@ static bool FixedCVE_2016_2143() {
// 4.4.6+ is OK. // 4.4.6+ is OK.
if (minor == 4 && patch >= 6) if (minor == 4 && patch >= 6)
return true; return true;
if (minor == 4 && patch == 0 && ptr[0] == '-' &&
internal_strstr(buf.version, "Ubuntu")) {
// Check Ubuntu 16.04
int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
if (r1 >= 13) // 4.4.0-13 or later
return true;
}
// Otherwise, OK if 4.5+. // Otherwise, OK if 4.5+.
return minor >= 5; return minor >= 5;
} else { } else {


@ -370,6 +370,27 @@ uptr GetTlsSize() {
void InitTlsSize() { void InitTlsSize() {
} }
uptr TlsBaseAddr() {
uptr segbase = 0;
#if defined(__x86_64__)
asm("movq %%gs:0,%0" : "=r"(segbase));
#elif defined(__i386__)
asm("movl %%gs:0,%0" : "=r"(segbase));
#endif
return segbase;
}
// The size of the tls on darwin does not appear to be well documented,
// however the vm memory map suggests that it is 1024 uptrs in size,
// with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386.
uptr TlsSize() {
#if defined(__x86_64__) || defined(__i386__)
return 1024 * sizeof(uptr);
#else
return 0;
#endif
}
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) { uptr *tls_addr, uptr *tls_size) {
#if !SANITIZER_GO #if !SANITIZER_GO
@@ -377,8 +398,8 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
   GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
   *stk_addr = stack_bottom;
   *stk_size = stack_top - stack_bottom;
-  *tls_addr = 0;
-  *tls_size = 0;
+  *tls_addr = TlsBaseAddr();
+  *tls_size = TlsSize();
 #else
   *stk_addr = 0;
   *stk_size = 0;
@@ -393,10 +414,10 @@ void ListOfModules::init() {
   memory_mapping.DumpListOfModules(&modules_);
 }
-bool IsHandledDeadlySignal(int signum) {
+HandleSignalMode GetHandleSignalMode(int signum) {
   // Handling fatal signals on watchOS and tvOS devices is disallowed.
   if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
-    return false;
+    return kHandleSignalNo;
   switch (signum) {
     case SIGABRT:
       return common_flags()->handle_abort;
@@ -409,7 +430,7 @@ bool IsHandledDeadlySignal(int signum) {
     case SIGBUS:
       return common_flags()->handle_sigbus;
   }
-  return false;
+  return kHandleSignalNo;
 }
 MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;


@@ -134,7 +134,8 @@ void SleepForMillis(int millis) {
 void Abort() {
 #if !SANITIZER_GO
   // If we are handling SIGABRT, unhandle it first.
-  if (IsHandledDeadlySignal(SIGABRT)) {
+  // TODO(vitalybuka): Check if handler belongs to sanitizer.
+  if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
     struct sigaction sigact;
     internal_memset(&sigact, 0, sizeof(sigact));
     sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
@@ -188,8 +189,26 @@ void UnsetAlternateSignalStack() {
 static void MaybeInstallSigaction(int signum,
                                   SignalHandlerType handler) {
-  if (!IsHandledDeadlySignal(signum))
-    return;
+  switch (GetHandleSignalMode(signum)) {
+    case kHandleSignalNo:
+      return;
+    case kHandleSignalYes: {
+      struct sigaction sigact;
+      internal_memset(&sigact, 0, sizeof(sigact));
+      CHECK_EQ(0, internal_sigaction(signum, nullptr, &sigact));
+      if (sigact.sa_flags & SA_SIGINFO) {
+        if (sigact.sa_sigaction) return;
+      } else {
+        if (sigact.sa_handler != SIG_DFL && sigact.sa_handler != SIG_IGN &&
+            sigact.sa_handler != SIG_ERR)
+          return;
+      }
+      break;
+    }
+    case kHandleSignalExclusive:
+      break;
+  }
   struct sigaction sigact;
   internal_memset(&sigact, 0, sizeof(sigact));
   sigact.sa_sigaction = (sa_sigaction_t)handler;


@@ -832,9 +832,9 @@ void InstallDeadlySignalHandlers(SignalHandlerType handler) {
   // FIXME: Decide what to do on Windows.
 }
-bool IsHandledDeadlySignal(int signum) {
+HandleSignalMode GetHandleSignalMode(int signum) {
   // FIXME: Decide what to do on Windows.
-  return false;
+  return kHandleSignalNo;
 }
 // Check based on flags if we should handle this exception.


@@ -18,7 +18,6 @@
 #include "scudo_tls.h"
-#include <limits.h>
 #include <pthread.h>
 namespace __scudo {
@@ -32,15 +31,17 @@ __attribute__((tls_model("initial-exec")))
 THREADLOCAL ScudoThreadContext ThreadLocalContext;
 static void teardownThread(void *Ptr) {
-  uptr Iteration = reinterpret_cast<uptr>(Ptr);
+  uptr I = reinterpret_cast<uptr>(Ptr);
   // The glibc POSIX thread-local-storage deallocation routine calls user
   // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
   // We want to be called last since other destructors might call free and the
   // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
   // quarantine and swallowing the cache.
-  if (Iteration < PTHREAD_DESTRUCTOR_ITERATIONS) {
-    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Iteration + 1));
-    return;
+  if (I > 1) {
+    // If pthread_setspecific fails, we will go ahead with the teardown.
+    if (LIKELY(pthread_setspecific(PThreadKey,
+                                   reinterpret_cast<void *>(I - 1)) == 0))
+      return;
   }
   ThreadLocalContext.commitBack();
   ScudoThreadState = ThreadTornDown;
@@ -53,8 +54,9 @@ static void initOnce() {
 }
 void initThread() {
-  pthread_once(&GlobalInitialized, initOnce);
-  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
+  CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
+  CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
+      GetPthreadDestructorIterations())), 0);
   ThreadLocalContext.init();
   ScudoThreadState = ThreadInitialized;
 }
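The teardown logic above relies on a general pthread pattern: a key destructor that re-registers its own value so the runtime calls it again on a later destructor iteration, letting it run after other destructors have finished. A standalone sketch of that pattern, independent of Scudo (names such as late_teardown are illustrative):

#include <cstdio>
#include <limits.h>   // PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>
#include <stdint.h>

static pthread_key_t key;

// While the counter is above 1, put the value back so we are called again on
// the next iteration; only on the last call do the real work.
static void late_teardown(void *ptr) {
  uintptr_t remaining = (uintptr_t)ptr;
  if (remaining > 1 &&
      pthread_setspecific(key, (void *)(remaining - 1)) == 0)
    return;
  std::puts("tearing down last");
}

static void *worker(void *) {
  // Start the countdown at the implementation's destructor-iteration limit.
  pthread_setspecific(key, (void *)(uintptr_t)PTHREAD_DESTRUCTOR_ITERATIONS);
  return nullptr;
}

int main() {
  pthread_key_create(&key, late_teardown);
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  pthread_key_delete(&key);
  return 0;
}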


@ -816,6 +816,7 @@ void FlushShadowMemory();
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive); void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
int ExtractResolvFDs(void *state, int *fds, int nfd); int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd); int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime, void *abstime), void *c, void *m, void *abstime,


@ -320,6 +320,20 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
return res; return res;
} }
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// Check that the thr object is in tls;
const uptr thr_beg = (uptr)thr;
const uptr thr_end = (uptr)thr + sizeof(*thr);
CHECK_GE(thr_beg, tls_addr);
CHECK_LE(thr_beg, tls_addr + tls_size);
CHECK_GE(thr_end, tls_addr);
CHECK_LE(thr_end, tls_addr + tls_size);
// Since the thr object is huge, skip it.
MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
tls_addr + tls_size - thr_end);
}
// Note: this function runs with async signals enabled, // Note: this function runs with async signals enabled,
// so it must not touch any tsan state. // so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,


@@ -75,12 +75,18 @@ static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
 static uptr main_thread_identity = 0;
 ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
+ThreadState **cur_thread_location() {
+  ThreadState **thread_identity = (ThreadState **)pthread_self();
+  return ((uptr)thread_identity == main_thread_identity) ? nullptr
+                                                         : thread_identity;
+}
 ThreadState *cur_thread() {
-  uptr thread_identity = (uptr)pthread_self();
-  if (thread_identity == main_thread_identity || main_thread_identity == 0) {
+  ThreadState **thr_state_loc = cur_thread_location();
+  if (thr_state_loc == nullptr || main_thread_identity == 0) {
     return (ThreadState *)&main_thread_state;
   }
-  ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
+  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
   ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
       (uptr *)fake_tls, sizeof(ThreadState));
   return thr;
@@ -90,13 +96,13 @@ ThreadState *cur_thread() {
 // munmap first and then clear `fake_tls`; if we receive a signal in between,
 // handler will try to access the unmapped ThreadState.
 void cur_thread_finalize() {
-  uptr thread_identity = (uptr)pthread_self();
-  if (thread_identity == main_thread_identity) {
+  ThreadState **thr_state_loc = cur_thread_location();
+  if (thr_state_loc == nullptr) {
     // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
     // exit the main thread. Let's keep the main thread's ThreadState.
     return;
   }
-  ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
+  ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
   internal_munmap(*fake_tls, sizeof(ThreadState));
   *fake_tls = nullptr;
 }
@ -239,6 +245,29 @@ void InitializePlatform() {
#endif #endif
} }
#if !SANITIZER_GO
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// The pointer to the ThreadState object is stored in the shadow memory
// of the tls.
uptr tls_end = tls_addr + tls_size;
ThreadState **thr_state_loc = cur_thread_location();
if (thr_state_loc == nullptr) {
MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
} else {
uptr thr_state_start = (uptr)thr_state_loc;
uptr thr_state_end = thr_state_start + sizeof(uptr);
CHECK_GE(thr_state_start, tls_addr);
CHECK_LE(thr_state_start, tls_addr + tls_size);
CHECK_GE(thr_state_end, tls_addr);
CHECK_LE(thr_state_end, tls_addr + tls_size);
MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
thr_state_start - tls_addr);
MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
tls_end - thr_state_end);
}
}
#endif
#if !SANITIZER_GO #if !SANITIZER_GO
// Note: this function runs with async signals enabled, // Note: this function runs with async signals enabled,
// so it must not touch any tsan state. // so it must not touch any tsan state.


@@ -248,19 +248,7 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) {
   if (stk_addr && stk_size)
     MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
-  if (tls_addr && tls_size) {
-    // Check that the thr object is in tls;
-    const uptr thr_beg = (uptr)thr;
-    const uptr thr_end = (uptr)thr + sizeof(*thr);
-    CHECK_GE(thr_beg, tls_addr);
-    CHECK_LE(thr_beg, tls_addr + tls_size);
-    CHECK_GE(thr_end, tls_addr);
-    CHECK_LE(thr_end, tls_addr + tls_size);
-    // Since the thr object is huge, skip it.
-    MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
-    MemoryRangeImitateWrite(thr, /*pc=*/ 2,
-                            thr_end, tls_addr + tls_size - thr_end);
-  }
+  if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
 }
 #endif


@ -1126,6 +1126,10 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
# define _LIBCPP_HAS_NO_IS_AGGREGATE # define _LIBCPP_HAS_NO_IS_AGGREGATE
#endif #endif
#if !defined(__cpp_coroutines) || __cpp_coroutines < 201703L
# define _LIBCPP_HAS_NO_COROUTINES
#endif
#endif // __cplusplus #endif // __cplusplus
// Decide whether to use availability macros. // Decide whether to use availability macros.


@@ -27,7 +27,7 @@
 # include <pthread.h>
 # include <sched.h>
 #elif defined(_LIBCPP_HAS_THREAD_API_WIN32)
-#include <Windows.h>
+#include <windows.h>
 #include <process.h>
 #include <fibersapi.h>
 #include <__undef_min_max>


@ -35,6 +35,9 @@ template <class InputIterator, class Function>
Function Function
for_each(InputIterator first, InputIterator last, Function f); for_each(InputIterator first, InputIterator last, Function f);
template<class InputIterator, class Size, class Function>
InputIterator for_each_n(InputIterator first, Size n, Function f); // C++17
template <class InputIterator, class T> template <class InputIterator, class T>
InputIterator InputIterator
find(InputIterator first, InputIterator last, const T& value); find(InputIterator first, InputIterator last, const T& value);
@ -961,6 +964,26 @@ for_each(_InputIterator __first, _InputIterator __last, _Function __f)
return __f; return __f;
} }
#if _LIBCPP_STD_VER > 14
// for_each_n
template <class _InputIterator, class _Size, class _Function>
inline _LIBCPP_INLINE_VISIBILITY
_InputIterator
for_each_n(_InputIterator __first, _Size __orig_n, _Function __f)
{
typedef decltype(__convert_to_integral(__orig_n)) _IntegralSize;
_IntegralSize __n = __orig_n;
while (__n > 0)
{
__f(*__first);
++__first;
--__n;
}
return __first;
}
#endif
// find // find
template <class _InputIterator, class _Tp> template <class _InputIterator, class _Tp>


@ -44,6 +44,13 @@
#define _LIBCPP_END_NAMESPACE_EXPERIMENTAL_FILESYSTEM \ #define _LIBCPP_END_NAMESPACE_EXPERIMENTAL_FILESYSTEM \
} } _LIBCPP_END_NAMESPACE_EXPERIMENTAL } } _LIBCPP_END_NAMESPACE_EXPERIMENTAL
#define _LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL_COROUTINES \
_LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL inline namespace coroutines_v1 {
#define _LIBCPP_END_NAMESPACE_EXPERIMENTAL_COROUTINES \
} _LIBCPP_END_NAMESPACE_EXPERIMENTAL
#define _VSTD_CORO _VSTD_EXPERIMENTAL::coroutines_v1
#define _VSTD_FS ::std::experimental::filesystem::v1 #define _VSTD_FS ::std::experimental::filesystem::v1


@ -0,0 +1,270 @@
// -*- C++ -*-
//===----------------------------- coroutine -----------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef _LIBCPP_EXPERIMENTAL_COROUTINE
#define _LIBCPP_EXPERIMENTAL_COROUTINE
/**
experimental/coroutine synopsis
// C++next
namespace std {
namespace experimental {
inline namespace coroutines_v1 {
// 18.11.1 coroutine traits
template <typename R, typename... ArgTypes>
class coroutine_traits;
// 18.11.2 coroutine handle
template <typename Promise = void>
class coroutine_handle;
// 18.11.2.7 comparison operators:
bool operator==(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
bool operator!=(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
bool operator<(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
bool operator<=(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
bool operator>=(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
bool operator>(coroutine_handle<> x, coroutine_handle<> y) _NOEXCEPT;
// 18.11.3 trivial awaitables
struct suspend_never;
struct suspend_always;
// 18.11.2.8 hash support:
template <class T> struct hash;
template <class P> struct hash<coroutine_handle<P>>;
} // namespace coroutines_v1
} // namespace experimental
} // namespace std
*/
#include <experimental/__config>
#include <new>
#include <type_traits>
#include <functional>
#include <memory> // for hash<T*>
#include <cstddef>
#include <cassert>
#include <__debug>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif
#ifdef _LIBCPP_HAS_NO_COROUTINES
# if defined(_LIBCPP_WARNING)
_LIBCPP_WARNING("<experimental/coroutine> cannot be used with this compiler")
# else
# warning <experimental/coroutine> cannot be used with this compiler
# endif
#endif
#ifndef _LIBCPP_HAS_NO_COROUTINES
_LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL_COROUTINES
template <class _Tp, class = void>
struct __coroutine_traits_sfinae {};
template <class _Tp>
struct __coroutine_traits_sfinae<
_Tp, typename __void_t<typename _Tp::promise_type>::type>
{
using promise_type = typename _Tp::promise_type;
};
template <typename _Ret, typename... _Args>
struct _LIBCPP_TEMPLATE_VIS coroutine_traits
: public __coroutine_traits_sfinae<_Ret>
{
};
template <typename _Promise = void>
class _LIBCPP_TEMPLATE_VIS coroutine_handle;
template <>
class _LIBCPP_TEMPLATE_VIS coroutine_handle<void> {
public:
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR coroutine_handle() _NOEXCEPT : __handle_(nullptr) {}
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR coroutine_handle(nullptr_t) _NOEXCEPT : __handle_(nullptr) {}
_LIBCPP_ALWAYS_INLINE
coroutine_handle& operator=(nullptr_t) _NOEXCEPT {
__handle_ = nullptr;
return *this;
}
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR void* address() const _NOEXCEPT { return __handle_; }
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR explicit operator bool() const _NOEXCEPT { return __handle_; }
_LIBCPP_ALWAYS_INLINE
void operator()() { resume(); }
_LIBCPP_ALWAYS_INLINE
void resume() {
_LIBCPP_ASSERT(__is_suspended(),
"resume() can only be called on suspended coroutines");
_LIBCPP_ASSERT(!done(),
"resume() has undefined behavior when the coroutine is done");
__builtin_coro_resume(__handle_);
}
_LIBCPP_ALWAYS_INLINE
void destroy() {
_LIBCPP_ASSERT(__is_suspended(),
"destroy() can only be called on suspended coroutines");
__builtin_coro_destroy(__handle_);
}
_LIBCPP_ALWAYS_INLINE
bool done() const {
_LIBCPP_ASSERT(__is_suspended(),
"done() can only be called on suspended coroutines");
return __builtin_coro_done(__handle_);
}
public:
_LIBCPP_ALWAYS_INLINE
static coroutine_handle from_address(void* __addr) _NOEXCEPT {
coroutine_handle __tmp;
__tmp.__handle_ = __addr;
return __tmp;
}
private:
bool __is_suspended() const _NOEXCEPT {
// FIXME actually implement a check for if the coro is suspended.
return __handle_;
}
template <class _PromiseT> friend class coroutine_handle;
void* __handle_;
};
// 18.11.2.7 comparison operators:
inline _LIBCPP_ALWAYS_INLINE
bool operator==(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return __x.address() == __y.address();
}
inline _LIBCPP_ALWAYS_INLINE
bool operator!=(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return !(__x == __y);
}
inline _LIBCPP_ALWAYS_INLINE
bool operator<(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return less<void*>()(__x.address(), __y.address());
}
inline _LIBCPP_ALWAYS_INLINE
bool operator>(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return __y < __x;
}
inline _LIBCPP_ALWAYS_INLINE
bool operator<=(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return !(__x > __y);
}
inline _LIBCPP_ALWAYS_INLINE
bool operator>=(coroutine_handle<> __x, coroutine_handle<> __y) _NOEXCEPT {
return !(__x < __y);
}
template <typename _Promise>
class _LIBCPP_TEMPLATE_VIS coroutine_handle : public coroutine_handle<> {
using _Base = coroutine_handle<>;
public:
#ifndef _LIBCPP_CXX03_LANG
// 18.11.2.1 construct/reset
using coroutine_handle<>::coroutine_handle;
#else
_LIBCPP_ALWAYS_INLINE coroutine_handle() _NOEXCEPT : _Base() {}
_LIBCPP_ALWAYS_INLINE coroutine_handle(nullptr_t) _NOEXCEPT : _Base(nullptr) {}
#endif
_LIBCPP_INLINE_VISIBILITY
coroutine_handle& operator=(nullptr_t) _NOEXCEPT {
_Base::operator=(nullptr);
return *this;
}
_LIBCPP_INLINE_VISIBILITY
_Promise& promise() const {
return *reinterpret_cast<_Promise*>(
__builtin_coro_promise(this->__handle_, __alignof(_Promise), false));
}
public:
_LIBCPP_ALWAYS_INLINE
static coroutine_handle from_address(void* __addr) _NOEXCEPT {
coroutine_handle __tmp;
__tmp.__handle_ = __addr;
return __tmp;
}
// NOTE: this overload isn't required by the standard but is needed so
// the deleted _Promise* overload doesn't make from_address(nullptr)
// ambiguous.
// FIXME: should from_address work with nullptr?
_LIBCPP_ALWAYS_INLINE
static coroutine_handle from_address(nullptr_t) _NOEXCEPT {
return {};
}
// from_address cannot be used with the coroutines promise type.
static coroutine_handle from_address(_Promise*) = delete;
_LIBCPP_ALWAYS_INLINE
static coroutine_handle from_promise(_Promise& __promise) _NOEXCEPT {
coroutine_handle __tmp;
__tmp.__handle_ = __builtin_coro_promise(_VSTD::addressof(__promise),
__alignof(_Promise), true);
return __tmp;
}
};
struct _LIBCPP_TYPE_VIS suspend_never {
_LIBCPP_ALWAYS_INLINE
bool await_ready() const _NOEXCEPT { return true; }
_LIBCPP_ALWAYS_INLINE
void await_suspend(coroutine_handle<>) const _NOEXCEPT {}
_LIBCPP_ALWAYS_INLINE
void await_resume() const _NOEXCEPT {}
};
struct _LIBCPP_TYPE_VIS suspend_always {
_LIBCPP_ALWAYS_INLINE
bool await_ready() const _NOEXCEPT { return false; }
_LIBCPP_ALWAYS_INLINE
void await_suspend(coroutine_handle<>) const _NOEXCEPT {}
_LIBCPP_ALWAYS_INLINE
void await_resume() const _NOEXCEPT {}
};
_LIBCPP_END_NAMESPACE_EXPERIMENTAL_COROUTINES
_LIBCPP_BEGIN_NAMESPACE_STD
template <class _Tp>
struct hash<_VSTD_CORO::coroutine_handle<_Tp> > {
using __arg_type = _VSTD_CORO::coroutine_handle<_Tp>;
_LIBCPP_INLINE_VISIBILITY
size_t operator()(__arg_type const& __v) const _NOEXCEPT
{return hash<void*>()(__v.address());}
};
_LIBCPP_END_NAMESPACE_STD
#endif // !defined(_LIBCPP_HAS_NO_COROUTINES)
#endif /* _LIBCPP_EXPERIMENTAL_COROUTINE */
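A minimal sketch of how this header is intended to be used: a hand-written generator whose promise type yields ints. It is not part of this commit, requires a compiler with coroutines-TS support (clang's -fcoroutines-ts), and the IntGenerator/counter names are purely illustrative:

#include <experimental/coroutine>
#include <exception>
#include <iostream>

struct IntGenerator {
  struct promise_type {
    int current = 0;
    IntGenerator get_return_object() {
      return {std::experimental::coroutine_handle<promise_type>::from_promise(*this)};
    }
    std::experimental::suspend_always initial_suspend() { return {}; }
    std::experimental::suspend_always final_suspend() { return {}; }
    std::experimental::suspend_always yield_value(int v) {
      current = v;
      return {};
    }
    void return_void() {}
    void unhandled_exception() { std::terminate(); }
  };
  std::experimental::coroutine_handle<promise_type> handle;
};

IntGenerator counter(int limit) {
  for (int i = 0; i < limit; ++i)
    co_yield i;  // suspends here, handing i to the promise
}

int main() {
  IntGenerator gen = counter(3);
  // The coroutine starts suspended (initial_suspend is suspend_always).
  for (gen.handle.resume(); !gen.handle.done(); gen.handle.resume())
    std::cout << gen.handle.promise().current << '\n';  // prints 0, 1, 2
  gen.handle.destroy();  // suspended at its final suspend point, safe to destroy
  return 0;
}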


@ -990,7 +990,6 @@ public:
_LIBCPP_INLINE_VISIBILITY char_type operator*() const _LIBCPP_INLINE_VISIBILITY char_type operator*() const
{return static_cast<char_type>(__sbuf_->sgetc());} {return static_cast<char_type>(__sbuf_->sgetc());}
_LIBCPP_INLINE_VISIBILITY char_type* operator->() const {return nullptr;}
_LIBCPP_INLINE_VISIBILITY istreambuf_iterator& operator++() _LIBCPP_INLINE_VISIBILITY istreambuf_iterator& operator++()
{ {
__sbuf_->sbumpc(); __sbuf_->sbumpc();


@ -2251,6 +2251,8 @@ void swap(__compressed_pair<_T1, _T2>& __x, __compressed_pair<_T1, _T2>& __y)
template <class _Tp> template <class _Tp>
struct _LIBCPP_TEMPLATE_VIS default_delete { struct _LIBCPP_TEMPLATE_VIS default_delete {
static_assert(!is_function<_Tp>::value,
"default_delete cannot be instantiated for function types");
#ifndef _LIBCPP_CXX03_LANG #ifndef _LIBCPP_CXX03_LANG
_LIBCPP_INLINE_VISIBILITY constexpr default_delete() noexcept = default; _LIBCPP_INLINE_VISIBILITY constexpr default_delete() noexcept = default;
#else #else
@ -3653,6 +3655,18 @@ __shared_ptr_emplace<_Tp, _Alloc>::__on_zero_shared_weak() _NOEXCEPT
__a.deallocate(_PTraits::pointer_to(*this), 1); __a.deallocate(_PTraits::pointer_to(*this), 1);
} }
struct __shared_ptr_dummy_rebind_allocator_type;
template <>
class _LIBCPP_TEMPLATE_VIS allocator<__shared_ptr_dummy_rebind_allocator_type>
{
public:
template <class _Other>
struct rebind
{
typedef allocator<_Other> other;
};
};
template<class _Tp> class _LIBCPP_TEMPLATE_VIS enable_shared_from_this; template<class _Tp> class _LIBCPP_TEMPLATE_VIS enable_shared_from_this;
template<class _Tp> template<class _Tp>
@ -3921,6 +3935,17 @@ public:
#endif // _LIBCPP_HAS_NO_VARIADICS #endif // _LIBCPP_HAS_NO_VARIADICS
private: private:
template <class _Yp, bool = is_function<_Yp>::value>
struct __shared_ptr_default_allocator
{
typedef allocator<_Yp> type;
};
template <class _Yp>
struct __shared_ptr_default_allocator<_Yp, true>
{
typedef allocator<__shared_ptr_dummy_rebind_allocator_type> type;
};
template <class _Yp, class _OrigPtr> template <class _Yp, class _OrigPtr>
_LIBCPP_INLINE_VISIBILITY _LIBCPP_INLINE_VISIBILITY
@@ -3939,8 +3964,7 @@
         }
     }
-    _LIBCPP_INLINE_VISIBILITY
-    void __enable_weak_this(const volatile void*, const volatile void*) _NOEXCEPT {}
+    _LIBCPP_INLINE_VISIBILITY void __enable_weak_this(...) _NOEXCEPT {}
     template <class _Up> friend class _LIBCPP_TEMPLATE_VIS shared_ptr;
     template <class _Up> friend class _LIBCPP_TEMPLATE_VIS weak_ptr;
@@ -3972,8 +3996,9 @@ shared_ptr<_Tp>::shared_ptr(_Yp* __p,
     : __ptr_(__p)
 {
     unique_ptr<_Yp> __hold(__p);
-    typedef __shared_ptr_pointer<_Yp*, default_delete<_Yp>, allocator<_Yp> > _CntrlBlk;
-    __cntrl_ = new _CntrlBlk(__p, default_delete<_Yp>(), allocator<_Yp>());
+    typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
+    typedef __shared_ptr_pointer<_Yp*, default_delete<_Yp>, _AllocT > _CntrlBlk;
+    __cntrl_ = new _CntrlBlk(__p, default_delete<_Yp>(), _AllocT());
     __hold.release();
     __enable_weak_this(__p, __p);
 }
@@ -3988,8 +4013,9 @@ shared_ptr<_Tp>::shared_ptr(_Yp* __p, _Dp __d,
     try
     {
 #endif  // _LIBCPP_NO_EXCEPTIONS
-        typedef __shared_ptr_pointer<_Yp*, _Dp, allocator<_Yp> > _CntrlBlk;
-        __cntrl_ = new _CntrlBlk(__p, __d, allocator<_Yp>());
+        typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
+        typedef __shared_ptr_pointer<_Yp*, _Dp, _AllocT > _CntrlBlk;
+        __cntrl_ = new _CntrlBlk(__p, __d, _AllocT());
         __enable_weak_this(__p, __p);
 #ifndef _LIBCPP_NO_EXCEPTIONS
     }
@@ -4010,8 +4036,9 @@ shared_ptr<_Tp>::shared_ptr(nullptr_t __p, _Dp __d)
     try
     {
 #endif  // _LIBCPP_NO_EXCEPTIONS
-        typedef __shared_ptr_pointer<nullptr_t, _Dp, allocator<_Tp> > _CntrlBlk;
-        __cntrl_ = new _CntrlBlk(__p, __d, allocator<_Tp>());
+        typedef typename __shared_ptr_default_allocator<_Tp>::type _AllocT;
+        typedef __shared_ptr_pointer<nullptr_t, _Dp, _AllocT > _CntrlBlk;
+        __cntrl_ = new _CntrlBlk(__p, __d, _AllocT());
 #ifndef _LIBCPP_NO_EXCEPTIONS
     }
     catch (...)
@@ -4179,8 +4206,9 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp> __r,
     else
 #endif
     {
-        typedef __shared_ptr_pointer<_Yp*, _Dp, allocator<_Yp> > _CntrlBlk;
-        __cntrl_ = new _CntrlBlk(__r.get(), __r.get_deleter(), allocator<_Yp>());
+        typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
+        typedef __shared_ptr_pointer<_Yp*, _Dp, _AllocT > _CntrlBlk;
+        __cntrl_ = new _CntrlBlk(__r.get(), __r.get_deleter(), _AllocT());
         __enable_weak_this(__r.get(), __r.get());
     }
     __r.release();
@@ -4208,10 +4236,11 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp> __r,
     else
 #endif
     {
+        typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
         typedef __shared_ptr_pointer<_Yp*,
                                      reference_wrapper<typename remove_reference<_Dp>::type>,
-                                     allocator<_Yp> > _CntrlBlk;
-        __cntrl_ = new _CntrlBlk(__r.get(), ref(__r.get_deleter()), allocator<_Yp>());
+                                     _AllocT > _CntrlBlk;
+        __cntrl_ = new _CntrlBlk(__r.get(), ref(__r.get_deleter()), _AllocT());
         __enable_weak_this(__r.get(), __r.get());
     }
     __r.release();


@ -500,6 +500,10 @@ module std [system] {
module chrono { module chrono {
header "experimental/chrono" header "experimental/chrono"
export * export *
}
module coroutine {
header "experimental/coroutine"
export *
} }
module deque { module deque {
header "experimental/deque" header "experimental/deque"


@ -59,6 +59,7 @@ class Triple {
mips64, // MIPS64: mips64 mips64, // MIPS64: mips64
mips64el, // MIPS64EL: mips64el mips64el, // MIPS64EL: mips64el
msp430, // MSP430: msp430 msp430, // MSP430: msp430
nios2, // NIOSII: nios2
ppc, // PPC: powerpc ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu ppc64, // PPC64: powerpc64, ppu
ppc64le, // PPC64LE: powerpc64le ppc64le, // PPC64LE: powerpc64le


@ -70,174 +70,173 @@ struct SimplifyQuery {
Copy.CxtI = I; Copy.CxtI = I;
return Copy; return Copy;
} }
}; };
// NOTE: the explicit multiple argument versions of these functions are // NOTE: the explicit multiple argument versions of these functions are
// deprecated. // deprecated.
// Please use the SimplifyQuery versions in new code. // Please use the SimplifyQuery versions in new code.
/// Given operands for an Add, fold the result or return null. /// Given operands for an Add, fold the result or return null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for a Sub, fold the result or return null. /// Given operands for a Sub, fold the result or return null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for an FAdd, fold the result or return null. /// Given operands for an FAdd, fold the result or return null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for an FSub, fold the result or return null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for an FMul, fold the result or return null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for a Mul, fold the result or return null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an SDiv, fold the result or return null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for a UDiv, fold the result or return null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FDiv, fold the result or return null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for an SRem, fold the result or return null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for a URem, fold the result or return null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FRem, fold the result or return null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for a Shl, fold the result or return null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
/// Given operands for a LShr, fold the result or return null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for a AShr, fold the result or return nulll.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for an And, fold the result or return null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Or, fold the result or return null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Xor, fold the result or return null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an ICmpInst, fold the result or return null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for an FCmpInst, fold the result or return null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
/// Given operands for a SelectInst, fold the result or return null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for an FSub, fold the result or return null. /// Given operands for a GetElementPtrInst, fold the result or return null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for an FMul, fold the result or return null. /// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for a Mul, fold the result or return null. /// Given operands for an ExtractValueInst, fold the result or return null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q); Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
/// Given operands for an SDiv, fold the result or return null. /// Given operands for an ExtractElementInst, fold the result or return null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q); Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
/// Given operands for a UDiv, fold the result or return null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FDiv, fold the result or return null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for an SRem, fold the result or return null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for a URem, fold the result or return null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an FRem, fold the result or return null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
/// Given operands for a Shl, fold the result or return null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
/// Given operands for a LShr, fold the result or return null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for a AShr, fold the result or return nulll.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
/// Given operands for an And, fold the result or return null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Or, fold the result or return null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an Xor, fold the result or return null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
/// Given operands for an ICmpInst, fold the result or return null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for an FCmpInst, fold the result or return null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
/// Given operands for a SelectInst, fold the result or return null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q);
/// Given operands for a GetElementPtrInst, fold the result or return null.
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const SimplifyQuery &Q);
/// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
/// Given operands for an ExtractValueInst, fold the result or return null.
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
/// Given operands for an ExtractElementInst, fold the result or return null. /// Given operands for a CastInst, fold the result or return null.
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const SimplifyQuery &Q); const SimplifyQuery &Q);
/// Given operands for a CastInst, fold the result or return null. /// Given operands for a ShuffleVectorInst, fold the result or return null.
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
const SimplifyQuery &Q); Type *RetTy, const SimplifyQuery &Q);
/// Given operands for a ShuffleVectorInst, fold the result or return null. //=== Helper functions for higher up the class hierarchy.
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const SimplifyQuery &Q);
//=== Helper functions for higher up the class hierarchy. /// Given operands for a CmpInst, fold the result or return null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
/// Given operands for a CmpInst, fold the result or return null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for a BinaryOperator, fold the result or return null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
/// Given operands for an FP BinaryOperator, fold the result or return null. /// Given operands for a BinaryOperator, fold the result or return null.
/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp. const SimplifyQuery &Q);
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
/// Given a function and iterators over arguments, fold the result or return /// Given operands for an FP BinaryOperator, fold the result or return null.
/// null. /// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin, /// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
User::op_iterator ArgEnd, const SimplifyQuery &Q); Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
/// Given a function and set of arguments, fold the result or return null. /// Given a function and iterators over arguments, fold the result or return
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q); /// null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const SimplifyQuery &Q);
/// See if we can compute a simplified version of this instruction. If not, /// Given a function and set of arguments, fold the result or return null.
/// return null. Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);
/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively. /// See if we can compute a simplified version of this instruction. If not,
/// /// return null.
/// This first performs a normal RAUW of I with SimpleV. It then recursively Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
/// attempts to simplify those users updated by the operation. The 'I' OptimizationRemarkEmitter *ORE = nullptr);
/// instruction must not be equal to the simplified value 'SimpleV'.
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
/// Recursively attempt to simplify an instruction. /// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
/// ///
/// This routine uses SimplifyInstruction to simplify 'I', and if successful /// This first performs a normal RAUW of I with SimpleV. It then recursively
/// replaces uses of 'I' with the simplified value. It then recurses on each /// attempts to simplify those users updated by the operation. The 'I'
/// of the users impacted. It returns true if any simplifications were /// instruction must not be equal to the simplified value 'SimpleV'.
/// performed. ///
bool recursivelySimplifyInstruction(Instruction *I, /// The function returns true if any simplifications were performed.
const TargetLibraryInfo *TLI = nullptr, bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const DominatorTree *DT = nullptr, const TargetLibraryInfo *TLI = nullptr,
AssumptionCache *AC = nullptr); const DominatorTree *DT = nullptr,
// These helper functions return a SimplifyQuery structure that contains as AssumptionCache *AC = nullptr);
// many of the optional analysis we use as are currently valid. This is the
// strongly preferred way of constructing SimplifyQuery in passes. /// Recursively attempt to simplify an instruction.
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &); ///
template <class T, class... TArgs> /// This routine uses SimplifyInstruction to simplify 'I', and if successful
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &, /// replaces uses of 'I' with the simplified value. It then recurses on each
Function &); /// of the users impacted. It returns true if any simplifications were
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &, /// performed.
const DataLayout &); bool recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
// These helper functions return a SimplifyQuery structure that contains as
// many of the optional analyses we use as are currently valid. This is the
// strongly preferred way of constructing SimplifyQuery in passes.
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
Function &);
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
const DataLayout &);
} // end namespace llvm
#endif
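A minimal usage sketch, not part of the merged diff, of how a caller might drive the SimplifyQuery-based entry points declared above; the helper name and the fallback strategy are illustrative assumptions.

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Try to fold I using the query-based simplifiers; returns true if I's uses
// were rewired to a simpler value.
static bool foldIfSimplifiable(Instruction &I, const SimplifyQuery &Q) {
  Value *V = nullptr;
  if (auto *BO = dyn_cast<BinaryOperator>(&I))
    V = SimplifyBinOp(BO->getOpcode(), BO->getOperand(0), BO->getOperand(1), Q);
  else
    V = SimplifyInstruction(&I, Q); // generic fallback for other opcodes
  if (!V)
    return false;                   // nothing folded
  I.replaceAllUsesWith(V);
  return true;
}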


@ -126,9 +126,8 @@ class LPPassManager : public FunctionPass, public PMDataManager {
}
public:
// Add a new loop into the loop queue as a child of the given parent, or at
// the top level if \c ParentLoop is null.
Loop &addLoop(Loop *ParentLoop);
// Add a new loop into the loop queue.
void addLoop(Loop &L);
//===--------------------------------------------------------------------===//
/// SimpleAnalysis - Provides simple interface to update analysis info


@ -1533,6 +1533,12 @@ class ScalarEvolution {
/// specified loop.
bool isLoopInvariant(const SCEV *S, const Loop *L);
/// Determine if the SCEV can be evaluated at loop's entry. It is true if it
/// doesn't depend on a SCEVUnknown of an instruction which is dominated by
/// the header of loop L.
bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L, DominatorTree &DT,
LoopInfo &LI);
/// Return true if the given SCEV changes value in a known way in the
/// specified loop. This property being true implies that the value is
/// variant in the loop AND that we can emit an expression to compute the
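A small hedged sketch of how a loop transform could consult the newly added isAvailableAtLoopEntry() query before hoisting an expression; the wrapper function is hypothetical.

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/Dominators.h"

using namespace llvm;

// Only expand/hoist S above the loop if it does not depend on values defined
// inside the loop.
static bool canHoistToPreheader(ScalarEvolution &SE, const SCEV *S,
                                const Loop *L, DominatorTree &DT,
                                LoopInfo &LI) {
  return SE.isAvailableAtLoopEntry(S, L, DT, LI);
}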


@ -396,6 +396,9 @@ class TargetTransformInfo {
bool isLegalMaskedScatter(Type *DataType) const;
bool isLegalMaskedGather(Type *DataType) const;
/// Return true if target doesn't mind addresses in vectors.
bool prefersVectorizedAddressing() const;
/// \brief Return the cost of the scaling factor used in the addressing
/// mode represented by AM for this target, for a load/store
/// of the specified type.
@ -807,6 +810,7 @@ class TargetTransformInfo::Concept {
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
virtual bool prefersVectorizedAddressing() = 0;
virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) = 0;
@ -1000,6 +1004,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
bool isLegalMaskedGather(Type *DataType) override {
return Impl.isLegalMaskedGather(DataType);
}
bool prefersVectorizedAddressing() override {
return Impl.prefersVectorizedAddressing();
}
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) override {


@ -237,6 +237,8 @@ class TargetTransformInfoImplBase {
bool isLegalMaskedGather(Type *DataType) { return false; }
bool prefersVectorizedAddressing() { return true; }
int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
// Guess that all legal addressing modes are free.
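An illustrative (hypothetical) caller of the new prefersVectorizedAddressing() hook through TargetTransformInfo, roughly the kind of query a vectorizer would make before deciding whether to scalarize address computations.

#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

static bool shouldKeepAddressComputationVector(const TargetTransformInfo &TTI) {
  // Targets for which the hook returns false get their address computations
  // scalarized by the caller.
  return TTI.prefersVectorizedAddressing();
}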


@ -60,7 +60,8 @@ template <typename T> class ArrayRef;
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
const DominatorTree *DT = nullptr,
OptimizationRemarkEmitter *ORE = nullptr);
/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
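A hedged sketch of the extended computeKnownBits() signature in use; the wrapper and its even-ness check are assumptions, not code from this change.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;

// Returns true if the low bit of V is known to be zero; ORE, when supplied,
// is threaded through to the analysis as allowed by the new parameter.
static bool isKnownEven(const Value *V, const DataLayout &DL,
                        OptimizationRemarkEmitter *ORE = nullptr) {
  KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, /*AC=*/nullptr,
                                     /*CxtI=*/nullptr, /*DT=*/nullptr, ORE);
  return Known.Zero[0];
}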


@ -34,6 +34,7 @@
namespace llvm {
class AsmPrinterHandler;
class BasicBlock;
class BlockAddress;
class Constant;
class ConstantArray;
@ -43,6 +44,7 @@ class DIEAbbrev;
class DwarfDebug;
class GCMetadataPrinter;
class GlobalIndirectSymbol;
class GlobalObject;
class GlobalValue;
class GlobalVariable;
class GCStrategy;
@ -65,6 +67,8 @@ class MCSubtargetInfo;
class MCSymbol;
class MCTargetOptions;
class MDNode;
class Module;
class raw_ostream;
class TargetLoweringObjectFile;
class TargetMachine;
@ -109,7 +113,7 @@ class AsmPrinter : public MachineFunctionPass {
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
/// its number of uses by other globals.
typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
/// Enable print [latency:throughput] in output


@ -1,4 +1,4 @@
//===-- AtomicExpandUtils.h - Utilities for expanding atomic instructions -===//
//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
//
// The LLVM Compiler Infrastructure
//
@ -7,19 +7,24 @@
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h" #include "llvm/IR/IRBuilder.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm { namespace llvm {
class Value;
class AtomicRMWInst;
class AtomicRMWInst;
class Value;
/// Parameters (see the expansion example below):
/// (the builder, %addr, %loaded, %new_val, ordering,
/// /* OUT */ %success, /* OUT */ %new_loaded)
typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
AtomicOrdering, Value *&, Value *&)> CreateCmpXchgInstFun;
using CreateCmpXchgInstFun =
function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
Value *&, Value *&)>;
/// \brief Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
@ -42,7 +47,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// loop:
/// %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
/// %new = some_op iN %loaded, %incr
/// ; This is what -atomic-expand will produce using this function on i686 targets:
/// ; This is what -atomic-expand will produce using this function on i686
/// targets:
/// %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
/// %new_loaded = extractvalue { iN, i1 } %pair, 0
/// %success = extractvalue { iN, i1 } %pair, 1
@ -52,6 +58,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// [...]
///
/// Returns true if the containing function was modified.
bool
expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
} // end namespace llvm
#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
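A sketch, under the assumption that the caller sits inside AtomicExpandPass-like code, of a CreateCmpXchgInstFun callback matching the alias above; it mirrors the expansion described in the comment.

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Emit a plain cmpxchg and unpack the { value, success } pair into the two
// out-parameters the expansion loop expects.
static void createCmpXchg(IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                          Value *NewVal, AtomicOrdering Ordering,
                          Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, Ordering,
      AtomicCmpXchgInst::getStrongestFailureOrdering(Ordering));
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
  Success = Builder.CreateExtractValue(Pair, 1, "success");
}

// Usage: expandAtomicRMWToCmpXchg(AI, createCmpXchg);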


@ -1,4 +1,4 @@
//===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -31,6 +31,7 @@
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>
#include <vector>
namespace llvm {
@ -53,11 +54,11 @@ class DIEAbbrevData {
dwarf::Form Form; dwarf::Form Form;
/// Dwarf attribute value for DW_FORM_implicit_const /// Dwarf attribute value for DW_FORM_implicit_const
int64_t Value; int64_t Value = 0;
public: public:
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
: Attribute(A), Form(F), Value(0) {} : Attribute(A), Form(F) {}
DIEAbbrevData(dwarf::Attribute A, int64_t V) DIEAbbrevData(dwarf::Attribute A, int64_t V)
: Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {} : Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
@ -136,13 +137,14 @@ class DIEAbbrevSet {
/// storage container. /// storage container.
BumpPtrAllocator &Alloc; BumpPtrAllocator &Alloc;
/// \brief FoldingSet that uniques the abbreviations. /// \brief FoldingSet that uniques the abbreviations.
llvm::FoldingSet<DIEAbbrev> AbbreviationsSet; FoldingSet<DIEAbbrev> AbbreviationsSet;
/// A list of all the unique abbreviations in use. /// A list of all the unique abbreviations in use.
std::vector<DIEAbbrev *> Abbreviations; std::vector<DIEAbbrev *> Abbreviations;
public: public:
DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {} DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
~DIEAbbrevSet(); ~DIEAbbrevSet();
/// Generate the abbreviation declaration for a DIE and return a pointer to /// Generate the abbreviation declaration for a DIE and return a pointer to
/// the generated abbreviation. /// the generated abbreviation.
/// ///
@ -289,13 +291,11 @@ class DIEInlineString {
/// A pointer to another debug information entry. An instance of this class can /// A pointer to another debug information entry. An instance of this class can
/// also be used as a proxy for a debug information entry not yet defined /// also be used as a proxy for a debug information entry not yet defined
/// (ie. types.) /// (ie. types.)
class DIE;
class DIEEntry { class DIEEntry {
DIE *Entry; DIE *Entry;
DIEEntry() = delete;
public: public:
DIEEntry() = delete;
explicit DIEEntry(DIE &E) : Entry(&E) {} explicit DIEEntry(DIE &E) : Entry(&E) {}
DIE &getEntry() const { return *Entry; } DIE &getEntry() const { return *Entry; }
@ -348,10 +348,10 @@ class DIEValue {
/// ///
/// All values that aren't standard layout (or are larger than 8 bytes) /// All values that aren't standard layout (or are larger than 8 bytes)
/// should be stored by reference instead of by value. /// should be stored by reference instead of by value.
typedef AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel, using ValTy = AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
DIEDelta *, DIEEntry, DIEBlock *, DIELoc *, DIEDelta *, DIEEntry, DIEBlock *,
DIELocList> DIELoc *, DIELocList>;
ValTy;
static_assert(sizeof(ValTy) <= sizeof(uint64_t) || static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
sizeof(ValTy) <= sizeof(void *), sizeof(ValTy) <= sizeof(void *),
"Expected all large types to be stored via pointer"); "Expected all large types to be stored via pointer");
@ -486,10 +486,12 @@ struct IntrusiveBackListNode {
}; };
struct IntrusiveBackListBase { struct IntrusiveBackListBase {
typedef IntrusiveBackListNode Node; using Node = IntrusiveBackListNode;
Node *Last = nullptr; Node *Last = nullptr;
bool empty() const { return !Last; } bool empty() const { return !Last; }
void push_back(Node &N) { void push_back(Node &N) {
assert(N.Next.getPointer() == &N && "Expected unlinked node"); assert(N.Next.getPointer() == &N && "Expected unlinked node");
assert(N.Next.getInt() == true && "Expected unlinked node"); assert(N.Next.getInt() == true && "Expected unlinked node");
@ -505,6 +507,7 @@ struct IntrusiveBackListBase {
template <class T> class IntrusiveBackList : IntrusiveBackListBase { template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public: public:
using IntrusiveBackListBase::empty; using IntrusiveBackListBase::empty;
void push_back(T &N) { IntrusiveBackListBase::push_back(N); } void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
T &back() { return *static_cast<T *>(Last); } T &back() { return *static_cast<T *>(Last); }
const T &back() const { return *static_cast<T *>(Last); } const T &back() const { return *static_cast<T *>(Last); }
@ -513,6 +516,7 @@ template <class T> class IntrusiveBackList : IntrusiveBackListBase {
class iterator class iterator
: public iterator_facade_base<iterator, std::forward_iterator_tag, T> { : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
friend class const_iterator; friend class const_iterator;
Node *N = nullptr; Node *N = nullptr;
public: public:
@ -585,10 +589,12 @@ template <class T> class IntrusiveBackList : IntrusiveBackListBase {
class DIEValueList { class DIEValueList {
struct Node : IntrusiveBackListNode { struct Node : IntrusiveBackListNode {
DIEValue V; DIEValue V;
explicit Node(DIEValue V) : V(V) {} explicit Node(DIEValue V) : V(V) {}
}; };
typedef IntrusiveBackList<Node> ListTy; using ListTy = IntrusiveBackList<Node>;
ListTy List; ListTy List;
public: public:
@ -597,9 +603,10 @@ class DIEValueList {
: public iterator_adaptor_base<value_iterator, ListTy::iterator, : public iterator_adaptor_base<value_iterator, ListTy::iterator,
std::forward_iterator_tag, DIEValue> { std::forward_iterator_tag, DIEValue> {
friend class const_value_iterator; friend class const_value_iterator;
typedef iterator_adaptor_base<value_iterator, ListTy::iterator,
std::forward_iterator_tag, using iterator_adaptor =
DIEValue> iterator_adaptor; iterator_adaptor_base<value_iterator, ListTy::iterator,
std::forward_iterator_tag, DIEValue>;
public: public:
value_iterator() = default; value_iterator() = default;
@ -612,9 +619,9 @@ class DIEValueList {
class const_value_iterator : public iterator_adaptor_base< class const_value_iterator : public iterator_adaptor_base<
const_value_iterator, ListTy::const_iterator, const_value_iterator, ListTy::const_iterator,
std::forward_iterator_tag, const DIEValue> { std::forward_iterator_tag, const DIEValue> {
typedef iterator_adaptor_base<const_value_iterator, ListTy::const_iterator, using iterator_adaptor =
std::forward_iterator_tag, iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
const DIEValue> iterator_adaptor; std::forward_iterator_tag, const DIEValue>;
public: public:
const_value_iterator() = default; const_value_iterator() = default;
@ -627,8 +634,8 @@ class DIEValueList {
const DIEValue &operator*() const { return wrapped()->V; } const DIEValue &operator*() const { return wrapped()->V; }
}; };
typedef iterator_range<value_iterator> value_range; using value_range = iterator_range<value_iterator>;
typedef iterator_range<const_value_iterator> const_value_range; using const_value_range = iterator_range<const_value_iterator>;
value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) { value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
List.push_back(*new (Alloc) Node(V)); List.push_back(*new (Alloc) Node(V));
@ -657,15 +664,15 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
friend class DIEUnit; friend class DIEUnit;
/// Dwarf unit relative offset. /// Dwarf unit relative offset.
unsigned Offset; unsigned Offset = 0;
/// Size of instance + children. /// Size of instance + children.
unsigned Size; unsigned Size = 0;
unsigned AbbrevNumber = ~0u; unsigned AbbrevNumber = ~0u;
/// Dwarf tag code. /// Dwarf tag code.
dwarf::Tag Tag = (dwarf::Tag)0; dwarf::Tag Tag = (dwarf::Tag)0;
/// Set to true to force a DIE to emit an abbreviation that says it has /// Set to true to force a DIE to emit an abbreviation that says it has
/// children even when it doesn't. This is used for unit testing purposes. /// children even when it doesn't. This is used for unit testing purposes.
bool ForceChildren; bool ForceChildren = false;
/// Children DIEs. /// Children DIEs.
IntrusiveBackList<DIE> Children; IntrusiveBackList<DIE> Children;
@ -673,20 +680,19 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
/// DIEUnit which contains this DIE as its unit DIE. /// DIEUnit which contains this DIE as its unit DIE.
PointerUnion<DIE *, DIEUnit *> Owner; PointerUnion<DIE *, DIEUnit *> Owner;
DIE() = delete; explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}
explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag),
ForceChildren(false) {}
public: public:
DIE() = delete;
DIE(const DIE &RHS) = delete;
DIE(DIE &&RHS) = delete;
DIE &operator=(const DIE &RHS) = delete;
DIE &operator=(const DIE &&RHS) = delete;
static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) { static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
return new (Alloc) DIE(Tag); return new (Alloc) DIE(Tag);
} }
DIE(const DIE &RHS) = delete;
DIE(DIE &&RHS) = delete;
void operator=(const DIE &RHS) = delete;
void operator=(const DIE &&RHS) = delete;
// Accessors. // Accessors.
unsigned getAbbrevNumber() const { return AbbrevNumber; } unsigned getAbbrevNumber() const { return AbbrevNumber; }
dwarf::Tag getTag() const { return Tag; } dwarf::Tag getTag() const { return Tag; }
@ -696,10 +702,10 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
bool hasChildren() const { return ForceChildren || !Children.empty(); } bool hasChildren() const { return ForceChildren || !Children.empty(); }
void setForceChildren(bool B) { ForceChildren = B; } void setForceChildren(bool B) { ForceChildren = B; }
typedef IntrusiveBackList<DIE>::iterator child_iterator; using child_iterator = IntrusiveBackList<DIE>::iterator;
typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator; using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
typedef iterator_range<child_iterator> child_range; using child_range = iterator_range<child_iterator>;
typedef iterator_range<const_child_iterator> const_child_range; using const_child_range = iterator_range<const_child_iterator>;
child_range children() { child_range children() {
return make_range(Children.begin(), Children.end()); return make_range(Children.begin(), Children.end());
@ -838,10 +844,10 @@ struct BasicDIEUnit final : DIEUnit {
/// DIELoc - Represents an expression location. /// DIELoc - Represents an expression location.
// //
class DIELoc : public DIEValueList { class DIELoc : public DIEValueList {
mutable unsigned Size; // Size in bytes excluding size header. mutable unsigned Size = 0; // Size in bytes excluding size header.
public: public:
DIELoc() : Size(0) {} DIELoc() = default;
/// ComputeSize - Calculate the size of the location expression. /// ComputeSize - Calculate the size of the location expression.
/// ///
@ -872,10 +878,10 @@ class DIELoc : public DIEValueList {
/// DIEBlock - Represents a block of values. /// DIEBlock - Represents a block of values.
// //
class DIEBlock : public DIEValueList { class DIEBlock : public DIEValueList {
mutable unsigned Size; // Size in bytes excluding size header. mutable unsigned Size = 0; // Size in bytes excluding size header.
public: public:
DIEBlock() : Size(0) {} DIEBlock() = default;
/// ComputeSize - Calculate the size of the location expression. /// ComputeSize - Calculate the size of the location expression.
/// ///


@ -56,7 +56,7 @@ class FaultMaps {
HandlerOffsetExpr(HandlerOffset) {} HandlerOffsetExpr(HandlerOffset) {}
}; };
typedef std::vector<FaultInfo> FunctionFaultInfos; using FunctionFaultInfos = std::vector<FaultInfo>;
// We'd like to keep a stable iteration order for FunctionInfos to help // We'd like to keep a stable iteration order for FunctionInfos to help
// FileCheck based testing. // FileCheck based testing.
@ -78,20 +78,17 @@ class FaultMaps {
/// generated by the version of LLVM that includes it. No guarantees are made /// generated by the version of LLVM that includes it. No guarantees are made
/// with respect to forward or backward compatibility. /// with respect to forward or backward compatibility.
class FaultMapParser { class FaultMapParser {
typedef uint8_t FaultMapVersionType; using FaultMapVersionType = uint8_t;
static const size_t FaultMapVersionOffset = 0; using Reserved0Type = uint8_t;
using Reserved1Type = uint16_t;
using NumFunctionsType = uint32_t;
typedef uint8_t Reserved0Type; static const size_t FaultMapVersionOffset = 0;
static const size_t Reserved0Offset = static const size_t Reserved0Offset =
FaultMapVersionOffset + sizeof(FaultMapVersionType); FaultMapVersionOffset + sizeof(FaultMapVersionType);
typedef uint16_t Reserved1Type;
static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type); static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
typedef uint32_t NumFunctionsType;
static const size_t NumFunctionsOffset = static const size_t NumFunctionsOffset =
Reserved1Offset + sizeof(Reserved1Type); Reserved1Offset + sizeof(Reserved1Type);
static const size_t FunctionInfosOffset = static const size_t FunctionInfosOffset =
NumFunctionsOffset + sizeof(NumFunctionsType); NumFunctionsOffset + sizeof(NumFunctionsType);
@ -105,14 +102,13 @@ class FaultMapParser {
public: public:
class FunctionFaultInfoAccessor { class FunctionFaultInfoAccessor {
typedef uint32_t FaultKindType; using FaultKindType = uint32_t;
static const size_t FaultKindOffset = 0; using FaultingPCOffsetType = uint32_t;
using HandlerPCOffsetType = uint32_t;
typedef uint32_t FaultingPCOffsetType; static const size_t FaultKindOffset = 0;
static const size_t FaultingPCOffsetOffset = static const size_t FaultingPCOffsetOffset =
FaultKindOffset + sizeof(FaultKindType); FaultKindOffset + sizeof(FaultKindType);
typedef uint32_t HandlerPCOffsetType;
static const size_t HandlerPCOffsetOffset = static const size_t HandlerPCOffsetOffset =
FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType); FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
@ -140,20 +136,17 @@ class FaultMapParser {
}; };
class FunctionInfoAccessor { class FunctionInfoAccessor {
typedef uint64_t FunctionAddrType; using FunctionAddrType = uint64_t;
static const size_t FunctionAddrOffset = 0; using NumFaultingPCsType = uint32_t;
using ReservedType = uint32_t;
typedef uint32_t NumFaultingPCsType; static const size_t FunctionAddrOffset = 0;
static const size_t NumFaultingPCsOffset = static const size_t NumFaultingPCsOffset =
FunctionAddrOffset + sizeof(FunctionAddrType); FunctionAddrOffset + sizeof(FunctionAddrType);
typedef uint32_t ReservedType;
static const size_t ReservedOffset = static const size_t ReservedOffset =
NumFaultingPCsOffset + sizeof(NumFaultingPCsType); NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
static const size_t FunctionFaultInfosOffset = static const size_t FunctionFaultInfosOffset =
ReservedOffset + sizeof(ReservedType); ReservedOffset + sizeof(ReservedType);
static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset; static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
const uint8_t *P = nullptr; const uint8_t *P = nullptr;


@ -0,0 +1,78 @@
//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file describes the interface of the Localizer pass.
/// This pass moves/duplicates constant-like instructions close to their uses.
/// Its primary goal is to work around the deficiencies of the fast register
/// allocator.
/// With GlobalISel constants are all materialized in the entry block of
/// a function. However, the fast allocator cannot rematerialize constants and
/// has a lot more live-ranges to deal with and will most likely end up
/// spilling a lot.
/// By pushing the constants close to their use, we only create small
/// live-ranges.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
// Forward declarations.
class MachineRegisterInfo;
/// This pass implements the localization mechanism described at the
/// top of this file. One specificity of the implementation is that
/// it will materialize one and only one instance of a constant per
/// basic block, thus enabling reuse of that constant within that block.
/// Moreover, it only materializes constants in blocks where they
/// are used. PHI uses are considered happening at the end of the
/// related predecessor.
class Localizer : public MachineFunctionPass {
public:
static char ID;
private:
/// MRI contains all the register class/bank information that this
/// pass uses and updates.
MachineRegisterInfo *MRI;
/// Check whether or not \p MI needs to be moved close to its uses.
static bool shouldLocalize(const MachineInstr &MI);
/// Check if \p MOUse is used in the same basic block as \p Def.
/// If the use is in the same block, we say it is local.
/// When the use is not local, \p InsertMBB will contain the basic
/// block where to insert \p Def to have a local use.
static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
MachineBasicBlock *&InsertMBB);
/// Initialize the field members using \p MF.
void init(MachineFunction &MF);
public:
Localizer();
StringRef getPassName() const override { return "Localizer"; }
MachineFunctionProperties getRequiredProperties() const override {
return MachineFunctionProperties()
.set(MachineFunctionProperties::Property::IsSSA)
.set(MachineFunctionProperties::Property::Legalized)
.set(MachineFunctionProperties::Property::RegBankSelected);
}
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // End namespace llvm.
#endif
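A hedged sketch, not the actual implementation, of the kind of opcode test shouldLocalize() is built around; the helper name and the exact opcode set are assumptions for illustration.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetOpcodes.h"

using namespace llvm;

// Only cheap, constant-like generic instructions are worth duplicating next
// to their uses, since the fast allocator cannot rematerialize them.
static bool isConstantLikeDef(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
    return true;
  default:
    return false;
  }
}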


@ -264,6 +264,14 @@ namespace ISD {
/// optimized.
STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
/// Constrained versions of libm-equivalent floating point intrinsics.
/// These will be lowered to the equivalent non-constrained pseudo-op
/// (or expanded to the equivalent library call) before final selection.
/// They are used to limit optimizations while the DAG is being optimized.
STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
STRICT_FRINT, STRICT_FNEARBYINT,
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,


@ -1,4 +1,4 @@
//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===// //===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -21,22 +21,30 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H #ifndef LLVM_CODEGEN_LIVEINTERVAL_H
#define LLVM_CODEGEN_LIVEINTERVAL_H #define LLVM_CODEGEN_LIVEINTERVAL_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntEqClasses.h" #include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/SlotIndexes.h" #include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/Allocator.h" #include "llvm/Support/Allocator.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert> #include <cassert>
#include <climits> #include <cstddef>
#include <functional>
#include <memory>
#include <set> #include <set>
#include <tuple>
#include <utility>
namespace llvm {
class CoalescerPair; class CoalescerPair;
class LiveIntervals; class LiveIntervals;
class MachineInstr;
class MachineRegisterInfo; class MachineRegisterInfo;
class TargetRegisterInfo;
class raw_ostream; class raw_ostream;
template <typename T, unsigned Small> class SmallPtrSet;
/// VNInfo - Value Number Information. /// VNInfo - Value Number Information.
/// This class holds information about a machine level values, including /// This class holds information about a machine level values, including
@ -44,7 +52,7 @@ namespace llvm {
/// ///
class VNInfo { class VNInfo {
public: public:
typedef BumpPtrAllocator Allocator; using Allocator = BumpPtrAllocator;
/// The ID number of this value. /// The ID number of this value.
unsigned id; unsigned id;
@ -53,14 +61,10 @@ namespace llvm {
SlotIndex def; SlotIndex def;
/// VNInfo constructor. /// VNInfo constructor.
VNInfo(unsigned i, SlotIndex d) VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}
: id(i), def(d)
{ }
/// VNInfo constructor, copies values from orig, except for the value number. /// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig) VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}
: id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo. /// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) { void copyFrom(VNInfo &src) {
@ -152,16 +156,16 @@ namespace llvm {
/// segment with a new value number is used. /// segment with a new value number is used.
class LiveRange { class LiveRange {
public: public:
/// This represents a simple continuous liveness interval for a value. /// This represents a simple continuous liveness interval for a value.
/// The start point is inclusive, the end point exclusive. These intervals /// The start point is inclusive, the end point exclusive. These intervals
/// are rendered as [start,end). /// are rendered as [start,end).
struct Segment { struct Segment {
SlotIndex start; // Start point of the interval (inclusive) SlotIndex start; // Start point of the interval (inclusive)
SlotIndex end; // End point of the interval (exclusive) SlotIndex end; // End point of the interval (exclusive)
VNInfo *valno; // identifier for the value contained in this segment. VNInfo *valno = nullptr; // identifier for the value contained in this
// segment.
Segment() : valno(nullptr) {} Segment() = default;
Segment(SlotIndex S, SlotIndex E, VNInfo *V) Segment(SlotIndex S, SlotIndex E, VNInfo *V)
: start(S), end(E), valno(V) { : start(S), end(E), valno(V) {
@ -189,8 +193,8 @@ namespace llvm {
void dump() const; void dump() const;
}; };
typedef SmallVector<Segment, 2> Segments; using Segments = SmallVector<Segment, 2>;
typedef SmallVector<VNInfo *, 2> VNInfoList; using VNInfoList = SmallVector<VNInfo *, 2>;
Segments segments; // the liveness segments Segments segments; // the liveness segments
VNInfoList valnos; // value#'s VNInfoList valnos; // value#'s
@ -198,22 +202,24 @@ namespace llvm {
// The segment set is used temporarily to accelerate initial computation // The segment set is used temporarily to accelerate initial computation
// of live ranges of physical registers in computeRegUnitRange. // of live ranges of physical registers in computeRegUnitRange.
// After that the set is flushed to the segment vector and deleted. // After that the set is flushed to the segment vector and deleted.
typedef std::set<Segment> SegmentSet; using SegmentSet = std::set<Segment>;
std::unique_ptr<SegmentSet> segmentSet; std::unique_ptr<SegmentSet> segmentSet;
typedef Segments::iterator iterator; using iterator = Segments::iterator;
using const_iterator = Segments::const_iterator;
iterator begin() { return segments.begin(); } iterator begin() { return segments.begin(); }
iterator end() { return segments.end(); } iterator end() { return segments.end(); }
typedef Segments::const_iterator const_iterator;
const_iterator begin() const { return segments.begin(); } const_iterator begin() const { return segments.begin(); }
const_iterator end() const { return segments.end(); } const_iterator end() const { return segments.end(); }
typedef VNInfoList::iterator vni_iterator; using vni_iterator = VNInfoList::iterator;
using const_vni_iterator = VNInfoList::const_iterator;
vni_iterator vni_begin() { return valnos.begin(); } vni_iterator vni_begin() { return valnos.begin(); }
vni_iterator vni_end() { return valnos.end(); } vni_iterator vni_end() { return valnos.end(); }
typedef VNInfoList::const_iterator const_vni_iterator;
const_vni_iterator vni_begin() const { return valnos.begin(); } const_vni_iterator vni_begin() const { return valnos.begin(); }
const_vni_iterator vni_end() const { return valnos.end(); } const_vni_iterator vni_end() const { return valnos.end(); }
@ -631,40 +637,37 @@ namespace llvm {
/// or stack slot. /// or stack slot.
class LiveInterval : public LiveRange { class LiveInterval : public LiveRange {
public: public:
typedef LiveRange super; using super = LiveRange;
/// A live range for subregisters. The LaneMask specifies which parts of the /// A live range for subregisters. The LaneMask specifies which parts of the
/// super register are covered by the interval. /// super register are covered by the interval.
/// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()). /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
class SubRange : public LiveRange { class SubRange : public LiveRange {
public: public:
SubRange *Next; SubRange *Next = nullptr;
LaneBitmask LaneMask; LaneBitmask LaneMask;
/// Constructs a new SubRange object. /// Constructs a new SubRange object.
SubRange(LaneBitmask LaneMask) SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}
: Next(nullptr), LaneMask(LaneMask) {
}
/// Constructs a new SubRange object by copying liveness from @p Other. /// Constructs a new SubRange object by copying liveness from @p Other.
SubRange(LaneBitmask LaneMask, const LiveRange &Other, SubRange(LaneBitmask LaneMask, const LiveRange &Other,
BumpPtrAllocator &Allocator) BumpPtrAllocator &Allocator)
: LiveRange(Other, Allocator), Next(nullptr), LaneMask(LaneMask) { : LiveRange(Other, Allocator), LaneMask(LaneMask) {}
}
void print(raw_ostream &OS) const; void print(raw_ostream &OS) const;
void dump() const; void dump() const;
}; };
private: private:
SubRange *SubRanges; ///< Single linked list of subregister live ranges. SubRange *SubRanges = nullptr; ///< Single linked list of subregister live
/// ranges.
public: public:
const unsigned reg; // the register or stack slot of this interval. const unsigned reg; // the register or stack slot of this interval.
float weight; // weight of this interval float weight; // weight of this interval
LiveInterval(unsigned Reg, float Weight) LiveInterval(unsigned Reg, float Weight) : reg(Reg), weight(Weight) {}
: SubRanges(nullptr), reg(Reg), weight(Weight) {}
~LiveInterval() { ~LiveInterval() {
clearSubRanges(); clearSubRanges();
@ -673,8 +676,10 @@ namespace llvm {
template<typename T> template<typename T>
class SingleLinkedListIterator { class SingleLinkedListIterator {
T *P; T *P;
public: public:
SingleLinkedListIterator<T>(T *P) : P(P) {} SingleLinkedListIterator<T>(T *P) : P(P) {}
SingleLinkedListIterator<T> &operator++() { SingleLinkedListIterator<T> &operator++() {
P = P->Next; P = P->Next;
return *this; return *this;
@ -698,7 +703,9 @@ namespace llvm {
} }
}; };
typedef SingleLinkedListIterator<SubRange> subrange_iterator; using subrange_iterator = SingleLinkedListIterator<SubRange>;
using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;
subrange_iterator subrange_begin() { subrange_iterator subrange_begin() {
return subrange_iterator(SubRanges); return subrange_iterator(SubRanges);
} }
@ -706,7 +713,6 @@ namespace llvm {
return subrange_iterator(nullptr); return subrange_iterator(nullptr);
} }
typedef SingleLinkedListIterator<const SubRange> const_subrange_iterator;
const_subrange_iterator subrange_begin() const { const_subrange_iterator subrange_begin() const {
return const_subrange_iterator(SubRanges); return const_subrange_iterator(SubRanges);
} }
@ -759,12 +765,12 @@ namespace llvm {
/// isSpillable - Can this interval be spilled? /// isSpillable - Can this interval be spilled?
bool isSpillable() const { bool isSpillable() const {
return weight != llvm::huge_valf; return weight != huge_valf;
} }
/// markNotSpillable - Mark interval as not spillable /// markNotSpillable - Mark interval as not spillable
void markNotSpillable() { void markNotSpillable() {
weight = llvm::huge_valf; weight = huge_valf;
} }
/// For a given lane mask @p LaneMask, compute indexes at which the /// For a given lane mask @p LaneMask, compute indexes at which the
@ -931,5 +937,7 @@ namespace llvm {
void Distribute(LiveInterval &LI, LiveInterval *LIV[], void Distribute(LiveInterval &LI, LiveInterval *LIV[],
MachineRegisterInfo &MRI); MachineRegisterInfo &MRI);
}; };
}
#endif
} // end namespace llvm
#endif // LLVM_CODEGEN_LIVEINTERVAL_H
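An illustrative walk over a LiveInterval using the iterator aliases above; the dump helper is hypothetical.

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Print every liveness segment and each subregister range of the interval.
static void dumpLiveness(const LiveInterval &LI) {
  for (const LiveRange::Segment &S : LI)
    errs() << "  [" << S.start << ',' << S.end << "):" << S.valno->id << '\n';
  for (const LiveInterval::SubRange &SR : LI.subranges())
    errs() << "  subrange with lane mask " << PrintLaneMask(SR.LaneMask) << '\n';
}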


@ -1,4 +1,4 @@
//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===// //===- LiveIntervalAnalysis.h - Live Interval Analysis ----------*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -20,6 +20,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H #ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H #define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IndexedMap.h" #include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
@ -27,27 +28,29 @@
#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h" #include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Allocator.h" #include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include <cmath> #include <cassert>
#include <cstdint>
#include <utility>
namespace llvm { namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs; extern cl::opt<bool> UseSegmentSetForPhysRegs;
class BitVector; class BitVector;
class BlockFrequency; class LiveRangeCalc;
class LiveRangeCalc; class MachineBlockFrequencyInfo;
class LiveVariables; class MachineDominatorTree;
class MachineDominatorTree; class MachineFunction;
class MachineLoopInfo; class MachineInstr;
class TargetRegisterInfo; class MachineRegisterInfo;
class MachineRegisterInfo; class raw_ostream;
class TargetInstrInfo; class TargetInstrInfo;
class TargetRegisterClass; class VirtRegMap;
class VirtRegMap;
class MachineBlockFrequencyInfo;
class LiveIntervals : public MachineFunctionPass { class LiveIntervals : public MachineFunctionPass {
MachineFunction* MF; MachineFunction* MF;
@ -56,8 +59,8 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
const TargetInstrInfo* TII; const TargetInstrInfo* TII;
AliasAnalysis *AA; AliasAnalysis *AA;
SlotIndexes* Indexes; SlotIndexes* Indexes;
MachineDominatorTree *DomTree; MachineDominatorTree *DomTree = nullptr;
LiveRangeCalc *LRCalc; LiveRangeCalc *LRCalc = nullptr;
/// Special pool allocator for VNInfo's (LiveInterval val#). /// Special pool allocator for VNInfo's (LiveInterval val#).
VNInfo::Allocator VNInfoAllocator; VNInfo::Allocator VNInfoAllocator;
@ -95,6 +98,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
public: public:
static char ID; static char ID;
LiveIntervals(); LiveIntervals();
~LiveIntervals() override; ~LiveIntervals() override;
@ -466,6 +470,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
class HMEditor; class HMEditor;
}; };
} // End llvm namespace
#endif } // end namespace llvm
#endif // LLVM_CODEGEN_LIVEINTERVALANALYSIS_H


@ -26,12 +26,14 @@
namespace llvm {
class raw_ostream;
class TargetRegisterInfo;
#ifndef NDEBUG #ifndef NDEBUG
// forward declaration // forward declaration
template <unsigned Element> class SparseBitVector; template <unsigned Element> class SparseBitVector;
typedef SparseBitVector<128> LiveVirtRegBitSet;
using LiveVirtRegBitSet = SparseBitVector<128>;
#endif #endif
/// Union of live intervals that are strong candidates for coalescing into a /// Union of live intervals that are strong candidates for coalescing into a
@ -42,19 +44,19 @@ class LiveIntervalUnion {
// A set of live virtual register segments that supports fast insertion, // A set of live virtual register segments that supports fast insertion,
// intersection, and removal. // intersection, and removal.
// Mapping SlotIndex intervals to virtual register numbers. // Mapping SlotIndex intervals to virtual register numbers.
typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments; using LiveSegments = IntervalMap<SlotIndex, LiveInterval*>;
public: public:
// SegmentIter can advance to the next segment ordered by starting position // SegmentIter can advance to the next segment ordered by starting position
// which may belong to a different live virtual register. We also must be able // which may belong to a different live virtual register. We also must be able
// to reach the current segment's containing virtual register. // to reach the current segment's containing virtual register.
typedef LiveSegments::iterator SegmentIter; using SegmentIter = LiveSegments::iterator;
/// Const version of SegmentIter. /// Const version of SegmentIter.
typedef LiveSegments::const_iterator ConstSegmentIter; using ConstSegmentIter = LiveSegments::const_iterator;
// LiveIntervalUnions share an external allocator. // LiveIntervalUnions share an external allocator.
typedef LiveSegments::Allocator Allocator; using Allocator = LiveSegments::Allocator;
private: private:
unsigned Tag = 0; // unique tag for current contents. unsigned Tag = 0; // unique tag for current contents.
@ -76,7 +78,7 @@ class LiveIntervalUnion {
SlotIndex startIndex() const { return Segments.start(); } SlotIndex startIndex() const { return Segments.start(); }
// Provide public access to the underlying map to allow overlap iteration. // Provide public access to the underlying map to allow overlap iteration.
typedef LiveSegments Map; using Map = LiveSegments;
const Map &getMap() const { return Segments; } const Map &getMap() const { return Segments; }
/// getTag - Return an opaque tag representing the current state of the union. /// getTag - Return an opaque tag representing the current state of the union.


@ -7,23 +7,24 @@
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// This file implements the LivePhysRegs utility for tracking liveness of /// \file
// physical registers. This can be used for ad-hoc liveness tracking after /// This file implements the LivePhysRegs utility for tracking liveness of
// register allocation. You can start with the live-ins/live-outs at the /// physical registers. This can be used for ad-hoc liveness tracking after
// beginning/end of a block and update the information while walking the /// register allocation. You can start with the live-ins/live-outs at the
// instructions inside the block. This implementation tracks the liveness on a /// beginning/end of a block and update the information while walking the
// sub-register granularity. /// instructions inside the block. This implementation tracks the liveness on a
// /// sub-register granularity.
// We assume that the high bits of a physical super-register are not preserved ///
// unless the instruction has an implicit-use operand reading the super- /// We assume that the high bits of a physical super-register are not preserved
// register. /// unless the instruction has an implicit-use operand reading the super-
// /// register.
// X86 Example: ///
// %YMM0<def> = ... /// X86 Example:
// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0) /// %YMM0<def> = ...
// /// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
// %YMM0<def> = ... ///
// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive) /// %YMM0<def> = ...
/// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H #ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
@ -39,40 +40,42 @@
namespace llvm {
class MachineInstr;
class MachineOperand;
class MachineRegisterInfo;
class raw_ostream;
/// \brief A set of live physical registers with functions to track liveness /// \brief A set of physical registers with utility functions to track liveness
/// when walking backward/forward through a basic block. /// when walking backward/forward through a basic block.
class LivePhysRegs { class LivePhysRegs {
const TargetRegisterInfo *TRI = nullptr; const TargetRegisterInfo *TRI = nullptr;
SparseSet<unsigned> LiveRegs; SparseSet<unsigned> LiveRegs;
public:
/// Constructs an uninitialized set. init() needs to be called to initialize it.
LivePhysRegs() = default;
/// Constructs and initializes an empty set.
LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
LiveRegs.setUniverse(TRI.getNumRegs());
}
LivePhysRegs(const LivePhysRegs&) = delete; LivePhysRegs(const LivePhysRegs&) = delete;
LivePhysRegs &operator=(const LivePhysRegs&) = delete; LivePhysRegs &operator=(const LivePhysRegs&) = delete;
public: /// (re-)initializes and clears the set.
/// \brief Constructs a new empty LivePhysRegs set.
LivePhysRegs() = default;
/// \brief Constructs and initialize an empty LivePhysRegs set.
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
assert(TRI && "Invalid TargetRegisterInfo pointer.");
LiveRegs.setUniverse(TRI->getNumRegs());
}
/// \brief Clear and initialize the LivePhysRegs set.
void init(const TargetRegisterInfo &TRI) { void init(const TargetRegisterInfo &TRI) {
this->TRI = &TRI; this->TRI = &TRI;
LiveRegs.clear(); LiveRegs.clear();
LiveRegs.setUniverse(TRI.getNumRegs()); LiveRegs.setUniverse(TRI.getNumRegs());
} }
/// \brief Clears the LivePhysRegs set. /// Clears the set.
void clear() { LiveRegs.clear(); } void clear() { LiveRegs.clear(); }
/// \brief Returns true if the set is empty. /// Returns true if the set is empty.
bool empty() const { return LiveRegs.empty(); } bool empty() const { return LiveRegs.empty(); }
/// \brief Adds a physical register and all its sub-registers to the set. /// Adds a physical register and all its sub-registers to the set.
void addReg(unsigned Reg) { void addReg(unsigned Reg) {
assert(TRI && "LivePhysRegs is not initialized."); assert(TRI && "LivePhysRegs is not initialized.");
assert(Reg <= TRI->getNumRegs() && "Expected a physical register."); assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
@ -90,12 +93,13 @@ class LivePhysRegs {
LiveRegs.erase(*R); LiveRegs.erase(*R);
} }
/// \brief Removes physical registers clobbered by the regmask operand @p MO. /// Removes physical registers clobbered by the regmask operand \p MO.
void removeRegsInMask(const MachineOperand &MO, void removeRegsInMask(const MachineOperand &MO,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers); SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
nullptr);
/// \brief Returns true if register @p Reg is contained in the set. This also /// \brief Returns true if register \p Reg is contained in the set. This also
/// works if only the super register of @p Reg has been defined, because /// works if only the super register of \p Reg has been defined, because
/// addReg() always adds all sub-registers to the set as well. /// addReg() always adds all sub-registers to the set as well.
/// Note: Returns false if just some sub registers are live, use available() /// Note: Returns false if just some sub registers are live, use available()
/// when searching a free register. /// when searching a free register.
@ -104,48 +108,48 @@ class LivePhysRegs {
/// Returns true if register \p Reg and no aliasing register is in the set. /// Returns true if register \p Reg and no aliasing register is in the set.
bool available(const MachineRegisterInfo &MRI, unsigned Reg) const; bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
/// \brief Simulates liveness when stepping backwards over an /// Simulates liveness when stepping backwards over an instruction(bundle).
/// instruction(bundle): Remove Defs, add uses. This is the recommended way of /// Remove Defs, add uses. This is the recommended way of calculating
/// calculating liveness. /// liveness.
void stepBackward(const MachineInstr &MI); void stepBackward(const MachineInstr &MI);
/// \brief Simulates liveness when stepping forward over an /// Simulates liveness when stepping forward over an instruction(bundle).
/// instruction(bundle): Remove killed-uses, add defs. This is the not /// Remove killed-uses, add defs. This is the not recommended way, because it
/// recommended way, because it depends on accurate kill flags. If possible /// depends on accurate kill flags. If possible use stepBackward() instead of
/// use stepBackward() instead of this function. /// this function. The clobbers set will be the list of registers either
/// The clobbers set will be the list of registers either defined or clobbered /// defined or clobbered by a regmask. The operand will identify whether this
/// by a regmask. The operand will identify whether this is a regmask or /// is a regmask or register operand.
/// register operand.
void stepForward(const MachineInstr &MI, void stepForward(const MachineInstr &MI,
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers); SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
/// Adds all live-in registers of basic block @p MBB. /// Adds all live-in registers of basic block \p MBB.
/// Live in registers are the registers in the blocks live-in list and the /// Live in registers are the registers in the blocks live-in list and the
/// pristine registers. /// pristine registers.
void addLiveIns(const MachineBasicBlock &MBB); void addLiveIns(const MachineBasicBlock &MBB);
/// Adds all live-out registers of basic block @p MBB. /// Adds all live-out registers of basic block \p MBB.
/// Live out registers are the union of the live-in registers of the successor /// Live out registers are the union of the live-in registers of the successor
/// blocks and pristine registers. Live out registers of the end block are the /// blocks and pristine registers. Live out registers of the end block are the
/// callee saved registers. /// callee saved registers.
void addLiveOuts(const MachineBasicBlock &MBB); void addLiveOuts(const MachineBasicBlock &MBB);
/// Like addLiveOuts() but does not add pristine registers/callee saved /// Adds all live-out registers of basic block \p MBB but skips pristine
/// registers. /// registers.
void addLiveOutsNoPristines(const MachineBasicBlock &MBB); void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
typedef SparseSet<unsigned>::const_iterator const_iterator; using const_iterator = SparseSet<unsigned>::const_iterator;
const_iterator begin() const { return LiveRegs.begin(); } const_iterator begin() const { return LiveRegs.begin(); }
const_iterator end() const { return LiveRegs.end(); } const_iterator end() const { return LiveRegs.end(); }
/// \brief Prints the currently live registers to @p OS. /// Prints the currently live registers to \p OS.
void print(raw_ostream &OS) const; void print(raw_ostream &OS) const;
/// \brief Dumps the currently live registers to the debug output. /// Dumps the currently live registers to the debug output.
void dump() const; void dump() const;
private: private:
/// Adds live-in registers from basic block @p MBB, taking associated /// \brief Adds live-in registers from basic block \p MBB, taking associated
/// lane masks into consideration. /// lane masks into consideration.
void addBlockLiveIns(const MachineBasicBlock &MBB); void addBlockLiveIns(const MachineBasicBlock &MBB);
}; };
@ -155,11 +159,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
return OS; return OS;
} }
/// Compute the live-in list for \p MBB assuming all of its successors live-in /// \brief Computes the live-in list for \p MBB assuming all of its successors
/// lists are up-to-date. Uses the given LivePhysReg instance \p LiveRegs; This /// live-in lists are up-to-date. Uses the given LivePhysReg instance \p
/// is just here to avoid repeated heap allocations when calling this multiple /// LiveRegs; This is just here to avoid repeated heap allocations when calling
/// times in a pass. /// this multiple times in a pass.
void computeLiveIns(LivePhysRegs &LiveRegs, const TargetRegisterInfo &TRI, void computeLiveIns(LivePhysRegs &LiveRegs, const MachineRegisterInfo &MRI,
MachineBasicBlock &MBB); MachineBasicBlock &MBB);
} // end namespace llvm } // end namespace llvm
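For illustration, a minimal sketch of the backward walk that the stepBackward()/addLiveOuts() comments above recommend, and that computeLiveIns() builds on. It assumes a MachineFunction pass that already has the block, its register info, and a candidate register in hand; the helper name and FreeReg are hypothetical, not part of this change.

// Minimal sketch (not part of this change): query whether a physical
// register is free at the entry of MBB by walking the block backwards.
static bool isRegFreeAtEntry(const MachineBasicBlock &MBB,
                             const TargetRegisterInfo &TRI,
                             const MachineRegisterInfo &MRI,
                             unsigned FreeReg) {
  LivePhysRegs LiveRegs;
  LiveRegs.init(TRI);            // clear and bind the set to this target
  LiveRegs.addLiveOuts(MBB);     // seed with live-outs and pristine registers
  for (const MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend()))
    LiveRegs.stepBackward(MI);   // remove defs, then add uses
  return LiveRegs.available(MRI, FreeReg);
}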

View File

@ -1,4 +1,4 @@
//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===// //===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -19,19 +19,28 @@
#define LLVM_CODEGEN_LIVERANGEEDIT_H #define LLVM_CODEGEN_LIVERANGEEDIT_H
#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
namespace llvm { namespace llvm {
class LiveIntervals; class LiveIntervals;
class MachineBlockFrequencyInfo; class MachineBlockFrequencyInfo;
class MachineInstr;
class MachineLoopInfo; class MachineLoopInfo;
class MachineOperand;
class TargetInstrInfo;
class TargetRegisterInfo;
class VirtRegMap; class VirtRegMap;
class LiveRangeEdit : private MachineRegisterInfo::Delegate { class LiveRangeEdit : private MachineRegisterInfo::Delegate {
@ -39,7 +48,10 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// Callback methods for LiveRangeEdit owners. /// Callback methods for LiveRangeEdit owners.
class Delegate { class Delegate {
virtual void anchor(); virtual void anchor();
public: public:
virtual ~Delegate() = default;
/// Called immediately before erasing a dead machine instruction. /// Called immediately before erasing a dead machine instruction.
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {} virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
@ -53,8 +65,6 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// Called after cloning a virtual register. /// Called after cloning a virtual register.
/// This is used for new registers representing connected components of Old. /// This is used for new registers representing connected components of Old.
virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {} virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
virtual ~Delegate() {}
}; };
private: private:
@ -70,7 +80,7 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
const unsigned FirstNew; const unsigned FirstNew;
/// ScannedRemattable - true when remattable values have been identified. /// ScannedRemattable - true when remattable values have been identified.
bool ScannedRemattable; bool ScannedRemattable = false;
/// DeadRemats - The saved instructions which have already been dead after /// DeadRemats - The saved instructions which have already been dead after
/// rematerialization but not deleted yet -- to be done in postOptimization. /// rematerialization but not deleted yet -- to be done in postOptimization.
@ -78,11 +88,11 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// Remattable - Values defined by remattable instructions as identified by /// Remattable - Values defined by remattable instructions as identified by
/// tii.isTriviallyReMaterializable(). /// tii.isTriviallyReMaterializable().
SmallPtrSet<const VNInfo*,4> Remattable; SmallPtrSet<const VNInfo *, 4> Remattable;
/// Rematted - Values that were actually rematted, and so need to have their /// Rematted - Values that were actually rematted, and so need to have their
/// live range trimmed or entirely removed. /// live range trimmed or entirely removed.
SmallPtrSet<const VNInfo*,4> Rematted; SmallPtrSet<const VNInfo *, 4> Rematted;
/// scanRemattable - Identify the Parent values that may rematerialize. /// scanRemattable - Identify the Parent values that may rematerialize.
void scanRemattable(AliasAnalysis *aa); void scanRemattable(AliasAnalysis *aa);
@ -94,11 +104,11 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// foldAsLoad - If LI has a single use and a single def that can be folded as /// foldAsLoad - If LI has a single use and a single def that can be folded as
/// a load, eliminate the register by folding the def into the use. /// a load, eliminate the register by folding the def into the use.
bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead); bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);
using ToShrinkSet = SetVector<LiveInterval *, SmallVector<LiveInterval *, 8>,
SmallPtrSet<LiveInterval *, 8>>;
typedef SetVector<LiveInterval*,
SmallVector<LiveInterval*, 8>,
SmallPtrSet<LiveInterval*, 8> > ToShrinkSet;
/// Helper for eliminateDeadDefs. /// Helper for eliminateDeadDefs.
void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink, void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
AliasAnalysis *AA); AliasAnalysis *AA);
@ -129,26 +139,26 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr) SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
: Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis), : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate), VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
FirstNew(newRegs.size()), ScannedRemattable(false), FirstNew(newRegs.size()), DeadRemats(deadRemats) {
DeadRemats(deadRemats) {
MRI.setDelegate(this); MRI.setDelegate(this);
} }
~LiveRangeEdit() override { MRI.resetDelegate(this); } ~LiveRangeEdit() override { MRI.resetDelegate(this); }
LiveInterval &getParent() const { LiveInterval &getParent() const {
assert(Parent && "No parent LiveInterval"); assert(Parent && "No parent LiveInterval");
return *Parent; return *Parent;
} }
unsigned getReg() const { return getParent().reg; } unsigned getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit. /// Iterator for accessing the new registers added by this edit.
typedef SmallVectorImpl<unsigned>::const_iterator iterator; using iterator = SmallVectorImpl<unsigned>::const_iterator;
iterator begin() const { return NewRegs.begin()+FirstNew; } iterator begin() const { return NewRegs.begin() + FirstNew; }
iterator end() const { return NewRegs.end(); } iterator end() const { return NewRegs.end(); }
unsigned size() const { return NewRegs.size()-FirstNew; } unsigned size() const { return NewRegs.size() - FirstNew; }
bool empty() const { return size() == 0; } bool empty() const { return size() == 0; }
unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; } unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
/// pop_back - It allows LiveRangeEdit users to drop new registers. /// pop_back - It allows LiveRangeEdit users to drop new registers.
/// The context is when an original def instruction of a register is /// The context is when an original def instruction of a register is
@ -176,26 +186,25 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
return createEmptyIntervalFrom(getReg()); return createEmptyIntervalFrom(getReg());
} }
unsigned create() { unsigned create() { return createFrom(getReg()); }
return createFrom(getReg());
}
/// anyRematerializable - Return true if any parent values may be /// anyRematerializable - Return true if any parent values may be
/// rematerializable. /// rematerializable.
/// This function must be called before any rematerialization is attempted. /// This function must be called before any rematerialization is attempted.
bool anyRematerializable(AliasAnalysis*); bool anyRematerializable(AliasAnalysis *);
/// checkRematerializable - Manually add VNI to the list of rematerializable /// checkRematerializable - Manually add VNI to the list of rematerializable
/// values if DefMI may be rematerializable. /// values if DefMI may be rematerializable.
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI, bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
AliasAnalysis*); AliasAnalysis *);
/// Remat - Information needed to rematerialize at a specific location. /// Remat - Information needed to rematerialize at a specific location.
struct Remat { struct Remat {
VNInfo *ParentVNI; // parent_'s value at the remat location. VNInfo *ParentVNI; // parent_'s value at the remat location.
MachineInstr *OrigMI; // Instruction defining OrigVNI. It contains the MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
// real expr for remat. // the real expr for remat.
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
}; };
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
@ -209,10 +218,8 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// liveness is not updated. /// liveness is not updated.
/// Return the SlotIndex of the new instruction. /// Return the SlotIndex of the new instruction.
SlotIndex rematerializeAt(MachineBasicBlock &MBB, SlotIndex rematerializeAt(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, MachineBasicBlock::iterator MI, unsigned DestReg,
unsigned DestReg, const Remat &RM, const TargetRegisterInfo &,
const Remat &RM,
const TargetRegisterInfo&,
bool Late = false); bool Late = false);
/// markRematerialized - explicitly mark a value as rematerialized after doing /// markRematerialized - explicitly mark a value as rematerialized after doing
@ -248,11 +255,10 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
/// calculateRegClassAndHint - Recompute register class and hint for each new /// calculateRegClassAndHint - Recompute register class and hint for each new
/// register. /// register.
void calculateRegClassAndHint(MachineFunction&, void calculateRegClassAndHint(MachineFunction &, const MachineLoopInfo &,
const MachineLoopInfo&, const MachineBlockFrequencyInfo &);
const MachineBlockFrequencyInfo&);
}; };
} } // end namespace llvm
#endif #endif // LLVM_CODEGEN_LIVERANGEEDIT_H

View File

@ -1,4 +1,4 @@
//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===// //===- LiveStackAnalysis.h - Live Stack Slot Analysis -----------*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -18,13 +18,16 @@
#include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Allocator.h" #include "llvm/Pass.h"
#include "llvm/Target/TargetRegisterInfo.h" #include <cassert>
#include <map> #include <map>
#include <unordered_map> #include <unordered_map>
namespace llvm { namespace llvm {
class TargetRegisterClass;
class TargetRegisterInfo;
class LiveStacks : public MachineFunctionPass { class LiveStacks : public MachineFunctionPass {
const TargetRegisterInfo *TRI; const TargetRegisterInfo *TRI;
@ -33,8 +36,7 @@ class LiveStacks : public MachineFunctionPass {
VNInfo::Allocator VNInfoAllocator; VNInfo::Allocator VNInfoAllocator;
/// S2IMap - Stack slot indices to live interval mapping. /// S2IMap - Stack slot indices to live interval mapping.
/// using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
typedef std::unordered_map<int, LiveInterval> SS2IntervalMap;
SS2IntervalMap S2IMap; SS2IntervalMap S2IMap;
/// S2RCMap - Stack slot indices to register class mapping. /// S2RCMap - Stack slot indices to register class mapping.
@ -42,12 +44,14 @@ class LiveStacks : public MachineFunctionPass {
public: public:
static char ID; // Pass identification, replacement for typeid static char ID; // Pass identification, replacement for typeid
LiveStacks() : MachineFunctionPass(ID) { LiveStacks() : MachineFunctionPass(ID) {
initializeLiveStacksPass(*PassRegistry::getPassRegistry()); initializeLiveStacksPass(*PassRegistry::getPassRegistry());
} }
typedef SS2IntervalMap::iterator iterator; using iterator = SS2IntervalMap::iterator;
typedef SS2IntervalMap::const_iterator const_iterator; using const_iterator = SS2IntervalMap::const_iterator;
const_iterator begin() const { return S2IMap.begin(); } const_iterator begin() const { return S2IMap.begin(); }
const_iterator end() const { return S2IMap.end(); } const_iterator end() const { return S2IMap.end(); }
iterator begin() { return S2IMap.begin(); } iterator begin() { return S2IMap.begin(); }
@ -93,6 +97,7 @@ class LiveStacks : public MachineFunctionPass {
/// print - Implement the dump method. /// print - Implement the dump method.
void print(raw_ostream &O, const Module * = nullptr) const override; void print(raw_ostream &O, const Module * = nullptr) const override;
}; };
} } // end namespace llvm
#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */ #endif // LLVM_CODEGEN_LIVESTACK_ANALYSIS_H

View File

@ -1,4 +1,4 @@
//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===// //===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -15,41 +15,50 @@
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H #define LLVM_CODEGEN_MACHINEBASICBLOCK_H
#include "llvm/ADT/GraphTraits.h" #include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h" #include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h" #include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Support/BranchProbability.h" #include "llvm/Support/BranchProbability.h"
#include "llvm/MC/LaneBitmask.h" #include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/DataTypes.h" #include <cassert>
#include <cstdint>
#include <functional> #include <functional>
#include <iterator>
#include <string>
#include <vector>
namespace llvm { namespace llvm {
class Pass;
class BasicBlock; class BasicBlock;
class MachineFunction; class MachineFunction;
class MCSymbol; class MCSymbol;
class MIPrinter; class ModuleSlotTracker;
class Pass;
class SlotIndexes; class SlotIndexes;
class StringRef; class StringRef;
class raw_ostream; class raw_ostream;
class MachineBranchProbabilityInfo; class TargetRegisterClass;
class TargetRegisterInfo;
template <> struct ilist_traits<MachineInstr> { template <> struct ilist_traits<MachineInstr> {
private: private:
friend class MachineBasicBlock; // Set by the owning MachineBasicBlock. friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
MachineBasicBlock *Parent; MachineBasicBlock *Parent;
typedef simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator using instr_iterator =
instr_iterator; simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
public: public:
void addNodeToList(MachineInstr *N); void addNodeToList(MachineInstr *N);
void removeNodeFromList(MachineInstr *N); void removeNodeFromList(MachineInstr *N);
void transferNodesFromList(ilist_traits &OldList, instr_iterator First, void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
instr_iterator Last); instr_iterator Last);
void deleteNode(MachineInstr *MI); void deleteNode(MachineInstr *MI);
}; };
@ -69,7 +78,8 @@ class MachineBasicBlock
}; };
private: private:
typedef ilist<MachineInstr, ilist_sentinel_tracking<true>> Instructions; using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
Instructions Insts; Instructions Insts;
const BasicBlock *BB; const BasicBlock *BB;
int Number; int Number;
@ -83,12 +93,12 @@ class MachineBasicBlock
/// same order as Successors, or it is empty if we don't use it (disable /// same order as Successors, or it is empty if we don't use it (disable
/// optimization). /// optimization).
std::vector<BranchProbability> Probs; std::vector<BranchProbability> Probs;
typedef std::vector<BranchProbability>::iterator probability_iterator; using probability_iterator = std::vector<BranchProbability>::iterator;
typedef std::vector<BranchProbability>::const_iterator using const_probability_iterator =
const_probability_iterator; std::vector<BranchProbability>::const_iterator;
/// Keep track of the physical registers that are livein of the basicblock. /// Keep track of the physical registers that are livein of the basicblock.
typedef std::vector<RegisterMaskPair> LiveInVector; using LiveInVector = std::vector<RegisterMaskPair>;
LiveInVector LiveIns; LiveInVector LiveIns;
/// Alignment of the basic block. Zero if the basic block does not need to be /// Alignment of the basic block. Zero if the basic block does not need to be
@ -113,7 +123,7 @@ class MachineBasicBlock
mutable MCSymbol *CachedMCSymbol = nullptr; mutable MCSymbol *CachedMCSymbol = nullptr;
// Intrusive list support // Intrusive list support
MachineBasicBlock() {} MachineBasicBlock() = default;
explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB); explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
@ -145,16 +155,16 @@ class MachineBasicBlock
const MachineFunction *getParent() const { return xParent; } const MachineFunction *getParent() const { return xParent; }
MachineFunction *getParent() { return xParent; } MachineFunction *getParent() { return xParent; }
typedef Instructions::iterator instr_iterator; using instr_iterator = Instructions::iterator;
typedef Instructions::const_iterator const_instr_iterator; using const_instr_iterator = Instructions::const_iterator;
typedef Instructions::reverse_iterator reverse_instr_iterator; using reverse_instr_iterator = Instructions::reverse_iterator;
typedef Instructions::const_reverse_iterator const_reverse_instr_iterator; using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
typedef MachineInstrBundleIterator<MachineInstr> iterator; using iterator = MachineInstrBundleIterator<MachineInstr>;
typedef MachineInstrBundleIterator<const MachineInstr> const_iterator; using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
typedef MachineInstrBundleIterator<MachineInstr, true> reverse_iterator; using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
typedef MachineInstrBundleIterator<const MachineInstr, true> using const_reverse_iterator =
const_reverse_iterator; MachineInstrBundleIterator<const MachineInstr, true>;
unsigned size() const { return (unsigned)Insts.size(); } unsigned size() const { return (unsigned)Insts.size(); }
bool empty() const { return Insts.empty(); } bool empty() const { return Insts.empty(); }
@ -178,8 +188,8 @@ class MachineBasicBlock
reverse_instr_iterator instr_rend () { return Insts.rend(); } reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); } const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
typedef iterator_range<instr_iterator> instr_range; using instr_range = iterator_range<instr_iterator>;
typedef iterator_range<const_instr_iterator> const_instr_range; using const_instr_range = iterator_range<const_instr_iterator>;
instr_range instrs() { return instr_range(instr_begin(), instr_end()); } instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
const_instr_range instrs() const { const_instr_range instrs() const {
return const_instr_range(instr_begin(), instr_end()); return const_instr_range(instr_begin(), instr_end());
@ -213,18 +223,18 @@ class MachineBasicBlock
} }
// Machine-CFG iterators // Machine-CFG iterators
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator; using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator; using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
typedef std::vector<MachineBasicBlock *>::iterator succ_iterator; using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator; using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
typedef std::vector<MachineBasicBlock *>::reverse_iterator using pred_reverse_iterator =
pred_reverse_iterator; std::vector<MachineBasicBlock *>::reverse_iterator;
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator using const_pred_reverse_iterator =
const_pred_reverse_iterator; std::vector<MachineBasicBlock *>::const_reverse_iterator;
typedef std::vector<MachineBasicBlock *>::reverse_iterator using succ_reverse_iterator =
succ_reverse_iterator; std::vector<MachineBasicBlock *>::reverse_iterator;
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator using const_succ_reverse_iterator =
const_succ_reverse_iterator; std::vector<MachineBasicBlock *>::const_reverse_iterator;
pred_iterator pred_begin() { return Predecessors.begin(); } pred_iterator pred_begin() { return Predecessors.begin(); }
const_pred_iterator pred_begin() const { return Predecessors.begin(); } const_pred_iterator pred_begin() const { return Predecessors.begin(); }
pred_iterator pred_end() { return Predecessors.end(); } pred_iterator pred_end() { return Predecessors.end(); }
@ -307,7 +317,7 @@ class MachineBasicBlock
// Iteration support for live in sets. These sets are kept in sorted // Iteration support for live in sets. These sets are kept in sorted
// order by their register number. // order by their register number.
typedef LiveInVector::const_iterator livein_iterator; using livein_iterator = LiveInVector::const_iterator;
#ifndef NDEBUG #ifndef NDEBUG
/// Unlike livein_begin, this method does not check that the liveness /// Unlike livein_begin, this method does not check that the liveness
/// information is accurate. Still for debug purposes it may be useful /// information is accurate. Still for debug purposes it may be useful
@ -455,7 +465,6 @@ class MachineBasicBlock
/// other block. /// other block.
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const; bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
/// Return the fallthrough block if the block can implicitly /// Return the fallthrough block if the block can implicitly
/// transfer control to the block after it by falling off the end of /// transfer control to the block after it by falling off the end of
/// it. This should return null if it can reach the block after /// it. This should return null if it can reach the block after
@ -695,7 +704,7 @@ class MachineBasicBlock
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
unsigned Reg, unsigned Reg,
const_iterator Before, const_iterator Before,
unsigned Neighborhood=10) const; unsigned Neighborhood = 10) const;
// Debugging methods. // Debugging methods.
void dump() const; void dump() const;
@ -714,7 +723,6 @@ class MachineBasicBlock
/// Return the MCSymbol for this basic block. /// Return the MCSymbol for this basic block.
MCSymbol *getSymbol() const; MCSymbol *getSymbol() const;
private: private:
/// Return probability iterator corresponding to the I successor iterator. /// Return probability iterator corresponding to the I successor iterator.
probability_iterator getProbabilityIterator(succ_iterator I); probability_iterator getProbabilityIterator(succ_iterator I);
@ -764,8 +772,8 @@ struct MBB2NumberFunctor :
// //
template <> struct GraphTraits<MachineBasicBlock *> { template <> struct GraphTraits<MachineBasicBlock *> {
typedef MachineBasicBlock *NodeRef; using NodeRef = MachineBasicBlock *;
typedef MachineBasicBlock::succ_iterator ChildIteratorType; using ChildIteratorType = MachineBasicBlock::succ_iterator;
static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; } static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); } static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
@ -773,8 +781,8 @@ template <> struct GraphTraits<MachineBasicBlock *> {
}; };
template <> struct GraphTraits<const MachineBasicBlock *> { template <> struct GraphTraits<const MachineBasicBlock *> {
typedef const MachineBasicBlock *NodeRef; using NodeRef = const MachineBasicBlock *;
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType; using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; } static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); } static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
@ -787,28 +795,30 @@ template <> struct GraphTraits<const MachineBasicBlock *> {
// to be when traversing the predecessor edges of a MBB // to be when traversing the predecessor edges of a MBB
// instead of the successor edges. // instead of the successor edges.
// //
template <> struct GraphTraits<Inverse<MachineBasicBlock*> > { template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
typedef MachineBasicBlock *NodeRef; using NodeRef = MachineBasicBlock *;
typedef MachineBasicBlock::pred_iterator ChildIteratorType; using ChildIteratorType = MachineBasicBlock::pred_iterator;
static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) { static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
return G.Graph; return G.Graph;
} }
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); } static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); } static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
}; };
template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > { template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
typedef const MachineBasicBlock *NodeRef; using NodeRef = const MachineBasicBlock *;
typedef MachineBasicBlock::const_pred_iterator ChildIteratorType; using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) { static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
return G.Graph; return G.Graph;
} }
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); } static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); } static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
}; };
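A short sketch of what these specializations enable: the generic graph iterators can now walk the machine CFG directly. depth_first() from llvm/ADT/DepthFirstIterator.h is assumed here; it is not part of the change above.

// Sketch (not part of this change): successor-order traversal of a machine
// CFG via the GraphTraits<MachineBasicBlock *> specialization.
#include "llvm/ADT/DepthFirstIterator.h"

static void visitReachable(MachineFunction &MF) {
  for (MachineBasicBlock *MBB : depth_first(&MF.front())) {
    // Each block reachable through successor edges is visited once.
    (void)MBB;
  }
}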
/// MachineInstrSpan provides an interface to get an iteration range /// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all /// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction /// those instructions inserted prior to or following that instruction
@ -816,6 +826,7 @@ template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
class MachineInstrSpan { class MachineInstrSpan {
MachineBasicBlock &MBB; MachineBasicBlock &MBB;
MachineBasicBlock::iterator I, B, E; MachineBasicBlock::iterator I, B, E;
public: public:
MachineInstrSpan(MachineBasicBlock::iterator I) MachineInstrSpan(MachineBasicBlock::iterator I)
: MBB(*I->getParent()), : MBB(*I->getParent()),
@ -854,6 +865,6 @@ inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
return It; return It;
} }
} // End llvm namespace } // end namespace llvm
#endif #endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H

View File

@ -1,4 +1,4 @@
//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===// //===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -17,26 +17,28 @@
#include "llvm/ADT/Optional.h" #include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/BlockFrequency.h" #include "llvm/Support/BlockFrequency.h"
#include <climits> #include <cstdint>
#include <memory>
namespace llvm { namespace llvm {
template <class BlockT> class BlockFrequencyInfoImpl;
class MachineBasicBlock; class MachineBasicBlock;
class MachineBranchProbabilityInfo; class MachineBranchProbabilityInfo;
class MachineFunction;
class MachineLoopInfo; class MachineLoopInfo;
template <class BlockT> class BlockFrequencyInfoImpl; class raw_ostream;
/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation /// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
/// to estimate machine basic block frequencies. /// to estimate machine basic block frequencies.
class MachineBlockFrequencyInfo : public MachineFunctionPass { class MachineBlockFrequencyInfo : public MachineFunctionPass {
typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType; using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
std::unique_ptr<ImplType> MBFI; std::unique_ptr<ImplType> MBFI;
public: public:
static char ID; static char ID;
MachineBlockFrequencyInfo(); MachineBlockFrequencyInfo();
~MachineBlockFrequencyInfo() override; ~MachineBlockFrequencyInfo() override;
void getAnalysisUsage(AnalysisUsage &AU) const override; void getAnalysisUsage(AnalysisUsage &AU) const override;
@ -74,9 +76,8 @@ class MachineBlockFrequencyInfo : public MachineFunctionPass {
const MachineBasicBlock *MBB) const; const MachineBasicBlock *MBB) const;
uint64_t getEntryFreq() const; uint64_t getEntryFreq() const;
}; };
} } // end namespace llvm
#endif #endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
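Block frequencies are scaled integers, so they are normally read relative to getEntryFreq(); the sketch below assumes getBlockFreq(), whose parameter list is the only part visible in this hunk.

// Sketch (not part of this change): a block's frequency expressed relative
// to the function entry.  getBlockFreq() is assumed from the full header.
static double relativeFreq(const MachineBlockFrequencyInfo &MBFI,
                           const MachineBasicBlock *MBB) {
  return double(MBFI.getBlockFreq(MBB).getFrequency()) /
         double(MBFI.getEntryFreq());
}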

View File

@ -11,23 +11,28 @@
#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H #define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
#include "llvm/Analysis/DominanceFrontier.h" #include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Analysis/DominanceFrontierImpl.h"
#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/GenericDomTree.h"
#include <vector>
namespace llvm { namespace llvm {
class MachineDominanceFrontier : public MachineFunctionPass { class MachineDominanceFrontier : public MachineFunctionPass {
ForwardDominanceFrontierBase<MachineBasicBlock> Base; ForwardDominanceFrontierBase<MachineBasicBlock> Base;
public:
typedef DominatorTreeBase<MachineBasicBlock> DomTreeT;
typedef DomTreeNodeBase<MachineBasicBlock> DomTreeNodeT;
typedef DominanceFrontierBase<MachineBasicBlock>::DomSetType DomSetType;
typedef DominanceFrontierBase<MachineBasicBlock>::iterator iterator;
typedef DominanceFrontierBase<MachineBasicBlock>::const_iterator const_iterator;
void operator=(const MachineDominanceFrontier &) = delete; public:
using DomTreeT = DominatorTreeBase<MachineBasicBlock>;
using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
using DomSetType = DominanceFrontierBase<MachineBasicBlock>::DomSetType;
using iterator = DominanceFrontierBase<MachineBasicBlock>::iterator;
using const_iterator =
DominanceFrontierBase<MachineBasicBlock>::const_iterator;
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete; MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier &
operator=(const MachineDominanceFrontier &) = delete;
static char ID; static char ID;
@ -104,6 +109,6 @@ class MachineDominanceFrontier : public MachineFunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override; void getAnalysisUsage(AnalysisUsage &AU) const override;
}; };
} } // end namespace llvm
#endif #endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H

View File

@ -1,4 +1,4 @@
//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==// //==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
@ -16,12 +16,15 @@
#define LLVM_CODEGEN_MACHINEDOMINATORS_H #define LLVM_CODEGEN_MACHINEDOMINATORS_H
#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/GenericDomTree.h" #include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h" #include "llvm/Support/GenericDomTreeConstruction.h"
#include <cassert>
#include <memory> #include <memory>
#include <vector>
namespace llvm { namespace llvm {
@ -33,7 +36,7 @@ inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB
extern template class DomTreeNodeBase<MachineBasicBlock>; extern template class DomTreeNodeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock>; extern template class DominatorTreeBase<MachineBasicBlock>;
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode; using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
//===------------------------------------- //===-------------------------------------
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to /// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
@ -52,6 +55,7 @@ class MachineDominatorTree : public MachineFunctionPass {
/// The splitting of a critical edge is local and thus, it is possible /// The splitting of a critical edge is local and thus, it is possible
/// to apply several of those changes at the same time. /// to apply several of those changes at the same time.
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit; mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
/// \brief Remember all the basic blocks that are inserted during /// \brief Remember all the basic blocks that are inserted during
/// edge splitting. /// edge splitting.
/// Invariant: NewBBs == all the basic blocks contained in the NewBB /// Invariant: NewBBs == all the basic blocks contained in the NewBB
@ -259,8 +263,8 @@ class MachineDominatorTree : public MachineFunctionPass {
template <class Node, class ChildIterator> template <class Node, class ChildIterator>
struct MachineDomTreeGraphTraitsBase { struct MachineDomTreeGraphTraitsBase {
typedef Node *NodeRef; using NodeRef = Node *;
typedef ChildIterator ChildIteratorType; using ChildIteratorType = ChildIterator;
static NodeRef getEntryNode(NodeRef N) { return N; } static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); } static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
@ -287,6 +291,6 @@ template <> struct GraphTraits<MachineDominatorTree*>
} }
}; };
} } // end namespace llvm
#endif #endif // LLVM_CODEGEN_MACHINEDOMINATORS_H

View File

@ -826,20 +826,12 @@ class MachineInstr
getOperand(0).getSubReg() == getOperand(1).getSubReg(); getOperand(0).getSubReg() == getOperand(1).getSubReg();
} }
/// Return true if this is a transient instruction that is /// Return true if this instruction doesn't produce any output in the form of
/// either very likely to be eliminated during register allocation (such as /// executable instructions.
/// copy-like instructions), or if this instruction doesn't have an bool isMetaInstruction() const {
/// execution-time cost. switch (getOpcode()) {
bool isTransient() const { default:
switch(getOpcode()) { return false;
default: return false;
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
// Pseudo-instructions that don't produce any real output.
case TargetOpcode::IMPLICIT_DEF: case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL: case TargetOpcode::KILL:
case TargetOpcode::CFI_INSTRUCTION: case TargetOpcode::CFI_INSTRUCTION:
@ -850,6 +842,23 @@ class MachineInstr
} }
} }
/// Return true if this is a transient instruction that is either very likely
/// to be eliminated during register allocation (such as copy-like
/// instructions), or if this instruction doesn't have an execution-time cost.
bool isTransient() const {
switch (getOpcode()) {
default:
return isMetaInstruction();
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
return true;
}
}
/// Return the number of instructions inside the MI bundle, excluding the /// Return the number of instructions inside the MI bundle, excluding the
/// bundle header. /// bundle header.
/// ///
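The new isMetaInstruction() factors the "no executable output" opcodes out of isTransient(); a small sketch of a typical consumer follows. The helper name is hypothetical and the opcodes named in the comment only repeat the cases visible in this hunk.

// Sketch (not part of this change): count only instructions that will emit
// code, skipping meta instructions such as IMPLICIT_DEF, KILL and
// CFI_INSTRUCTION.
static unsigned countEmittedInstrs(const MachineBasicBlock &MBB) {
  unsigned Count = 0;
  for (const MachineInstr &MI : MBB)
    if (!MI.isMetaInstruction())
      ++Count;
  return Count;
}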

View File

@ -642,6 +642,11 @@ class MachineRegisterInfo {
/// ///
void setRegBank(unsigned Reg, const RegisterBank &RegBank); void setRegBank(unsigned Reg, const RegisterBank &RegBank);
void setRegClassOrRegBank(unsigned Reg,
const RegClassOrRegBank &RCOrRB){
VRegInfo[Reg].first = RCOrRB;
}
/// constrainRegClass - Constrain the register class of the specified virtual /// constrainRegClass - Constrain the register class of the specified virtual
/// register to be a common subclass of RC and the current register class, /// register to be a common subclass of RC and the current register class,
/// but only if the new class has at least MinNumRegs registers. Return the /// but only if the new class has at least MinNumRegs registers. Return the

View File

@ -26,7 +26,7 @@ namespace llvm {
/// Machine Value Type. Every type that is supported natively by some /// Machine Value Type. Every type that is supported natively by some
/// processor targeted by LLVM occurs here. This means that any legal value /// processor targeted by LLVM occurs here. This means that any legal value
/// type can be represented by an MVT. /// type can be represented by an MVT.
class MVT { class MVT {
public: public:
enum SimpleValueType : uint8_t { enum SimpleValueType : uint8_t {
// Simple value types that aren't explicitly part of this enumeration // Simple value types that aren't explicitly part of this enumeration

View File

@ -52,14 +52,14 @@ class TargetRegisterInfo;
/// These are the different kinds of scheduling dependencies. /// These are the different kinds of scheduling dependencies.
enum Kind { enum Kind {
Data, ///< Regular data dependence (aka true-dependence). Data, ///< Regular data dependence (aka true-dependence).
Anti, ///< A register anti-dependedence (aka WAR). Anti, ///< A register anti-dependence (aka WAR).
Output, ///< A register output-dependence (aka WAW). Output, ///< A register output-dependence (aka WAW).
Order ///< Any other ordering dependency. Order ///< Any other ordering dependency.
}; };
// Strong dependencies must be respected by the scheduler. Artificial // Strong dependencies must be respected by the scheduler. Artificial
// dependencies may be removed only if they are redundant with another // dependencies may be removed only if they are redundant with another
// strong depedence. // strong dependence.
// //
// Weak dependencies may be violated by the scheduling strategy, but only if // Weak dependencies may be violated by the scheduling strategy, but only if
// the strategy can prove it is correct to do so. // the strategy can prove it is correct to do so.
@ -342,7 +342,7 @@ class TargetRegisterInfo;
/// BoundaryNodes can have DAG edges, including Data edges, but they do not /// BoundaryNodes can have DAG edges, including Data edges, but they do not
/// correspond to schedulable entities (e.g. instructions) and do not have a /// correspond to schedulable entities (e.g. instructions) and do not have a
/// valid ID. Consequently, always check for boundary nodes before accessing /// valid ID. Consequently, always check for boundary nodes before accessing
/// an assoicative data structure keyed on node ID. /// an associative data structure keyed on node ID.
bool isBoundaryNode() const { return NodeNum == BoundaryID; } bool isBoundaryNode() const { return NodeNum == BoundaryID; }
/// Assigns the representative SDNode for this SUnit. This may be used /// Assigns the representative SDNode for this SUnit. This may be used

View File

@ -18,6 +18,7 @@
#include "llvm/ADT/MapVector.h" #include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SparseMultiSet.h" #include "llvm/ADT/SparseMultiSet.h"
#include "llvm/ADT/SparseSet.h" #include "llvm/ADT/SparseSet.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/ScheduleDAG.h" #include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/TargetSchedule.h" #include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Compiler.h" #include "llvm/Support/Compiler.h"
@ -224,7 +225,7 @@ namespace llvm {
MachineInstr *FirstDbgValue; MachineInstr *FirstDbgValue;
/// Set of live physical registers for updating kill flags. /// Set of live physical registers for updating kill flags.
BitVector LiveRegs; LivePhysRegs LiveRegs;
public: public:
explicit ScheduleDAGInstrs(MachineFunction &mf, explicit ScheduleDAGInstrs(MachineFunction &mf,
@ -311,7 +312,7 @@ namespace llvm {
std::string getDAGName() const override; std::string getDAGName() const override;
/// Fixes register kill flags that scheduling has made invalid. /// Fixes register kill flags that scheduling has made invalid.
void fixupKills(MachineBasicBlock *MBB); void fixupKills(MachineBasicBlock &MBB);
protected: protected:
void initSUnits(); void initSUnits();

View File

@ -1070,6 +1070,11 @@ class SelectionDAG {
SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
ArrayRef<SDValue> Ops); ArrayRef<SDValue> Ops);
/// Mutate the specified strict FP node to its non-strict equivalent,
/// unlinking the node from its chain and dropping the metadata arguments.
/// The node must be a strict FP node.
SDNode *mutateStrictFPToFP(SDNode *Node);
/// These are used for target selectors to create a new node /// These are used for target selectors to create a new node
/// with specified return type(s), MachineInstr opcode, and operands. /// with specified return type(s), MachineInstr opcode, and operands.
/// ///

View File

@ -612,6 +612,32 @@ class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
SDNodeBits.IsMemIntrinsic; SDNodeBits.IsMemIntrinsic;
} }
/// Test if this node is a strict floating point pseudo-op.
bool isStrictFPOpcode() {
switch (NodeType) {
default:
return false;
case ISD::STRICT_FADD:
case ISD::STRICT_FSUB:
case ISD::STRICT_FMUL:
case ISD::STRICT_FDIV:
case ISD::STRICT_FREM:
case ISD::STRICT_FSQRT:
case ISD::STRICT_FPOW:
case ISD::STRICT_FPOWI:
case ISD::STRICT_FSIN:
case ISD::STRICT_FCOS:
case ISD::STRICT_FEXP:
case ISD::STRICT_FEXP2:
case ISD::STRICT_FLOG:
case ISD::STRICT_FLOG10:
case ISD::STRICT_FLOG2:
case ISD::STRICT_FRINT:
case ISD::STRICT_FNEARBYINT:
return true;
}
}
/// Test if this node has a post-isel opcode, directly /// Test if this node has a post-isel opcode, directly
/// corresponding to a MachineInstr opcode. /// corresponding to a MachineInstr opcode.
bool isMachineOpcode() const { return NodeType < 0; } bool isMachineOpcode() const { return NodeType < 0; }
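isStrictFPOpcode() pairs with the SelectionDAG::mutateStrictFPToFP() declaration added earlier in this change; a sketch of the intended hand-off during selection, with a hypothetical helper name.

// Sketch (not part of this change): a selector without strict-FP patterns
// can fall back to the equivalent non-strict node before matching.
static SDNode *relaxStrictFP(SelectionDAG &DAG, SDNode *Node) {
  if (Node->isStrictFPOpcode())
    // Unlinks the node from its chain and drops the metadata arguments.
    Node = DAG.mutateStrictFPToFP(Node);
  return Node;
}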

View File

@ -14,6 +14,7 @@
#include "llvm/ADT/Optional.h" #include "llvm/ADT/Optional.h"
#include "llvm/DebugInfo/CodeView/CodeViewError.h" #include "llvm/DebugInfo/CodeView/CodeViewError.h"
#include "llvm/DebugInfo/CodeView/RecordSerialization.h" #include "llvm/DebugInfo/CodeView/RecordSerialization.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamReader.h" #include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/BinaryStreamRef.h" #include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h" #include "llvm/Support/Endian.h"
@ -50,6 +51,13 @@ template <typename Kind> class CVRecord {
Optional<uint32_t> Hash; Optional<uint32_t> Hash;
}; };
template <typename Kind> struct RemappedRecord {
explicit RemappedRecord(const CVRecord<Kind> &R) : OriginalRecord(R) {}
CVRecord<Kind> OriginalRecord;
SmallVector<std::pair<uint32_t, TypeIndex>, 8> Mappings;
};
} // end namespace codeview } // end namespace codeview
template <typename Kind> template <typename Kind>

View File

@ -46,6 +46,7 @@ Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
TypeVisitorCallbacks &Callbacks); TypeVisitorCallbacks &Callbacks);
Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks, Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source = VDS_BytesPresent,
TypeServerHandler *TS = nullptr); TypeServerHandler *TS = nullptr);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks, Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr); TypeServerHandler *TS = nullptr);

View File

@ -40,6 +40,17 @@ class TypeDeserializer : public TypeVisitorCallbacks {
public: public:
TypeDeserializer() = default; TypeDeserializer() = default;
template <typename T> static Error deserializeAs(CVType &CVT, T &Record) {
MappingInfo I(CVT.content());
if (auto EC = I.Mapping.visitTypeBegin(CVT))
return EC;
if (auto EC = I.Mapping.visitKnownRecord(CVT, Record))
return EC;
if (auto EC = I.Mapping.visitTypeEnd(CVT))
return EC;
return Error::success();
}
Error visitTypeBegin(CVType &Record) override { Error visitTypeBegin(CVType &Record) override {
assert(!Mapping && "Already in a type mapping!"); assert(!Mapping && "Already in a type mapping!");
Mapping = llvm::make_unique<MappingInfo>(Record.content()); Mapping = llvm::make_unique<MappingInfo>(Record.content());
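The static deserializeAs<T>() helper added above decodes a record of a known kind in one call; a sketch, assuming the caller already knows the record holds a pointer record. The TypeRecordKind constructor of PointerRecord is an assumption about the surrounding header, not shown in this hunk.

// Sketch (not part of this change): one-shot decode of a known record kind
// without running a full visitor pipeline.
static Expected<PointerRecord> decodePointer(CVType &CVT) {
  PointerRecord PR(TypeRecordKind::Pointer);  // assumed constructor
  if (auto EC = TypeDeserializer::deserializeAs<PointerRecord>(CVT, PR))
    return std::move(EC);
  return PR;
}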

View File

@ -0,0 +1,33 @@
//===- TypeIndexDiscovery.h -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"
namespace llvm {
namespace codeview {
enum class TiRefKind { TypeRef, IndexRef };
struct TiReference {
TiRefKind Kind;
uint32_t Offset;
uint32_t Count;
};
void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
SmallVectorImpl<TiReference> &Refs);
void discoverTypeIndices(const CVType &Type,
SmallVectorImpl<TiReference> &Refs);
}
}
#endif
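discoverTypeIndices() reports where the TypeIndex fields of a raw record live, which is what the stream-merging code later in this change uses to re-write them; a sketch of a caller, with a hypothetical function name.

// Sketch (not part of this change): enumerate the index references of one
// record.  Each TiReference describes Count indices of kind Kind starting
// at byte offset Offset within the record data.
static void dumpIndexRefs(const CVType &Type) {
  SmallVector<TiReference, 4> Refs;
  discoverTypeIndices(Type, Refs);
  for (const TiReference &Ref : Refs)
    (void)Ref;
}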

View File

@ -35,6 +35,7 @@ using support::ulittle16_t;
using support::ulittle32_t; using support::ulittle32_t;
typedef CVRecord<TypeLeafKind> CVType; typedef CVRecord<TypeLeafKind> CVType;
typedef RemappedRecord<TypeLeafKind> RemappedType;
struct CVMemberRecord { struct CVMemberRecord {
TypeLeafKind Kind; TypeLeafKind Kind;
@ -278,15 +279,9 @@ class PointerRecord : public TypeRecord {
Attrs(calcAttrs(PK, PM, PO, Size)) {} Attrs(calcAttrs(PK, PM, PO, Size)) {}
PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM, PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
PointerOptions PO, uint8_t Size, PointerOptions PO, uint8_t Size, const MemberPointerInfo &MPI)
const MemberPointerInfo &Member)
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType), : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(Member) {} Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(MPI) {}
PointerRecord(TypeIndex ReferentType, uint32_t Attrs,
const MemberPointerInfo &Member)
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
Attrs(Attrs), MemberInfo(Member) {}
TypeIndex getReferentType() const { return ReferentType; } TypeIndex getReferentType() const { return ReferentType; }

View File

@ -17,7 +17,6 @@
#include "llvm/ADT/Optional.h" #include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h" #include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h" #include "llvm/Support/Error.h"
@ -26,6 +25,8 @@ namespace llvm {
namespace codeview { namespace codeview {
class TypeHasher;
class TypeSerializer : public TypeVisitorCallbacks { class TypeSerializer : public TypeVisitorCallbacks {
struct SubRecord { struct SubRecord {
SubRecord(TypeLeafKind K, uint32_t S) : Kind(K), Size(S) {} SubRecord(TypeLeafKind K, uint32_t S) : Kind(K), Size(S) {}
@ -45,14 +46,13 @@ class TypeSerializer : public TypeVisitorCallbacks {
} }
}; };
typedef SmallVector<MutableArrayRef<uint8_t>, 2> RecordList; typedef SmallVector<MutableArrayRef<uint8_t>, 2> MutableRecordList;
static constexpr uint8_t ContinuationLength = 8; static constexpr uint8_t ContinuationLength = 8;
BumpPtrAllocator &RecordStorage; BumpPtrAllocator &RecordStorage;
RecordSegment CurrentSegment; RecordSegment CurrentSegment;
RecordList FieldListSegments; MutableRecordList FieldListSegments;
TypeIndex LastTypeIndex;
Optional<TypeLeafKind> TypeKind; Optional<TypeLeafKind> TypeKind;
Optional<TypeLeafKind> MemberKind; Optional<TypeLeafKind> MemberKind;
std::vector<uint8_t> RecordBuffer; std::vector<uint8_t> RecordBuffer;
@ -60,28 +60,35 @@ class TypeSerializer : public TypeVisitorCallbacks {
BinaryStreamWriter Writer; BinaryStreamWriter Writer;
TypeRecordMapping Mapping; TypeRecordMapping Mapping;
RecordList SeenRecords; /// Private type record hashing implementation details are handled here.
StringMap<TypeIndex> HashedRecords; std::unique_ptr<TypeHasher> Hasher;
/// Contains a list of all records indexed by TypeIndex.toArrayIndex().
SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
/// Temporary storage that we use to copy a record's data while re-writing
/// its type indices.
SmallVector<uint8_t, 256> RemapStorage;
TypeIndex nextTypeIndex() const;
bool isInFieldList() const; bool isInFieldList() const;
TypeIndex calcNextTypeIndex() const;
TypeIndex incrementTypeIndex();
MutableArrayRef<uint8_t> getCurrentSubRecordData(); MutableArrayRef<uint8_t> getCurrentSubRecordData();
MutableArrayRef<uint8_t> getCurrentRecordData(); MutableArrayRef<uint8_t> getCurrentRecordData();
Error writeRecordPrefix(TypeLeafKind Kind); Error writeRecordPrefix(TypeLeafKind Kind);
TypeIndex insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record);
TypeIndex insertRecordBytesWithCopy(CVType &Record,
MutableArrayRef<uint8_t> Data);
Expected<MutableArrayRef<uint8_t>> Expected<MutableArrayRef<uint8_t>>
addPadding(MutableArrayRef<uint8_t> Record); addPadding(MutableArrayRef<uint8_t> Record);
public: public:
explicit TypeSerializer(BumpPtrAllocator &Storage); explicit TypeSerializer(BumpPtrAllocator &Storage, bool Hash = true);
~TypeSerializer();
ArrayRef<MutableArrayRef<uint8_t>> records() const; void reset();
TypeIndex getLastTypeIndex() const;
TypeIndex insertRecordBytes(MutableArrayRef<uint8_t> Record); ArrayRef<ArrayRef<uint8_t>> records() const;
TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
TypeIndex insertRecord(const RemappedType &Record);
Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record); Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record);
Error visitTypeBegin(CVType &Record) override; Error visitTypeBegin(CVType &Record) override;
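As a rough illustration of how the reworked serializer is meant to be driven (a sketch only, using nothing beyond the declarations in this hunk; Alloc and the function name are placeholders): with Hash left at its default of true the private TypeHasher is expected to de-duplicate identical records, while passing false keeps an append-only serializer that can be reset() and reused, as the FieldListRecordBuilder change further down does.
#include "llvm/DebugInfo/CodeView/TypeSerializer.h"
static void serializerSketch(llvm::BumpPtrAllocator &Alloc) {
  llvm::codeview::TypeSerializer Hashed(Alloc);        // Hash = true: de-duplicating
  llvm::codeview::TypeSerializer Plain(Alloc, false);  // append-only, reusable
  Plain.reset();                                       // clear state between records
  llvm::ArrayRef<llvm::ArrayRef<uint8_t>> Done = Hashed.records();
  (void)Done;                                          // indexed by TypeIndex.toArrayIndex()
}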

View File

@ -22,12 +22,75 @@ class TypeIndex;
class TypeServerHandler; class TypeServerHandler;
class TypeTableBuilder; class TypeTableBuilder;
/// Merges one type stream into another. Returns true on success. /// \brief Merge one set of type records into another. This method assumes
Error mergeTypeStreams(TypeTableBuilder &DestIdStream, /// that all records are type records, and there are no Id records present.
TypeTableBuilder &DestTypeStream, ///
/// \param Dest The table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// type stream, that contains the index of the corresponding type record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param Types The collection of types to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeRecords(TypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest, SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler, const CVTypeArray &Types); TypeServerHandler *Handler, const CVTypeArray &Types);
/// \brief Merge one set of id records into another. This method assumes
/// that all records are id records, and there are no Type records present.
/// However, since Id records can refer back to Type records, this method
/// assumes that the referenced type records have also been merged into
/// another type stream (for example using the above method), and accepts
/// the mapping from source to dest for that stream so that it can re-write
/// the type record mappings accordingly.
///
/// \param Dest The table to store the re-written id records into.
///
/// \param Types The mapping to use for the type records that these id
/// records refer to.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Ids The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
SmallVectorImpl<TypeIndex> &SourceToDest,
const CVTypeArray &Ids);
/// \brief Merge a unified set of type and id records, splitting them into
/// separate output streams.
///
/// \param DestIds The table to store the re-written id records into.
///
/// \param DestTypes The table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param IdsAndTypes The collection of id and type records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeAndIdRecords(TypeTableBuilder &DestIds,
TypeTableBuilder &DestTypes,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler,
const CVTypeArray &IdsAndTypes);
} // end namespace codeview } // end namespace codeview
} // end namespace llvm } // end namespace llvm
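To make the three entry points concrete, here is a minimal sketch of the common case of splitting a module's unified id/type stream into two destination tables; it uses only the declarations documented above, and the helper name is illustrative.
static llvm::Error splitIdsAndTypes(llvm::codeview::TypeTableBuilder &DestIds,
                                    llvm::codeview::TypeTableBuilder &DestTypes,
                                    const llvm::codeview::CVTypeArray &IdsAndTypes) {
  // Indexed by the source TypeIndex; filled with the corresponding
  // destination indices as records are re-written.
  llvm::SmallVector<llvm::codeview::TypeIndex, 128> SourceToDest;
  return llvm::codeview::mergeTypeAndIdRecords(DestIds, DestTypes, SourceToDest,
                                               /*Handler=*/nullptr, IdsAndTypes);
}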

View File

@ -64,10 +64,14 @@ class TypeTableBuilder {
return *ExpectedIndex; return *ExpectedIndex;
} }
TypeIndex writeSerializedRecord(MutableArrayRef<uint8_t> Record) { TypeIndex writeSerializedRecord(ArrayRef<uint8_t> Record) {
return Serializer.insertRecordBytes(Record); return Serializer.insertRecordBytes(Record);
} }
TypeIndex writeSerializedRecord(const RemappedType &Record) {
return Serializer.insertRecord(Record);
}
template <typename TFunc> void ForEachRecord(TFunc Func) { template <typename TFunc> void ForEachRecord(TFunc Func) {
uint32_t Index = TypeIndex::FirstNonSimpleIndex; uint32_t Index = TypeIndex::FirstNonSimpleIndex;
@ -77,23 +81,24 @@ class TypeTableBuilder {
} }
} }
ArrayRef<MutableArrayRef<uint8_t>> records() const { ArrayRef<ArrayRef<uint8_t>> records() const { return Serializer.records(); }
return Serializer.records();
}
}; };
class FieldListRecordBuilder { class FieldListRecordBuilder {
TypeTableBuilder &TypeTable; TypeTableBuilder &TypeTable;
BumpPtrAllocator Allocator;
TypeSerializer TempSerializer; TypeSerializer TempSerializer;
CVType Type; CVType Type;
public: public:
explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable) explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable)
: TypeTable(TypeTable), TempSerializer(TypeTable.getAllocator()) { : TypeTable(TypeTable), TempSerializer(Allocator, false) {
Type.Type = TypeLeafKind::LF_FIELDLIST; Type.Type = TypeLeafKind::LF_FIELDLIST;
} }
void begin() { void begin() {
TempSerializer.reset();
if (auto EC = TempSerializer.visitTypeBegin(Type)) if (auto EC = TempSerializer.visitTypeBegin(Type))
consumeError(std::move(EC)); consumeError(std::move(EC));
} }
@ -109,23 +114,19 @@ class FieldListRecordBuilder {
consumeError(std::move(EC)); consumeError(std::move(EC));
} }
TypeIndex end() { TypeIndex end(bool Write) {
TypeIndex Index;
if (auto EC = TempSerializer.visitTypeEnd(Type)) { if (auto EC = TempSerializer.visitTypeEnd(Type)) {
consumeError(std::move(EC)); consumeError(std::move(EC));
return TypeIndex(); return TypeIndex();
} }
TypeIndex Index; if (Write) {
for (auto Record : TempSerializer.records()) { for (auto Record : TempSerializer.records())
Index = TypeTable.writeSerializedRecord(Record); Index = TypeTable.writeSerializedRecord(Record);
} }
return Index;
}
/// Stop building the record. return Index;
void reset() {
if (auto EC = TempSerializer.visitTypeEnd(Type))
consumeError(std::move(EC));
} }
}; };
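A usage sketch of the revised begin()/end(Write) protocol (illustrative only; writeMemberType is assumed from the rest of this class, since only begin() and end() appear in the hunk, and Member stands for any field-list member record):
template <typename MemberT>
static llvm::codeview::TypeIndex
emitFieldList(llvm::codeview::TypeTableBuilder &Table, MemberT &Member) {
  llvm::codeview::FieldListRecordBuilder FLRB(Table);
  FLRB.begin();                    // now also resets the temporary serializer
  FLRB.writeMemberType(Member);    // may be called any number of times
  return FLRB.end(/*Write=*/true); // pass false to drop the record instead
}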

View File

@ -18,7 +18,7 @@ namespace codeview {
class TypeTableCollection : public TypeCollection { class TypeTableCollection : public TypeCollection {
public: public:
explicit TypeTableCollection(ArrayRef<MutableArrayRef<uint8_t>> Records); explicit TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records);
Optional<TypeIndex> getFirst() override; Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override; Optional<TypeIndex> getNext(TypeIndex Prev) override;
@ -33,7 +33,7 @@ class TypeTableCollection : public TypeCollection {
bool hasCapacityFor(TypeIndex Index) const; bool hasCapacityFor(TypeIndex Index) const;
void ensureTypeExists(TypeIndex Index); void ensureTypeExists(TypeIndex Index);
ArrayRef<MutableArrayRef<uint8_t>> Records; ArrayRef<ArrayRef<uint8_t>> Records;
TypeDatabase Database; TypeDatabase Database;
}; };
} }
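Since both sides now use ArrayRef<ArrayRef<uint8_t>>, the serialized output of a TypeTableBuilder can be handed straight to the collection; a short sketch with placeholder names:
static void makeCollection(llvm::codeview::TypeTableBuilder &Builder) {
  // records() and the constructor above now share the same element type.
  llvm::codeview::TypeTableCollection Collection(Builder.records());
  (void)Collection;
}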

View File

@ -46,7 +46,8 @@ class raw_ostream;
/// Reads a value from data extractor and applies a relocation to the result if /// Reads a value from data extractor and applies a relocation to the result if
/// one exists for the given offset. /// one exists for the given offset.
uint64_t getRelocatedValue(const DataExtractor &Data, uint32_t Size, uint64_t getRelocatedValue(const DataExtractor &Data, uint32_t Size,
uint32_t *Off, const RelocAddrMap *Relocs); uint32_t *Off, const RelocAddrMap *Relocs,
uint64_t *SecNdx = nullptr);
/// DWARFContext /// DWARFContext
/// This data structure is the top level entity that deals with dwarf debug /// This data structure is the top level entity that deals with dwarf debug
@ -71,6 +72,14 @@ class DWARFContext : public DIContext {
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO; std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
std::unique_ptr<DWARFDebugLocDWO> LocDWO; std::unique_ptr<DWARFDebugLocDWO> LocDWO;
struct DWOFile {
object::OwningBinary<object::ObjectFile> File;
std::unique_ptr<DWARFContext> Context;
};
StringMap<std::weak_ptr<DWOFile>> DWOFiles;
std::weak_ptr<DWOFile> DWP;
bool CheckedForDWP = false;
/// Read compile units from the debug_info section (if necessary) /// Read compile units from the debug_info section (if necessary)
/// and store them in CUs. /// and store them in CUs.
void parseCompileUnits(); void parseCompileUnits();
@ -165,6 +174,8 @@ class DWARFContext : public DIContext {
return DWOCUs[index].get(); return DWOCUs[index].get();
} }
DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
/// Get a DIE given an exact offset. /// Get a DIE given an exact offset.
DWARFDie getDIEForOffset(uint32_t Offset); DWARFDie getDIEForOffset(uint32_t Offset);
@ -206,6 +217,7 @@ class DWARFContext : public DIContext {
DIInliningInfo getInliningInfoForAddress(uint64_t Address, DIInliningInfo getInliningInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override; DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
virtual StringRef getFileName() const = 0;
virtual bool isLittleEndian() const = 0; virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0; virtual uint8_t getAddressSize() const = 0;
virtual const DWARFSection &getInfoSection() = 0; virtual const DWARFSection &getInfoSection() = 0;
@ -248,6 +260,8 @@ class DWARFContext : public DIContext {
return version == 2 || version == 3 || version == 4 || version == 5; return version == 2 || version == 3 || version == 4 || version == 5;
} }
std::shared_ptr<DWARFContext> getDWOContext(StringRef AbsolutePath);
private: private:
/// Return the compile unit that includes an offset (relative to .debug_info). /// Return the compile unit that includes an offset (relative to .debug_info).
DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset); DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
@ -263,6 +277,7 @@ class DWARFContext : public DIContext {
class DWARFContextInMemory : public DWARFContext { class DWARFContextInMemory : public DWARFContext {
virtual void anchor(); virtual void anchor();
StringRef FileName;
bool IsLittleEndian; bool IsLittleEndian;
uint8_t AddressSize; uint8_t AddressSize;
DWARFSection InfoSection; DWARFSection InfoSection;
@ -316,6 +331,7 @@ class DWARFContextInMemory : public DWARFContext {
uint8_t AddrSize, uint8_t AddrSize,
bool isLittleEndian = sys::IsLittleEndianHost); bool isLittleEndian = sys::IsLittleEndianHost);
StringRef getFileName() const override { return FileName; }
bool isLittleEndian() const override { return IsLittleEndian; } bool isLittleEndian() const override { return IsLittleEndian; }
uint8_t getAddressSize() const override { return AddressSize; } uint8_t getAddressSize() const override { return AddressSize; }
const DWARFSection &getInfoSection() override { return InfoSection; } const DWARFSection &getInfoSection() override { return InfoSection; }
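A sketch of how a consumer might use the new shared .dwo handling (illustrative only; Ctx, AbsolutePath, and the function name are placeholders):
static bool haveSplitDebugInfo(llvm::DWARFContext &Ctx, llvm::StringRef AbsolutePath) {
  // Repeated calls with the same path reuse the cached DWOFile for as long as
  // some previously returned shared_ptr keeps it alive.
  std::shared_ptr<llvm::DWARFContext> DWOCtx = Ctx.getDWOContext(AbsolutePath);
  return DWOCtx != nullptr;
}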

View File

@ -25,6 +25,7 @@ class raw_ostream;
struct DWARFAddressRange { struct DWARFAddressRange {
uint64_t LowPC; uint64_t LowPC;
uint64_t HighPC; uint64_t HighPC;
uint64_t SectionIndex;
}; };
/// DWARFAddressRangesVector - represents a set of absolute address ranges. /// DWARFAddressRangesVector - represents a set of absolute address ranges.
@ -44,6 +45,8 @@ class DWARFDebugRangeList {
/// address past the end of the address range. The ending address must /// address past the end of the address range. The ending address must
/// be greater than or equal to the beginning address. /// be greater than or equal to the beginning address.
uint64_t EndAddress; uint64_t EndAddress;
/// The index of the section this range belongs to.
uint64_t SectionIndex;
/// The end of any given range list is marked by an end of list entry, /// The end of any given range list is marked by an end of list entry,
/// which consists of a 0 for the beginning address offset /// which consists of a 0 for the beginning address offset

View File

@ -195,7 +195,8 @@ class DWARFDie {
/// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU. /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
/// Returns true if both attributes are present. /// Returns true if both attributes are present.
bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const; bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
uint64_t &SectionIndex) const;
/// Get the address ranges for this DIE. /// Get the address ranges for this DIE.
/// ///
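A sketch of the widened accessor (illustrative only; Die, OS, and the function name are placeholders):
static void printPCRange(const llvm::DWARFDie &Die, llvm::raw_ostream &OS) {
  uint64_t LowPC, HighPC, SectionIndex;
  if (Die.getLowAndHighPC(LowPC, HighPC, SectionIndex))
    OS << "[" << LowPC << ", " << HighPC << ") in section " << SectionIndex << "\n";
}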

View File

@ -47,6 +47,7 @@ class DWARFFormValue {
const char *cstr; const char *cstr;
}; };
const uint8_t *data = nullptr; const uint8_t *data = nullptr;
uint64_t SectionIndex; /// Section index for reference forms.
}; };
dwarf::Form Form; /// Form for this value. dwarf::Form Form; /// Form for this value.
@ -58,6 +59,7 @@ class DWARFFormValue {
dwarf::Form getForm() const { return Form; } dwarf::Form getForm() const { return Form; }
uint64_t getRawUValue() const { return Value.uval; } uint64_t getRawUValue() const { return Value.uval; }
uint64_t getSectionIndex() const { return Value.SectionIndex; }
void setForm(dwarf::Form F) { Form = F; } void setForm(dwarf::Form F) { Form = F; }
void setUValue(uint64_t V) { Value.uval = V; } void setUValue(uint64_t V) { Value.uval = V; }
void setSValue(int64_t V) { Value.sval = V; } void setSValue(int64_t V) { Value.sval = V; }

View File

@ -16,7 +16,10 @@
namespace llvm { namespace llvm {
/// RelocAddrEntry contains the relocated value and its section index.
/// The section index is -1LL if the relocation points to an absolute symbol.
struct RelocAddrEntry { struct RelocAddrEntry {
uint64_t SectionIndex;
uint64_t Value; uint64_t Value;
}; };

View File

@ -143,17 +143,7 @@ class DWARFUnit {
typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator> typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
die_iterator_range; die_iterator_range;
class DWOHolder { std::shared_ptr<DWARFUnit> DWO;
object::OwningBinary<object::ObjectFile> DWOFile;
std::unique_ptr<DWARFContext> DWOContext;
DWARFUnit *DWOU = nullptr;
public:
DWOHolder(StringRef DWOPath, uint64_t DWOId);
DWARFUnit *getUnit() const { return DWOU; }
};
std::unique_ptr<DWOHolder> DWO;
const DWARFUnitIndex::Entry *IndexEntry; const DWARFUnitIndex::Entry *IndexEntry;

View File

@ -43,8 +43,8 @@ class MappedBlockStream : public BinaryStream {
friend class WritableMappedBlockStream; friend class WritableMappedBlockStream;
public: public:
static std::unique_ptr<MappedBlockStream> static std::unique_ptr<MappedBlockStream>
createStream(uint32_t BlockSize, uint32_t NumBlocks, createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
const MSFStreamLayout &Layout, BinaryStreamRef MsfData); BinaryStreamRef MsfData);
static std::unique_ptr<MappedBlockStream> static std::unique_ptr<MappedBlockStream>
createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData, createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
@ -74,12 +74,11 @@ class MappedBlockStream : public BinaryStream {
void invalidateCache(); void invalidateCache();
uint32_t getBlockSize() const { return BlockSize; } uint32_t getBlockSize() const { return BlockSize; }
uint32_t getNumBlocks() const { return NumBlocks; } uint32_t getNumBlocks() const { return StreamLayout.Blocks.size(); }
uint32_t getStreamLength() const { return StreamLayout.Length; } uint32_t getStreamLength() const { return StreamLayout.Length; }
protected: protected:
MappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks, MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
const MSFStreamLayout &StreamLayout,
BinaryStreamRef MsfData); BinaryStreamRef MsfData);
private: private:
@ -91,7 +90,6 @@ class MappedBlockStream : public BinaryStream {
ArrayRef<uint8_t> &Buffer); ArrayRef<uint8_t> &Buffer);
const uint32_t BlockSize; const uint32_t BlockSize;
const uint32_t NumBlocks;
const MSFStreamLayout StreamLayout; const MSFStreamLayout StreamLayout;
BinaryStreamRef MsfData; BinaryStreamRef MsfData;
@ -103,8 +101,8 @@ class MappedBlockStream : public BinaryStream {
class WritableMappedBlockStream : public WritableBinaryStream { class WritableMappedBlockStream : public WritableBinaryStream {
public: public:
static std::unique_ptr<WritableMappedBlockStream> static std::unique_ptr<WritableMappedBlockStream>
createStream(uint32_t BlockSize, uint32_t NumBlocks, createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
const MSFStreamLayout &Layout, WritableBinaryStreamRef MsfData); WritableBinaryStreamRef MsfData);
static std::unique_ptr<WritableMappedBlockStream> static std::unique_ptr<WritableMappedBlockStream>
createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData, createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
@ -139,7 +137,7 @@ class WritableMappedBlockStream : public WritableBinaryStream {
uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); } uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
protected: protected:
WritableMappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks, WritableMappedBlockStream(uint32_t BlockSize,
const MSFStreamLayout &StreamLayout, const MSFStreamLayout &StreamLayout,
WritableBinaryStreamRef MsfData); WritableBinaryStreamRef MsfData);
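A sketch of the tightened factory; the separate block-count argument is gone and getNumBlocks() is now derived from the layout (names are placeholders):
static std::unique_ptr<llvm::msf::MappedBlockStream>
openStream(uint32_t BlockSize, const llvm::msf::MSFStreamLayout &Layout,
           llvm::BinaryStreamRef MsfData) {
  auto S = llvm::msf::MappedBlockStream::createStream(BlockSize, Layout, MsfData);
  // S->getNumBlocks() now reports Layout.Blocks.size() instead of a stored count.
  return S;
}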

View File

@ -82,6 +82,7 @@ class DbiStreamBuilder {
Error finalize(); Error finalize();
uint32_t calculateModiSubstreamSize() const; uint32_t calculateModiSubstreamSize() const;
uint32_t calculateNamesOffset() const;
uint32_t calculateSectionContribsStreamSize() const; uint32_t calculateSectionContribsStreamSize() const;
uint32_t calculateSectionMapStreamSize() const; uint32_t calculateSectionMapStreamSize() const;
uint32_t calculateFileInfoSubstreamSize() const; uint32_t calculateFileInfoSubstreamSize() const;

View File

@ -11,8 +11,7 @@
#define LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H #define LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H
#include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h" #include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h" #include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h" #include "llvm/DebugInfo/PDB/Native/NativeSession.h"
@ -39,7 +38,7 @@ class PDBTypeServerHandler : public codeview::TypeServerHandler {
bool RevisitAlways; bool RevisitAlways;
std::unique_ptr<NativeSession> Session; std::unique_ptr<NativeSession> Session;
SmallVector<SmallString<64>, 4> SearchPaths; StringSet<> SearchPaths;
}; };
} }
} }

View File

@ -21,6 +21,9 @@
#include "llvm/Support/Error.h" #include "llvm/Support/Error.h"
namespace llvm { namespace llvm {
namespace codeview {
class LazyRandomTypeCollection;
}
namespace msf { namespace msf {
class MappedBlockStream; class MappedBlockStream;
} }
@ -53,12 +56,16 @@ class TpiStream {
codeview::CVTypeRange types(bool *HadError) const; codeview::CVTypeRange types(bool *HadError) const;
const codeview::CVTypeArray &typeArray() const { return TypeRecords; } const codeview::CVTypeArray &typeArray() const { return TypeRecords; }
codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }
Error commit(); Error commit();
private: private:
const PDBFile &Pdb; const PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream; std::unique_ptr<msf::MappedBlockStream> Stream;
std::unique_ptr<codeview::LazyRandomTypeCollection> Types;
codeview::CVTypeArray TypeRecords; codeview::CVTypeArray TypeRecords;
std::unique_ptr<BinaryStream> HashStream; std::unique_ptr<BinaryStream> HashStream;
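A sketch of the new accessor, which is what lets the PDB dumpers reuse the CodeView LazyRandomTypeCollection machinery (Tpi is a placeholder for an already-loaded stream):
static llvm::codeview::LazyRandomTypeCollection &
typesOf(llvm::pdb::TpiStream &Tpi) {
  return Tpi.typeCollection();
}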

View File

@ -322,7 +322,7 @@ template <> struct DenseMapInfo<AttributeSet> {
/// the AttributeList object. The function attributes are at index /// the AttributeList object. The function attributes are at index
/// `AttributeList::FunctionIndex', the return value is at index /// `AttributeList::FunctionIndex', the return value is at index
/// `AttributeList::ReturnIndex', and the attributes for the parameters start at /// `AttributeList::ReturnIndex', and the attributes for the parameters start at
/// index `1'. /// index `AttributeList::FirstArgIndex'.
class AttributeList { class AttributeList {
public: public:
enum AttrIndex : unsigned { enum AttrIndex : unsigned {
@ -347,8 +347,8 @@ class AttributeList {
/// \brief Create an AttributeList with the specified parameters in it. /// \brief Create an AttributeList with the specified parameters in it.
static AttributeList get(LLVMContext &C, static AttributeList get(LLVMContext &C,
ArrayRef<std::pair<unsigned, Attribute>> Attrs); ArrayRef<std::pair<unsigned, Attribute>> Attrs);
static AttributeList static AttributeList get(LLVMContext &C,
get(LLVMContext &C, ArrayRef<std::pair<unsigned, AttributeSet>> Attrs); ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
/// \brief Create an AttributeList from attribute sets for a function, its /// \brief Create an AttributeList from attribute sets for a function, its
/// return value, and all of its arguments. /// return value, and all of its arguments.
@ -356,13 +356,11 @@ class AttributeList {
AttributeSet RetAttrs, AttributeSet RetAttrs,
ArrayRef<AttributeSet> ArgAttrs); ArrayRef<AttributeSet> ArgAttrs);
static AttributeList
getImpl(LLVMContext &C,
ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
private: private:
explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {} explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {}
static AttributeList getImpl(LLVMContext &C, ArrayRef<AttributeSet> AttrSets);
public: public:
AttributeList() = default; AttributeList() = default;
@ -521,39 +519,31 @@ class AttributeList {
/// \brief Return the attributes at the index as a string. /// \brief Return the attributes at the index as a string.
std::string getAsString(unsigned Index, bool InAttrGrp = false) const; std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
using iterator = ArrayRef<Attribute>::iterator; //===--------------------------------------------------------------------===//
// AttributeList Introspection
//===--------------------------------------------------------------------===//
iterator begin(unsigned Slot) const; typedef const AttributeSet *iterator;
iterator end(unsigned Slot) const; iterator begin() const;
iterator end() const;
unsigned getNumAttrSets() const;
/// Use these to iterate over the valid attribute indices.
unsigned index_begin() const { return AttributeList::FunctionIndex; }
unsigned index_end() const { return getNumAttrSets() - 1; }
/// operator==/!= - Provide equality predicates. /// operator==/!= - Provide equality predicates.
bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; } bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; }
bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; } bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; }
//===--------------------------------------------------------------------===//
// AttributeList Introspection
//===--------------------------------------------------------------------===//
/// \brief Return a raw pointer that uniquely identifies this attribute list. /// \brief Return a raw pointer that uniquely identifies this attribute list.
void *getRawPointer() const { void *getRawPointer() const {
return pImpl; return pImpl;
} }
/// \brief Return true if there are no attributes. /// \brief Return true if there are no attributes.
bool isEmpty() const { bool isEmpty() const { return pImpl == nullptr; }
return getNumSlots() == 0;
}
/// \brief Return the number of slots used in this attribute list. This is
/// the number of arguments that have an attribute set on them (including the
/// function itself).
unsigned getNumSlots() const;
/// \brief Return the index for the given slot.
unsigned getSlotIndex(unsigned Slot) const;
/// \brief Return the attributes at the given slot.
AttributeSet getSlotAttributes(unsigned Slot) const;
void dump() const; void dump() const;
}; };
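A sketch of iterating with the replacement introspection surface now that the slot-based accessors are gone; AL is a placeholder, and getAttributes(unsigned) is the pre-existing indexed accessor assumed here rather than something added by this hunk.
static unsigned countNonEmptySets(const llvm::AttributeList &AL) {
  unsigned N = 0;
  for (llvm::AttributeSet AS : AL)      // begin()/end() walk the AttributeSets
    if (AS.hasAttributes())
      ++N;
  // Index-based walk: FunctionIndex wraps around to ReturnIndex and then the
  // argument indices, stopping at index_end().
  for (unsigned I = AL.index_begin(), E = AL.index_end(); I != E; ++I)
    (void)AL.getAttributes(I);
  return N;
}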

View File

@ -33,6 +33,7 @@ class Function;
class LandingPadInst; class LandingPadInst;
class LLVMContext; class LLVMContext;
class Module; class Module;
class PHINode;
class TerminatorInst; class TerminatorInst;
class ValueSymbolTable; class ValueSymbolTable;
@ -261,6 +262,50 @@ class BasicBlock final : public Value, // Basic blocks are data objects also
inline const Instruction &back() const { return InstList.back(); } inline const Instruction &back() const { return InstList.back(); }
inline Instruction &back() { return InstList.back(); } inline Instruction &back() { return InstList.back(); }
/// Iterator to walk just the phi nodes in the basic block.
template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
class phi_iterator_impl
: public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
std::forward_iterator_tag, PHINodeT> {
friend BasicBlock;
PHINodeT *PN;
phi_iterator_impl(PHINodeT *PN) : PN(PN) {}
public:
// Allow default construction to build variables, but this doesn't build
// a useful iterator.
phi_iterator_impl() = default;
// Allow conversion between instantiations where valid.
template <typename PHINodeU, typename BBIteratorU>
phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
: PN(Arg.PN) {}
bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }
PHINodeT &operator*() const { return *PN; }
using phi_iterator_impl::iterator_facade_base::operator++;
phi_iterator_impl &operator++() {
assert(PN && "Cannot increment the end iterator!");
PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
return *this;
}
};
typedef phi_iterator_impl<> phi_iterator;
typedef phi_iterator_impl<const PHINode, BasicBlock::const_iterator>
const_phi_iterator;
/// Returns a range that iterates over the phis in the basic block.
///
/// Note that this cannot be used with basic blocks that have no terminator.
iterator_range<const_phi_iterator> phis() const {
return const_cast<BasicBlock *>(this)->phis();
}
iterator_range<phi_iterator> phis();
/// \brief Return the underlying instruction list container. /// \brief Return the underlying instruction list container.
/// ///
/// Currently you need to access the underlying instruction list container /// Currently you need to access the underlying instruction list container
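A sketch of the new range in use, replacing the usual hand-written dyn_cast loop; per the note above, the block must already have a terminator (names are placeholders):
static unsigned countIncomingFrom(llvm::BasicBlock &BB, llvm::BasicBlock *Pred) {
  unsigned N = 0;
  for (llvm::PHINode &PN : BB.phis())
    if (PN.getBasicBlockIndex(Pred) >= 0)   // PHI has an incoming value from Pred
      ++N;
  return N;
}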

View File

@ -171,6 +171,7 @@ namespace llvm {
ebStrict ebStrict
}; };
bool isUnaryOp() const;
RoundingMode getRoundingMode() const; RoundingMode getRoundingMode() const;
ExceptionBehavior getExceptionBehavior() const; ExceptionBehavior getExceptionBehavior() const;
@ -182,6 +183,18 @@ namespace llvm {
case Intrinsic::experimental_constrained_fmul: case Intrinsic::experimental_constrained_fmul:
case Intrinsic::experimental_constrained_fdiv: case Intrinsic::experimental_constrained_fdiv:
case Intrinsic::experimental_constrained_frem: case Intrinsic::experimental_constrained_frem:
case Intrinsic::experimental_constrained_sqrt:
case Intrinsic::experimental_constrained_pow:
case Intrinsic::experimental_constrained_powi:
case Intrinsic::experimental_constrained_sin:
case Intrinsic::experimental_constrained_cos:
case Intrinsic::experimental_constrained_exp:
case Intrinsic::experimental_constrained_exp2:
case Intrinsic::experimental_constrained_log:
case Intrinsic::experimental_constrained_log10:
case Intrinsic::experimental_constrained_log2:
case Intrinsic::experimental_constrained_rint:
case Intrinsic::experimental_constrained_nearbyint:
return true; return true;
default: return false; default: return false;
} }
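A sketch of consuming the new predicate, separating the original binary operations from the newly listed single-operand intrinsics (I is a placeholder for an instruction being visited):
static bool isUnaryConstrainedFP(const llvm::Instruction &I) {
  if (const auto *CFPI = llvm::dyn_cast<llvm::ConstrainedFPIntrinsic>(&I))
    return CFPI->isUnaryOp();   // e.g. sqrt, sin, cos, exp, log, rint, nearbyint
  return false;
}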

Some files were not shown because too many files have changed in this diff.