Merge llvm, clang, lld, lldb, compiler-rt and libc++ r304659, and update
build glue.
commit 6d97bb297c
@@ -19,8 +19,6 @@
 extern "C" {
 #endif

-// Initialize coverage.
-void __sanitizer_cov_init();
 // Record and dump coverage info.
 void __sanitizer_cov_dump();

@@ -28,10 +26,6 @@ extern "C" {
 // .sancov files.
 void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);

-// Open <name>.sancov.packed in the coverage directory and return the file
-// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
-// This is intended for use by sandboxing code.
-intptr_t __sanitizer_maybe_open_cov_file(const char *name);
 #ifdef __cplusplus
 } // extern "C"
 #endif
@@ -106,7 +106,6 @@ void AsanDeactivate() {
 // Deactivate the runtime.
 SetCanPoisonMemory(false);
 SetMallocContextSize(1);
-ReInitializeCoverage(false, nullptr);

 AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
 disabled.quarantine_size_mb = 0;
@@ -130,8 +129,6 @@ void AsanActivate() {

 SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
 SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
-ReInitializeCoverage(asan_deactivated_flags.coverage,
-                     asan_deactivated_flags.coverage_dir);
 ReInitializeAllocator(asan_deactivated_flags.allocator_options);

 asan_is_deactivated = false;
@@ -242,9 +242,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
 CheckNoDeepBind(filename, flag); \
 } while (false)
 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
-  CoverageUpdateMapping()
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
 if (AsanThread *t = GetCurrentThread()) { \
@@ -723,9 +722,7 @@ static void AfterFork() {
 INTERCEPTOR(int, fork, void) {
 ENSURE_ASAN_INITED();
 BeforeFork();
-if (common_flags()->coverage) CovBeforeFork();
 int pid = REAL(fork)();
-if (common_flags()->coverage) CovAfterFork(pid);
 AfterFork();
 return pid;
 }
@@ -166,16 +166,19 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
 }

 inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
-if (!atomic_load(&stack_switching_, memory_order_acquire))
-return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+// Make sure the stack bounds are fully initialized.
+if (stack_bottom_ >= stack_top_) return {0, 0};
+return {stack_bottom_, stack_top_};
+}
 char local;
 const uptr cur_stack = (uptr)&local;
 // Note: need to check next stack first, because FinishSwitchFiber
 // may be in process of overwriting stack_top_/bottom_. But in such case
 // we are already on the next stack.
 if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
-return StackBounds{next_stack_bottom_, next_stack_top_}; // NOLINT
+return {next_stack_bottom_, next_stack_top_};
-return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+return {stack_bottom_, stack_top_};
 }

 uptr AsanThread::stack_top() {
@@ -197,6 +200,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
 uptr stack_size = this->stack_size();
 if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
 return nullptr;
+CHECK_LE(stack_size, 0x10000000);
 uptr old_val = 0;
 // fake_stack_ has 3 states:
 // 0 -- not initialized
@@ -408,6 +408,9 @@ static void MarkInvalidPCCb(uptr chunk, void *arg) {

 // On Linux, handles dynamically allocated TLS blocks by treating all chunks
 // allocated from ld-linux.so as reachable.
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
 // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
 // They are allocated with a __libc_memalign() call in allocate_and_init()
 // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
@@ -23,6 +23,10 @@
 #include "sanitizer_common/sanitizer_linux.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"

+#if SANITIZER_USE_GETAUXVAL
+#include <sys/auxv.h>
+#endif // SANITIZER_USE_GETAUXVAL
+
 namespace __lsan {

 static const char kLinkerName[] = "ld";
@@ -30,8 +34,12 @@ static const char kLinkerName[] = "ld";
 static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
 static LoadedModule *linker = nullptr;

-static bool IsLinker(const char* full_name) {
-return LibraryNameIs(full_name, kLinkerName);
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+return module.base_address() == getauxval(AT_BASE);
+#else
+return LibraryNameIs(module.full_name(), kLinkerName);
+#endif // SANITIZER_USE_GETAUXVAL
 }

 __attribute__((tls_model("initial-exec")))
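Note on the IsLinker() change above: when glibc's getauxval() is available, LeakSanitizer now identifies the dynamic linker by its load address from the ELF auxiliary vector instead of matching the module name against "ld". The following standalone sketch is not part of the runtime; it assumes Linux with glibc 2.16 or newer and a dynamically linked executable, and shows the same AT_BASE query the new code relies on:

// Print the dynamic linker's base address from the ELF auxiliary vector.
// This is the same signal the new IsLinker() uses via getauxval(AT_BASE).
#include <sys/auxv.h>
#include <cstdio>

int main() {
  unsigned long base = getauxval(AT_BASE);  // 0 if there is no interpreter
  if (base == 0)
    std::printf("no dynamic linker (statically linked?)\n");
  else
    std::printf("dynamic linker mapped at 0x%lx\n", base);
  return 0;
}

Matching by load address avoids mis-identifying an unrelated module that merely happens to be named "ld".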
@@ -49,22 +57,25 @@ void InitializePlatformSpecificModules() {
 ListOfModules modules;
 modules.init();
 for (LoadedModule &module : modules) {
-if (!IsLinker(module.full_name())) continue;
+if (!IsLinker(module))
+continue;
 if (linker == nullptr) {
 linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
 *linker = module;
 module = LoadedModule();
 } else {
 VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
-"TLS will not be handled correctly.\n", kLinkerName);
+"TLS and other allocations originating from linker might be "
+"falsely reported as leaks.\n", kLinkerName);
 linker->clear();
 linker = nullptr;
 return;
 }
 }
 if (linker == nullptr) {
-VReport(1, "LeakSanitizer: Dynamic linker not found. "
-"TLS will not be handled correctly.\n");
+VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+"allocations originating from linker might be falsely reported "
+"as leaks.\n");
 }
 }

@@ -317,15 +317,9 @@ bool AddressSpaceIsUnlimited();
 void SetAddressSpaceUnlimited();
 void AdjustStackSize(void *attr);
 void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
-void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
 void SetSandboxingCallback(void (*f)());

-void CoverageUpdateMapping();
-void CovBeforeFork();
-void CovAfterFork(int child_pid);
-
 void InitializeCoverage(bool enabled, const char *coverage_dir);
-void ReInitializeCoverage(bool enabled, const char *coverage_dir);

 void InitTlsSize();
 uptr GetTlsSize();
@@ -11,7 +11,6 @@
 INTERFACE_FUNCTION(__sanitizer_cov_dump)
 INTERFACE_FUNCTION(__sanitizer_dump_coverage)
 INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
-INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
 INTERFACE_WEAK_FUNCTION(__sancov_default_options)
 INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
 INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
@@ -1,627 +0,0 @@
-//===-- sanitizer_coverage.cc ---------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Sanitizer Coverage.
-// This file implements run-time support for a poor man's coverage tool.
-//
-// Compiler instrumentation:
-// For every interesting basic block the compiler injects the following code:
-// if (Guard < 0) {
-//    __sanitizer_cov(&Guard);
-// }
-// At the module start up time __sanitizer_cov_module_init sets the guards
-// to consecutive negative numbers (-1, -2, -3, ...).
-// It's fine to call __sanitizer_cov more than once for a given block.
-//
-// Run-time:
-//  - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
-//    and atomically set Guard to -Guard.
-//  - __sanitizer_cov_dump: dump the coverage data to disk.
-//  For every module of the current process that has coverage data
-//  this will create a file module_name.PID.sancov.
-//
-// The file format is simple: the first 8 bytes is the magic,
-// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
-// magic defines the size of the following offsets.
-// The rest of the data is the offsets in the module.
-//
-// Eventually, this coverage implementation should be obsoleted by a more
-// powerful general purpose Clang/LLVM coverage instrumentation.
-// Consider this implementation as prototype.
-//
-// FIXME: support (or at least test with) dlclose.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
-#include "sanitizer_flags.h"
-
-using namespace __sanitizer;
-
-static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
-static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
-static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
-static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
-
-static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
-
-static atomic_uintptr_t coverage_counter;
-
-// pc_array is the array containing the covered PCs.
-// To make the pc_array thread- and async-signal-safe it has to be large enough.
-// 128M counters "ought to be enough for anybody" (4M on 32-bit).
-
-// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
-// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
-// dump current memory layout to another file.
-
-static bool cov_sandboxed = false;
-static fd_t cov_fd = kInvalidFd;
-static unsigned int cov_max_block_size = 0;
-static bool coverage_enabled = false;
-static const char *coverage_dir;
-
-namespace __sanitizer {
-
-class CoverageData {
-public:
-void Init();
-void Enable();
-void Disable();
-void ReInit();
-void BeforeFork();
-void AfterFork(int child_pid);
-void Extend(uptr npcs);
-void Add(uptr pc, u32 *guard);
-void DumpOffsets();
-void DumpAll();
-
-void InitializeGuardArray(s32 *guards);
-void InitializeGuards(s32 *guards, uptr n, const char *module_name,
-uptr caller_pc);
-void ReinitializeGuards();
-
-uptr *data();
-uptr size() const;
-
-private:
-struct NamedPcRange {
-const char *copied_module_name;
-uptr beg, end; // elements [beg,end) in pc_array.
-};
-
-void DirectOpen();
-void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
-void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s,
-InternalMmapVector<uptr>* offsets) const;
-
-// Maximal size pc array may ever grow.
-// We MmapNoReserve this space to ensure that the array is contiguous.
-static const uptr kPcArrayMaxSize =
-FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
-// The amount file mapping for the pc array is grown by.
-static const uptr kPcArrayMmapSize = 64 * 1024;
-
-// pc_array is allocated with MmapNoReserveOrDie and so it uses only as
-// much RAM as it really needs.
-uptr *pc_array;
-// Index of the first available pc_array slot.
-atomic_uintptr_t pc_array_index;
-// Array size.
-atomic_uintptr_t pc_array_size;
-// Current file mapped size of the pc array.
-uptr pc_array_mapped_size;
-// Descriptor of the file mapped pc array.
-fd_t pc_fd;
-
-// Vector of coverage guard arrays, protected by mu.
-InternalMmapVectorNoCtor<s32*> guard_array_vec;
-
-// Vector of module and compilation unit pc ranges.
-InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
-InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
-
-StaticSpinMutex mu;
-};
-
-static CoverageData coverage_data;
-
-void CovUpdateMapping(const char *path, uptr caller_pc = 0);
-
-void CoverageData::DirectOpen() {
-InternalScopedString path(kMaxPathLength);
-internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
-coverage_dir, internal_getpid());
-pc_fd = OpenFile(path.data(), RdWr);
-if (pc_fd == kInvalidFd) {
-Report("Coverage: failed to open %s for reading/writing\n", path.data());
-Die();
-}
-
-pc_array_mapped_size = 0;
-CovUpdateMapping(coverage_dir);
-}
-
-void CoverageData::Init() {
-pc_fd = kInvalidFd;
-}
-
-void CoverageData::Enable() {
-if (pc_array)
-return;
-pc_array = reinterpret_cast<uptr *>(
-MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
-atomic_store(&pc_array_index, 0, memory_order_relaxed);
-if (common_flags()->coverage_direct) {
-Report("coverage_direct=1 is deprecated, don't use it.\n");
-Die();
-atomic_store(&pc_array_size, 0, memory_order_relaxed);
-} else {
-atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
-}
-}
-
-void CoverageData::InitializeGuardArray(s32 *guards) {
-Enable();  // Make sure coverage is enabled at this point.
-s32 n = guards[0];
-for (s32 j = 1; j <= n; j++) {
-uptr idx = atomic_load_relaxed(&pc_array_index);
-atomic_store_relaxed(&pc_array_index, idx + 1);
-guards[j] = -static_cast<s32>(idx + 1);
-}
-}
-
-void CoverageData::Disable() {
-if (pc_array) {
-UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
-pc_array = nullptr;
-}
-if (pc_fd != kInvalidFd) {
-CloseFile(pc_fd);
-pc_fd = kInvalidFd;
-}
-}
-
-void CoverageData::ReinitializeGuards() {
-// Assuming single thread.
-atomic_store(&pc_array_index, 0, memory_order_relaxed);
-for (uptr i = 0; i < guard_array_vec.size(); i++)
-InitializeGuardArray(guard_array_vec[i]);
-}
-
-void CoverageData::ReInit() {
-Disable();
-if (coverage_enabled) {
-if (common_flags()->coverage_direct) {
-// In memory-mapped mode we must extend the new file to the known array
-// size.
-uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
-uptr npcs = size / sizeof(uptr);
-Enable();
-if (size) Extend(npcs);
-if (coverage_enabled) CovUpdateMapping(coverage_dir);
-} else {
-Enable();
-}
-}
-// Re-initialize the guards.
-// We are single-threaded now, no need to grab any lock.
-CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
-ReinitializeGuards();
-}
-
-void CoverageData::BeforeFork() {
-mu.Lock();
-}
-
-void CoverageData::AfterFork(int child_pid) {
-// We are single-threaded so it's OK to release the lock early.
-mu.Unlock();
-if (child_pid == 0) ReInit();
-}
-
-// Extend coverage PC array to fit additional npcs elements.
-void CoverageData::Extend(uptr npcs) {
-if (!common_flags()->coverage_direct) return;
-SpinMutexLock l(&mu);
-
-uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
-size += npcs * sizeof(uptr);
-
-if (coverage_enabled && size > pc_array_mapped_size) {
-if (pc_fd == kInvalidFd) DirectOpen();
-CHECK_NE(pc_fd, kInvalidFd);
-
-uptr new_mapped_size = pc_array_mapped_size;
-while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
-CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
-
-// Extend the file and map the new space at the end of pc_array.
-uptr res = internal_ftruncate(pc_fd, new_mapped_size);
-int err;
-if (internal_iserror(res, &err)) {
-Printf("failed to extend raw coverage file: %d\n", err);
-Die();
-}
-
-uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
-void *p = MapWritableFileToMemory((void *)next_map_base,
-new_mapped_size - pc_array_mapped_size,
-pc_fd, pc_array_mapped_size);
-CHECK_EQ((uptr)p, next_map_base);
-pc_array_mapped_size = new_mapped_size;
-}
-
-atomic_store(&pc_array_size, size, memory_order_release);
-}
-
-void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
-uptr range_end) {
-auto sym = Symbolizer::GetOrInit();
-if (!sym)
-return;
-const char *module_name = sym->GetModuleNameForPc(caller_pc);
-if (!module_name) return;
-if (module_name_vec.empty() ||
-module_name_vec.back().copied_module_name != module_name)
-module_name_vec.push_back({module_name, range_beg, range_end});
-else
-module_name_vec.back().end = range_end;
-}
-
-void CoverageData::InitializeGuards(s32 *guards, uptr n,
-const char *comp_unit_name,
-uptr caller_pc) {
-// The array 'guards' has n+1 elements, we use the element zero
-// to store 'n'.
-CHECK_LT(n, 1 << 30);
-guards[0] = static_cast<s32>(n);
-InitializeGuardArray(guards);
-SpinMutexLock l(&mu);
-uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
-uptr range_beg = range_end - n;
-comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
-guard_array_vec.push_back(guards);
-UpdateModuleNameVec(caller_pc, range_beg, range_end);
-}
-
-static const uptr kBundleCounterBits = 16;
-
-// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
-// we insert the global counter into the first 16 bits of the PC.
-uptr BundlePcAndCounter(uptr pc, uptr counter) {
-if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
-return pc;
-static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
-if (counter > kMaxCounter)
-counter = kMaxCounter;
-CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
-return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
-}
-
-uptr UnbundlePc(uptr bundle) {
-if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
-return bundle;
-return (bundle << kBundleCounterBits) >> kBundleCounterBits;
-}
-
-uptr UnbundleCounter(uptr bundle) {
-if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
-return 0;
-return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
-}
-
-// If guard is negative, atomically set it to -guard and store the PC in
-// pc_array.
-void CoverageData::Add(uptr pc, u32 *guard) {
-atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
-s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
-if (guard_value >= 0) return;
-
-atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
-if (!pc_array) return;
-
-uptr idx = -guard_value - 1;
-if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
-return;  // May happen after fork when pc_array_index becomes 0.
-CHECK_LT(idx, atomic_load(&pc_array_size, memory_order_acquire));
-uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
-pc_array[idx] = BundlePcAndCounter(pc, counter);
-}
-
-uptr *CoverageData::data() {
-return pc_array;
-}
-
-uptr CoverageData::size() const {
-return atomic_load(&pc_array_index, memory_order_relaxed);
-}
-
-// Block layout for packed file format: header, followed by module name (no
-// trailing zero), followed by data blob.
-struct CovHeader {
-int pid;
-unsigned int module_name_length;
-unsigned int data_length;
-};
-
-static void CovWritePacked(int pid, const char *module, const void *blob,
-unsigned int blob_size) {
-if (cov_fd == kInvalidFd) return;
-unsigned module_name_length = internal_strlen(module);
-CovHeader header = {pid, module_name_length, blob_size};
-
-if (cov_max_block_size == 0) {
-// Writing to a file. Just go ahead.
-WriteToFile(cov_fd, &header, sizeof(header));
-WriteToFile(cov_fd, module, module_name_length);
-WriteToFile(cov_fd, blob, blob_size);
-} else {
-// Writing to a socket. We want to split the data into appropriately sized
-// blocks.
-InternalScopedBuffer<char> block(cov_max_block_size);
-CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
-uptr header_size_with_module = sizeof(header) + module_name_length;
-CHECK_LT(header_size_with_module, cov_max_block_size);
-unsigned int max_payload_size =
-cov_max_block_size - header_size_with_module;
-char *block_pos = block.data();
-internal_memcpy(block_pos, &header, sizeof(header));
-block_pos += sizeof(header);
-internal_memcpy(block_pos, module, module_name_length);
-block_pos += module_name_length;
-char *block_data_begin = block_pos;
-const char *blob_pos = (const char *)blob;
-while (blob_size > 0) {
-unsigned int payload_size = Min(blob_size, max_payload_size);
-blob_size -= payload_size;
-internal_memcpy(block_data_begin, blob_pos, payload_size);
-blob_pos += payload_size;
-((CovHeader *)block.data())->data_length = payload_size;
-WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
-}
-}
-}
-
-// If packed = false: <name>.<pid>.<sancov> (name = module name).
-// If packed = true and name == 0: <pid>.<sancov>.<packed>.
-// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
-// user-supplied).
-static fd_t CovOpenFile(InternalScopedString *path, bool packed,
-const char *name, const char *extension = "sancov") {
-path->clear();
-if (!packed) {
-CHECK(name);
-path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
-extension);
-} else {
-if (!name)
-path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
-extension);
-else
-path->append("%s/%s.%s.packed", coverage_dir, name, extension);
-}
-error_t err;
-fd_t fd = OpenFile(path->data(), WrOnly, &err);
-if (fd == kInvalidFd)
-Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
-path->data(), err);
-return fd;
-}
-
-void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
-InternalMmapVector<uptr>* offsets) const {
-offsets->clear();
-for (uptr i = 0; i < kNumWordsForMagic; i++)
-offsets->push_back(0);
-CHECK(r.copied_module_name);
-CHECK_LE(r.beg, r.end);
-CHECK_LE(r.end, size());
-for (uptr i = r.beg; i < r.end; i++) {
-uptr pc = UnbundlePc(pc_array[i]);
-uptr counter = UnbundleCounter(pc_array[i]);
-if (!pc) continue; // Not visited.
-uptr offset = 0;
-sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
-offsets->push_back(BundlePcAndCounter(offset, counter));
-}
-
-CHECK_GE(offsets->size(), kNumWordsForMagic);
-SortArray(offsets->data(), offsets->size());
-for (uptr i = 0; i < offsets->size(); i++)
-(*offsets)[i] = UnbundlePc((*offsets)[i]);
-}
-
-static void GenerateHtmlReport(const InternalMmapVector<char *> &cov_files) {
-if (!common_flags()->html_cov_report) {
-return;
-}
-char *sancov_path = FindPathToBinary(common_flags()->sancov_path);
-if (sancov_path == nullptr) {
-return;
-}
-
-InternalMmapVector<char *> sancov_argv(cov_files.size() * 2 + 3);
-sancov_argv.push_back(sancov_path);
-sancov_argv.push_back(internal_strdup("-html-report"));
-auto argv_deleter = at_scope_exit([&] {
-for (uptr i = 0; i < sancov_argv.size(); ++i) {
-InternalFree(sancov_argv[i]);
-}
-});
-
-for (const auto &cov_file : cov_files) {
-sancov_argv.push_back(internal_strdup(cov_file));
-}
-
-{
-ListOfModules modules;
-modules.init();
-for (const LoadedModule &module : modules) {
-sancov_argv.push_back(internal_strdup(module.full_name()));
-}
-}
-
-InternalScopedString report_path(kMaxPathLength);
-fd_t report_fd =
-CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html");
-int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(),
-kInvalidFd /* stdin */, report_fd /* std_out */);
-if (pid > 0) {
-int result = WaitForProcess(pid);
-if (result == 0)
-Printf("coverage report generated to %s\n", report_path.data());
-}
-}
-
-void CoverageData::DumpOffsets() {
-auto sym = Symbolizer::GetOrInit();
-if (!common_flags()->coverage_pcs) return;
-Printf("**\n***\n***\n");
-Printf("**WARNING: this implementation of SanitizerCoverage is deprecated\n");
-Printf("**WARNING: and will be removed in future versions\n");
-Printf("**WARNING: See https://clang.llvm.org/docs/SanitizerCoverage.html\n");
-Printf("**\n***\n***\n");
-
-CHECK_NE(sym, nullptr);
-InternalMmapVector<uptr> offsets(0);
-InternalScopedString path(kMaxPathLength);
-
-InternalMmapVector<char *> cov_files(module_name_vec.size());
-auto cov_files_deleter = at_scope_exit([&] {
-for (uptr i = 0; i < cov_files.size(); ++i) {
-InternalFree(cov_files[i]);
-}
-});
-
-for (uptr m = 0; m < module_name_vec.size(); m++) {
-auto r = module_name_vec[m];
-GetRangeOffsets(r, sym, &offsets);
-
-uptr num_offsets = offsets.size() - kNumWordsForMagic;
-u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
-CHECK_EQ(*magic_p, 0ULL);
-// FIXME: we may want to write 32-bit offsets even in 64-mode
-// if all the offsets are small enough.
-*magic_p = kMagic;
-
-const char *module_name = StripModuleName(r.copied_module_name);
-if (cov_sandboxed) {
-if (cov_fd != kInvalidFd) {
-CovWritePacked(internal_getpid(), module_name, offsets.data(),
-offsets.size() * sizeof(offsets[0]));
-VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
-}
-} else {
-// One file per module per process.
-fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
-if (fd == kInvalidFd) continue;
-WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
-CloseFile(fd);
-cov_files.push_back(internal_strdup(path.data()));
-VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
-}
-}
-if (cov_fd != kInvalidFd)
-CloseFile(cov_fd);
-
-GenerateHtmlReport(cov_files);
-}
-
-void CoverageData::DumpAll() {
-if (!coverage_enabled || common_flags()->coverage_direct) return;
-if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
-return;
-DumpOffsets();
-}
-
-void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
-if (!args) return;
-if (!coverage_enabled) return;
-cov_sandboxed = args->coverage_sandboxed;
-if (!cov_sandboxed) return;
-cov_max_block_size = args->coverage_max_block_size;
-if (args->coverage_fd >= 0) {
-cov_fd = (fd_t)args->coverage_fd;
-} else {
-InternalScopedString path(kMaxPathLength);
-// Pre-open the file now. The sandbox won't allow us to do it later.
-cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
-}
-}
-
-fd_t MaybeOpenCovFile(const char *name) {
-CHECK(name);
-if (!coverage_enabled) return kInvalidFd;
-InternalScopedString path(kMaxPathLength);
-return CovOpenFile(&path, true /* packed */, name);
-}
-
-void CovBeforeFork() {
-coverage_data.BeforeFork();
-}
-
-void CovAfterFork(int child_pid) {
-coverage_data.AfterFork(child_pid);
-}
-
-static void MaybeDumpCoverage() {
-if (common_flags()->coverage)
-__sanitizer_cov_dump();
-}
-
-void InitializeCoverage(bool enabled, const char *dir) {
-if (coverage_enabled)
-return;  // May happen if two sanitizer enable coverage in the same process.
-coverage_enabled = enabled;
-coverage_dir = dir;
-coverage_data.Init();
-if (enabled) coverage_data.Enable();
-if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
-AddDieCallback(MaybeDumpCoverage);
-}
-
-void ReInitializeCoverage(bool enabled, const char *dir) {
-coverage_enabled = enabled;
-coverage_dir = dir;
-coverage_data.ReInit();
-}
-
-void CoverageUpdateMapping() {
-if (coverage_enabled)
-CovUpdateMapping(coverage_dir);
-}
-
-}  // namespace __sanitizer
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
-__sanitizer_dump_trace_pc_guard_coverage();
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-sptr __sanitizer_maybe_open_cov_file(const char *name) {
-return (sptr)MaybeOpenCovFile(name);
-}
-// Default empty implementations (weak). Users should redefine them.
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
-}  // extern "C"
@@ -146,6 +146,17 @@ static TracePcGuardController pc_guard_controller;
 } // namespace
 } // namespace __sancov

+namespace __sanitizer {
+void InitializeCoverage(bool enabled, const char *dir) {
+static bool coverage_enabled = false;
+if (coverage_enabled)
+return; // May happen if two sanitizer enable coverage in the same process.
+coverage_enabled = enabled;
+Atexit(__sanitizer_cov_dump);
+AddDieCallback(__sanitizer_cov_dump);
+}
+} // namespace __sanitizer
+
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
 const uptr* pcs, uptr len) {
@@ -166,4 +177,18 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
 SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
 __sancov::pc_guard_controller.Dump();
 }
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+__sanitizer_dump_trace_pc_guard_coverage();
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
 } // extern "C"
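With the legacy implementation removed, __sanitizer_cov_dump() above simply forwards to __sanitizer_dump_trace_pc_guard_coverage(). A program instrumented with trace-pc-guard coverage can still request a dump explicitly; a minimal sketch (assuming a clang build with -fsanitize=address -fsanitize-coverage=trace-pc-guard, so the runtime provides these entry points):

// Force a coverage dump at a chosen point instead of waiting for atexit.
#include <sanitizer/coverage_interface.h>

extern "C" void dump_coverage_checkpoint() {
  // Writes <module>.<pid>.sancov files for the PCs observed so far.
  __sanitizer_cov_dump();
}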
@@ -1,122 +0,0 @@
-//===-- sanitizer_coverage_mapping.cc -------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Mmap-based implementation of sanitizer coverage.
-//
-// This is part of the implementation of code coverage that does not require
-// __sanitizer_cov_dump() call. Data is stored in 2 files per process.
-//
-// $pid.sancov.map describes process memory layout in the following text-based
-// format:
-// <pointer size in bits>  // 1 line, 32 or 64
-// <mapping start> <mapping end> <base address> <dso name> // repeated
-// ...
-// Mapping lines are NOT sorted. This file is updated every time memory layout
-// is changed (i.e. in dlopen() and dlclose() interceptors).
-//
-// $pid.sancov.raw is a binary dump of PC values, sizeof(uptr) each. Again, not
-// sorted. This file is extended by 64Kb at a time and mapped into memory. It
-// contains one or more 0 words at the end, up to the next 64Kb aligned offset.
-//
-// To convert these 2 files to the usual .sancov format, run sancov.py rawunpack
-// $pid.sancov.raw.
-//
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_procmaps.h"
-
-namespace __sanitizer {
-
-static const uptr kMaxTextSize = 64 * 1024;
-
-struct CachedMapping {
-public:
-bool NeedsUpdate(uptr pc) {
-int new_pid = internal_getpid();
-if (last_pid == new_pid && pc && pc >= last_range_start &&
-pc < last_range_end)
-return false;
-last_pid = new_pid;
-return true;
-}
-
-void SetModuleRange(uptr start, uptr end) {
-last_range_start = start;
-last_range_end = end;
-}
-
-private:
-uptr last_range_start, last_range_end;
-int last_pid;
-};
-
-static CachedMapping cached_mapping;
-static StaticSpinMutex mapping_mu;
-
-void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
-if (!common_flags()->coverage_direct) return;
-
-SpinMutexLock l(&mapping_mu);
-
-if (!cached_mapping.NeedsUpdate(caller_pc))
-return;
-
-InternalScopedString text(kMaxTextSize);
-
-{
-text.append("%d\n", sizeof(uptr) * 8);
-ListOfModules modules;
-modules.init();
-for (const LoadedModule &module : modules) {
-const char *module_name = StripModuleName(module.full_name());
-uptr base = module.base_address();
-for (const auto &range : module.ranges()) {
-if (range.executable) {
-uptr start = range.beg;
-uptr end = range.end;
-text.append("%zx %zx %zx %s\n", start, end, base, module_name);
-if (caller_pc && caller_pc >= start && caller_pc < end)
-cached_mapping.SetModuleRange(start, end);
-}
-}
-}
-}
-
-error_t err;
-InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
-uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
-"%s/%zd.sancov.map.tmp", coverage_dir,
-internal_getpid());
-CHECK_LE(res, tmp_path.size());
-fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err);
-if (map_fd == kInvalidFd) {
-Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
-err);
-Die();
-}
-
-if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) {
-Printf("sancov.map write failed: %d\n", err);
-Die();
-}
-CloseFile(map_fd);
-
-InternalScopedString path(64 + internal_strlen(coverage_dir));
-res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
-coverage_dir, internal_getpid());
-CHECK_LE(res, path.size());
-if (!RenameFile(tmp_path.data(), path.data(), &err)) {
-Printf("sancov.map rename failed: %d\n", err);
-Die();
-}
-}
-
-}  // namespace __sanitizer
@@ -138,16 +138,6 @@ COMMON_FLAG(
 bool, coverage, false,
 "If set, coverage information will be dumped at program shutdown (if the "
 "coverage instrumentation was enabled at compile time).")
-COMMON_FLAG(bool, coverage_pcs, true,
-"If set (and if 'coverage' is set too), the coverage information "
-"will be dumped as a set of PC offsets for every module.")
-COMMON_FLAG(bool, coverage_order_pcs, false,
-"If true, the PCs will be dumped in the order they've"
-" appeared during the execution.")
-COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
-"If set, coverage information will be dumped directly to a memory "
-"mapped file. This way data is not lost even if the process is "
-"suddenly killed.")
 COMMON_FLAG(const char *, coverage_dir, ".",
 "Target directory for coverage dumps. Defaults to the current "
 "directory.")
@@ -75,16 +75,6 @@ extern char **environ;  // provided by crt1
 #include <sys/signal.h>
 #endif

-#ifndef __GLIBC_PREREQ
-#define __GLIBC_PREREQ(x, y) 0
-#endif
-
-#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16)
-# define SANITIZER_USE_GETAUXVAL 1
-#else
-# define SANITIZER_USE_GETAUXVAL 0
-#endif
-
 #if SANITIZER_USE_GETAUXVAL
 #include <sys/auxv.h>
 #endif
@@ -269,5 +269,14 @@
 # define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
 #endif

+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16)
+# define SANITIZER_USE_GETAUXVAL 1
+#else
+# define SANITIZER_USE_GETAUXVAL 0
+#endif

 #endif // SANITIZER_PLATFORM_H
@@ -23,6 +23,9 @@
 // incorporates the map structure.
 # define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
 ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
+// Get sys/_types.h, because that tells us whether 64-bit inodes are
+// used in struct dirent below.
+#include <sys/_types.h>
 #else
 # define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
 #endif // !SANITIZER_FREEBSD
@@ -489,8 +492,12 @@ namespace __sanitizer {
 };
 #elif SANITIZER_FREEBSD
 struct __sanitizer_dirent {
+#if defined(__INO64)
 unsigned long long d_fileno;
 unsigned long long d_off;
+#else
+unsigned int d_fileno;
+#endif
 unsigned short d_reclen;
 // more fields that we don't care about
 };
@@ -264,7 +264,6 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
 // Same for /proc/self/exe in the symbolizer.
 #if !SANITIZER_GO
 Symbolizer::GetOrInit()->PrepareForSandboxing();
-CovPrepareForSandboxing(args);
 #endif
 }

@@ -400,9 +400,6 @@ void ReExec() {
 }

 void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
-#if !SANITIZER_GO
-CovPrepareForSandboxing(args);
-#endif
 }

 bool StackSizeIsUnlimited() {
@@ -19,6 +19,7 @@

 UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined")
 UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null")
+UBSAN_CHECK(PointerOverflow, "pointer-overflow", "pointer-overflow")
 UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", "alignment")
 UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", "object-size")
 UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow",
@@ -554,6 +554,37 @@ void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) {
 Die();
 }

+static void handlePointerOverflowImpl(PointerOverflowData *Data,
+ValueHandle Base,
+ValueHandle Result,
+ReportOptions Opts) {
+SourceLocation Loc = Data->Loc.acquire();
+ErrorType ET = ErrorType::PointerOverflow;
+
+if (ignoreReport(Loc, Opts, ET))
+return;
+
+ScopedReport R(Opts, Loc, ET);
+
+Diag(Loc, DL_Error, "pointer index expression with base %0 overflowed to %1")
+<< (void *)Base << (void*)Result;
+}
+
+void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,
+ValueHandle Base,
+ValueHandle Result) {
+GET_REPORT_OPTIONS(false);
+handlePointerOverflowImpl(Data, Base, Result, Opts);
+}
+
+void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data,
+ValueHandle Base,
+ValueHandle Result) {
+GET_REPORT_OPTIONS(true);
+handlePointerOverflowImpl(Data, Base, Result, Opts);
+Die();
+}
+
 static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function,
 ReportOptions Opts) {
 if (Data->CheckKind != CFITCK_ICall)
@@ -152,6 +152,13 @@ struct NonNullArgData {
 RECOVERABLE(nonnull_arg, NonNullArgData *Data)
 RECOVERABLE(nullability_arg, NonNullArgData *Data)

+struct PointerOverflowData {
+SourceLocation Loc;
+};
+
+RECOVERABLE(pointer_overflow, PointerOverflowData *Data, ValueHandle Base,
+ValueHandle Result)
+
 /// \brief Known CFI check kinds.
 /// Keep in sync with the enum of the same name in CodeGenFunction.h
 enum CFITypeCheckKind : unsigned char {
@@ -36,6 +36,8 @@ INTERFACE_FUNCTION(__ubsan_handle_nullability_return)
 INTERFACE_FUNCTION(__ubsan_handle_nullability_return_abort)
 INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
 INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort)
 INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds)
 INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort)
 INTERFACE_FUNCTION(__ubsan_handle_sub_overflow)
@@ -137,7 +137,7 @@ inline _LIBCPP_INLINE_VISIBILITY
 size_t
 __next_hash_pow2(size_t __n)
 {
-return size_t(1) << (std::numeric_limits<size_t>::digits - __clz(__n-1));
+return __n < 2 ? __n : (size_t(1) << (std::numeric_limits<size_t>::digits - __clz(__n-1)));
 }


@@ -415,6 +415,9 @@ public:
 append(IL.begin(), IL.end());
 }

+// FIXME: Consider assigning over existing elements, rather than clearing &
+// re-initializing them - for all assign(...) variants.
+
 void assign(size_type NumElts, const T &Elt) {
 clear();
 if (this->capacity() < NumElts)
|
|||||||
std::uninitialized_fill(this->begin(), this->end(), Elt);
|
std::uninitialized_fill(this->begin(), this->end(), Elt);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename in_iter> void assign(in_iter in_start, in_iter in_end) {
|
||||||
|
clear();
|
||||||
|
append(in_start, in_end);
|
||||||
|
}
|
||||||
|
|
||||||
void assign(std::initializer_list<T> IL) {
|
void assign(std::initializer_list<T> IL) {
|
||||||
clear();
|
clear();
|
||||||
append(IL);
|
append(IL);
|
||||||
|
@ -58,6 +58,7 @@ public:
|
|||||||
/// comes before \p B in \p BB. This is a simplification that considers
|
/// comes before \p B in \p BB. This is a simplification that considers
|
||||||
/// cached instruction positions and ignores other basic blocks, being
|
/// cached instruction positions and ignores other basic blocks, being
|
||||||
/// only relevant to compare relative instructions positions inside \p BB.
|
/// only relevant to compare relative instructions positions inside \p BB.
|
||||||
|
/// Returns false for A == B.
|
||||||
bool dominates(const Instruction *A, const Instruction *B);
|
bool dominates(const Instruction *A, const Instruction *B);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -78,6 +78,11 @@ public:
 return PMT_RegionPassManager;
 }
 //@}
+
+protected:
+/// Optional passes call this function to check whether the pass should be
+/// skipped. This is the case when optimization bisect is over the limit.
+bool skipRegion(Region &R) const;
 };

 /// @brief The pass manager to schedule RegionPasses.
@ -636,7 +636,7 @@ private:
|
|||||||
/// @}
|
/// @}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
BackedgeTakenInfo() : MaxAndComplete(nullptr, 0) {}
|
BackedgeTakenInfo() : MaxAndComplete(nullptr, 0), MaxOrZero(false) {}
|
||||||
|
|
||||||
BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
|
BackedgeTakenInfo(BackedgeTakenInfo &&) = default;
|
||||||
BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
|
BackedgeTakenInfo &operator=(BackedgeTakenInfo &&) = default;
|
||||||
|
@ -10,83 +10,77 @@
|
|||||||
#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
|
#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
|
||||||
#define LLVM_CODEGEN_MACHINEREGIONINFO_H
|
#define LLVM_CODEGEN_MACHINEREGIONINFO_H
|
||||||
|
|
||||||
|
#include "llvm/ADT/DepthFirstIterator.h"
|
||||||
#include "llvm/Analysis/RegionInfo.h"
|
#include "llvm/Analysis/RegionInfo.h"
|
||||||
#include "llvm/Analysis/RegionIterator.h"
|
#include "llvm/Analysis/RegionIterator.h"
|
||||||
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||||
#include "llvm/CodeGen/MachineDominanceFrontier.h"
|
#include "llvm/CodeGen/MachineDominanceFrontier.h"
|
||||||
#include "llvm/CodeGen/MachineDominators.h"
|
#include "llvm/CodeGen/MachineDominators.h"
|
||||||
#include "llvm/CodeGen/MachineFunction.h"
|
#include "llvm/CodeGen/MachineFunction.h"
|
||||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||||
#include "llvm/CodeGen/MachineLoopInfo.h"
|
#include "llvm/CodeGen/MachineLoopInfo.h"
|
||||||
|
#include <cassert>
|
||||||
|
|
||||||
namespace llvm {
|
namespace llvm {
|
||||||
|
|
||||||
class MachineDominatorTree;
|
|
||||||
struct MachinePostDominatorTree;
|
struct MachinePostDominatorTree;
|
||||||
class MachineRegion;
|
class MachineRegion;
|
||||||
class MachineRegionNode;
|
class MachineRegionNode;
|
||||||
class MachineRegionInfo;
|
class MachineRegionInfo;
|
||||||
|
|
||||||
template<>
|
template <> struct RegionTraits<MachineFunction> {
|
||||||
struct RegionTraits<MachineFunction> {
|
using FuncT = MachineFunction;
|
||||||
typedef MachineFunction FuncT;
|
using BlockT = MachineBasicBlock;
|
||||||
typedef MachineBasicBlock BlockT;
|
using RegionT = MachineRegion;
|
||||||
typedef MachineRegion RegionT;
|
using RegionNodeT = MachineRegionNode;
|
||||||
typedef MachineRegionNode RegionNodeT;
|
using RegionInfoT = MachineRegionInfo;
|
||||||
typedef MachineRegionInfo RegionInfoT;
|
using DomTreeT = MachineDominatorTree;
|
||||||
typedef MachineDominatorTree DomTreeT;
|
using DomTreeNodeT = MachineDomTreeNode;
|
||||||
typedef MachineDomTreeNode DomTreeNodeT;
|
using PostDomTreeT = MachinePostDominatorTree;
|
||||||
typedef MachinePostDominatorTree PostDomTreeT;
|
using DomFrontierT = MachineDominanceFrontier;
|
||||||
typedef MachineDominanceFrontier DomFrontierT;
|
using InstT = MachineInstr;
|
||||||
typedef MachineInstr InstT;
|
using LoopT = MachineLoop;
|
||||||
typedef MachineLoop LoopT;
|
using LoopInfoT = MachineLoopInfo;
|
||||||
typedef MachineLoopInfo LoopInfoT;
|
|
||||||
|
|
||||||
static unsigned getNumSuccessors(MachineBasicBlock *BB) {
|
static unsigned getNumSuccessors(MachineBasicBlock *BB) {
|
||||||
return BB->succ_size();
|
return BB->succ_size();
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
|
class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
|
||||||
public:
|
public:
|
||||||
inline MachineRegionNode(MachineRegion *Parent,
|
inline MachineRegionNode(MachineRegion *Parent, MachineBasicBlock *Entry,
|
||||||
MachineBasicBlock *Entry,
|
|
||||||
bool isSubRegion = false)
|
bool isSubRegion = false)
|
||||||
: RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry, isSubRegion) {
|
: RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry,
|
||||||
|
isSubRegion) {}
|
||||||
}
|
|
||||||
|
|
||||||
bool operator==(const MachineRegion &RN) const {
|
bool operator==(const MachineRegion &RN) const {
|
||||||
return this == reinterpret_cast<const MachineRegionNode*>(&RN);
|
return this == reinterpret_cast<const MachineRegionNode *>(&RN);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
|
class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
|
||||||
public:
|
public:
|
||||||
MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
|
MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
|
||||||
MachineRegionInfo* RI,
|
MachineRegionInfo *RI, MachineDominatorTree *DT,
|
||||||
MachineDominatorTree *DT, MachineRegion *Parent = nullptr);
|
MachineRegion *Parent = nullptr);
|
||||||
~MachineRegion();
|
~MachineRegion();
|
||||||
|
|
||||||
bool operator==(const MachineRegionNode &RN) const {
|
bool operator==(const MachineRegionNode &RN) const {
|
||||||
return &RN == reinterpret_cast<const MachineRegionNode*>(this);
|
return &RN == reinterpret_cast<const MachineRegionNode *>(this);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
|
class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
|
||||||
public:
|
public:
|
||||||
explicit MachineRegionInfo();
|
explicit MachineRegionInfo();
|
||||||
|
|
||||||
~MachineRegionInfo() override;
|
~MachineRegionInfo() override;
|
||||||
|
|
||||||
// updateStatistics - Update statistic about created regions.
|
// updateStatistics - Update statistic about created regions.
|
||||||
void updateStatistics(MachineRegion *R) final;
|
void updateStatistics(MachineRegion *R) final;
|
||||||
|
|
||||||
void recalculate(MachineFunction &F,
|
void recalculate(MachineFunction &F, MachineDominatorTree *DT,
|
||||||
MachineDominatorTree *DT,
|
MachinePostDominatorTree *PDT, MachineDominanceFrontier *DF);
|
||||||
MachinePostDominatorTree *PDT,
|
|
||||||
MachineDominanceFrontier *DF);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
class MachineRegionInfoPass : public MachineFunctionPass {
|
class MachineRegionInfoPass : public MachineFunctionPass {
|
||||||
@ -94,17 +88,13 @@ class MachineRegionInfoPass : public MachineFunctionPass {
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
static char ID;
|
static char ID;
|
||||||
explicit MachineRegionInfoPass();
|
|
||||||
|
|
||||||
|
explicit MachineRegionInfoPass();
|
||||||
~MachineRegionInfoPass() override;
|
~MachineRegionInfoPass() override;
|
||||||
|
|
||||||
MachineRegionInfo &getRegionInfo() {
|
MachineRegionInfo &getRegionInfo() { return RI; }
|
||||||
return RI;
|
|
||||||
}
|
|
||||||
|
|
||||||
const MachineRegionInfo &getRegionInfo() const {
|
const MachineRegionInfo &getRegionInfo() const { return RI; }
|
||||||
return RI;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// @name MachineFunctionPass interface
|
/// @name MachineFunctionPass interface
|
||||||
//@{
|
//@{
|
||||||
@ -117,66 +107,76 @@ public:
|
|||||||
//@}
|
//@}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
template <>
|
template <>
|
||||||
inline MachineBasicBlock* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>() const {
|
inline MachineBasicBlock *
|
||||||
|
RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>()
|
||||||
|
const {
|
||||||
assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
|
assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
|
||||||
return getEntry();
|
return getEntry();
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template <>
|
||||||
template<>
|
template <>
|
||||||
inline MachineRegion* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>() const {
|
inline MachineRegion *
|
||||||
|
RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>()
|
||||||
|
const {
|
||||||
assert(isSubRegion() && "This is not a subregion RegionNode!");
|
assert(isSubRegion() && "This is not a subregion RegionNode!");
|
||||||
auto Unconst = const_cast<RegionNodeBase<RegionTraits<MachineFunction>>*>(this);
|
auto Unconst =
|
||||||
return reinterpret_cast<MachineRegion*>(Unconst);
|
const_cast<RegionNodeBase<RegionTraits<MachineFunction>> *>(this);
|
||||||
|
return reinterpret_cast<MachineRegion *>(Unconst);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
|
RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
|
||||||
RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock, MachineRegion);
|
RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock,
|
||||||
|
MachineRegion);
|
||||||
|
|
||||||
RegionGraphTraits(MachineRegion, MachineRegionNode);
|
RegionGraphTraits(MachineRegion, MachineRegionNode);
|
||||||
RegionGraphTraits(const MachineRegion, const MachineRegionNode);
|
RegionGraphTraits(const MachineRegion, const MachineRegionNode);
|
||||||
|
|
||||||
template <> struct GraphTraits<MachineRegionInfo*>
|
template <>
|
||||||
: public GraphTraits<FlatIt<MachineRegionNode*> > {
|
struct GraphTraits<MachineRegionInfo *>
|
||||||
typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
|
: public GraphTraits<FlatIt<MachineRegionNode *>> {
|
||||||
GraphTraits<FlatIt<NodeRef>>>
|
using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
|
||||||
nodes_iterator;
|
false, GraphTraits<FlatIt<NodeRef>>>;
|
||||||
|
|
||||||
static NodeRef getEntryNode(MachineRegionInfo *RI) {
|
static NodeRef getEntryNode(MachineRegionInfo *RI) {
|
||||||
return GraphTraits<FlatIt<MachineRegion*> >::getEntryNode(RI->getTopLevelRegion());
|
return GraphTraits<FlatIt<MachineRegion *>>::getEntryNode(
|
||||||
|
RI->getTopLevelRegion());
|
||||||
}
|
}
|
||||||
static nodes_iterator nodes_begin(MachineRegionInfo* RI) {
|
|
||||||
|
static nodes_iterator nodes_begin(MachineRegionInfo *RI) {
|
||||||
return nodes_iterator::begin(getEntryNode(RI));
|
return nodes_iterator::begin(getEntryNode(RI));
|
||||||
}
|
}
|
||||||
|
|
||||||
static nodes_iterator nodes_end(MachineRegionInfo *RI) {
|
static nodes_iterator nodes_end(MachineRegionInfo *RI) {
|
||||||
return nodes_iterator::end(getEntryNode(RI));
|
return nodes_iterator::end(getEntryNode(RI));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
template <> struct GraphTraits<MachineRegionInfoPass*>
|
template <>
|
||||||
: public GraphTraits<MachineRegionInfo *> {
|
struct GraphTraits<MachineRegionInfoPass *>
|
||||||
typedef df_iterator<NodeRef, df_iterator_default_set<NodeRef>, false,
|
: public GraphTraits<MachineRegionInfo *> {
|
||||||
GraphTraits<FlatIt<NodeRef>>>
|
using nodes_iterator = df_iterator<NodeRef, df_iterator_default_set<NodeRef>,
|
||||||
nodes_iterator;
|
false, GraphTraits<FlatIt<NodeRef>>>;
|
||||||
|
|
||||||
static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
|
static NodeRef getEntryNode(MachineRegionInfoPass *RI) {
|
||||||
return GraphTraits<MachineRegionInfo*>::getEntryNode(&RI->getRegionInfo());
|
return GraphTraits<MachineRegionInfo *>::getEntryNode(&RI->getRegionInfo());
|
||||||
}
|
}
|
||||||
static nodes_iterator nodes_begin(MachineRegionInfoPass* RI) {
|
|
||||||
return GraphTraits<MachineRegionInfo*>::nodes_begin(&RI->getRegionInfo());
|
static nodes_iterator nodes_begin(MachineRegionInfoPass *RI) {
|
||||||
|
return GraphTraits<MachineRegionInfo *>::nodes_begin(&RI->getRegionInfo());
|
||||||
}
|
}
|
||||||
|
|
||||||
static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
|
static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
|
||||||
return GraphTraits<MachineRegionInfo*>::nodes_end(&RI->getRegionInfo());
|
return GraphTraits<MachineRegionInfo *>::nodes_end(&RI->getRegionInfo());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
extern template class RegionBase<RegionTraits<MachineFunction>>;
|
extern template class RegionBase<RegionTraits<MachineFunction>>;
|
||||||
extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
|
extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
|
||||||
extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
|
extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
|
||||||
}
|
|
||||||
|
|
||||||
#endif
|
} // end namespace llvm
|
||||||
|
|
||||||
|
#endif // LLVM_CODEGEN_MACHINEREGIONINFO_H
|
||||||
|
@ -14,11 +14,13 @@
|
|||||||
#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
||||||
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
|
||||||
|
|
||||||
|
#include "llvm/ADT/ArrayRef.h"
|
||||||
#include "llvm/ADT/BitVector.h"
|
#include "llvm/ADT/BitVector.h"
|
||||||
#include "llvm/ADT/DenseMap.h"
|
#include "llvm/ADT/DenseMap.h"
|
||||||
#include "llvm/ADT/IndexedMap.h"
|
#include "llvm/ADT/IndexedMap.h"
|
||||||
#include "llvm/ADT/iterator_range.h"
|
#include "llvm/ADT/iterator_range.h"
|
||||||
#include "llvm/ADT/PointerUnion.h"
|
#include "llvm/ADT/PointerUnion.h"
|
||||||
|
#include "llvm/ADT/SmallVector.h"
|
||||||
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
|
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
|
||||||
#include "llvm/CodeGen/LowLevelType.h"
|
#include "llvm/CodeGen/LowLevelType.h"
|
||||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||||
@ -41,8 +43,8 @@ namespace llvm {
|
|||||||
class PSetIterator;
|
class PSetIterator;
|
||||||
|
|
||||||
/// Convenient type to represent either a register class or a register bank.
|
/// Convenient type to represent either a register class or a register bank.
|
||||||
typedef PointerUnion<const TargetRegisterClass *, const RegisterBank *>
|
using RegClassOrRegBank =
|
||||||
RegClassOrRegBank;
|
PointerUnion<const TargetRegisterClass *, const RegisterBank *>;
|
||||||
|
|
||||||
/// MachineRegisterInfo - Keep track of information for virtual and physical
|
/// MachineRegisterInfo - Keep track of information for virtual and physical
|
||||||
/// registers, including vreg register classes, use/def chains for registers,
|
/// registers, including vreg register classes, use/def chains for registers,
|
||||||
@ -125,7 +127,7 @@ private:
|
|||||||
/// started.
|
/// started.
|
||||||
BitVector ReservedRegs;
|
BitVector ReservedRegs;
|
||||||
|
|
||||||
typedef DenseMap<unsigned, LLT> VRegToTypeMap;
|
using VRegToTypeMap = DenseMap<unsigned, LLT>;
|
||||||
/// Map generic virtual registers to their actual size.
|
/// Map generic virtual registers to their actual size.
|
||||||
mutable std::unique_ptr<VRegToTypeMap> VRegToType;
|
mutable std::unique_ptr<VRegToTypeMap> VRegToType;
|
||||||
|
|
||||||
@ -266,8 +268,8 @@ public:
|
|||||||
|
|
||||||
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
|
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
|
||||||
/// register.
|
/// register.
|
||||||
typedef defusechain_iterator<true,true,false,true,false,false>
|
using reg_iterator =
|
||||||
reg_iterator;
|
defusechain_iterator<true, true, false, true, false, false>;
|
||||||
reg_iterator reg_begin(unsigned RegNo) const {
|
reg_iterator reg_begin(unsigned RegNo) const {
|
||||||
return reg_iterator(getRegUseDefListHead(RegNo));
|
return reg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -279,8 +281,8 @@ public:
|
|||||||
|
|
||||||
/// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
|
/// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
|
||||||
/// of the specified register, stepping by MachineInstr.
|
/// of the specified register, stepping by MachineInstr.
|
||||||
typedef defusechain_instr_iterator<true,true,false,false,true,false>
|
using reg_instr_iterator =
|
||||||
reg_instr_iterator;
|
defusechain_instr_iterator<true, true, false, false, true, false>;
|
||||||
reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
|
reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
|
||||||
return reg_instr_iterator(getRegUseDefListHead(RegNo));
|
return reg_instr_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -295,8 +297,8 @@ public:
|
|||||||
|
|
||||||
/// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
|
/// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
|
||||||
/// of the specified register, stepping by bundle.
|
/// of the specified register, stepping by bundle.
|
||||||
typedef defusechain_instr_iterator<true,true,false,false,false,true>
|
using reg_bundle_iterator =
|
||||||
reg_bundle_iterator;
|
defusechain_instr_iterator<true, true, false, false, false, true>;
|
||||||
reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
|
reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
|
||||||
return reg_bundle_iterator(getRegUseDefListHead(RegNo));
|
return reg_bundle_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -314,8 +316,8 @@ public:
|
|||||||
|
|
||||||
/// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
|
/// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
|
||||||
/// of the specified register, skipping those marked as Debug.
|
/// of the specified register, skipping those marked as Debug.
|
||||||
typedef defusechain_iterator<true,true,true,true,false,false>
|
using reg_nodbg_iterator =
|
||||||
reg_nodbg_iterator;
|
defusechain_iterator<true, true, true, true, false, false>;
|
||||||
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
|
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
|
||||||
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -331,8 +333,8 @@ public:
|
|||||||
/// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
|
/// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
|
||||||
/// all defs and uses of the specified register, stepping by MachineInstr,
|
/// all defs and uses of the specified register, stepping by MachineInstr,
|
||||||
/// skipping those marked as Debug.
|
/// skipping those marked as Debug.
|
||||||
typedef defusechain_instr_iterator<true,true,true,false,true,false>
|
using reg_instr_nodbg_iterator =
|
||||||
reg_instr_nodbg_iterator;
|
defusechain_instr_iterator<true, true, true, false, true, false>;
|
||||||
reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
|
reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
|
||||||
return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -348,8 +350,8 @@ public:
|
|||||||
/// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
|
/// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
|
||||||
/// all defs and uses of the specified register, stepping by bundle,
|
/// all defs and uses of the specified register, stepping by bundle,
|
||||||
/// skipping those marked as Debug.
|
/// skipping those marked as Debug.
|
||||||
typedef defusechain_instr_iterator<true,true,true,false,false,true>
|
using reg_bundle_nodbg_iterator =
|
||||||
reg_bundle_nodbg_iterator;
|
defusechain_instr_iterator<true, true, true, false, false, true>;
|
||||||
reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
|
reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
|
||||||
return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -369,8 +371,8 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// def_iterator/def_begin/def_end - Walk all defs of the specified register.
|
/// def_iterator/def_begin/def_end - Walk all defs of the specified register.
|
||||||
typedef defusechain_iterator<false,true,false,true,false,false>
|
using def_iterator =
|
||||||
def_iterator;
|
defusechain_iterator<false, true, false, true, false, false>;
|
||||||
def_iterator def_begin(unsigned RegNo) const {
|
def_iterator def_begin(unsigned RegNo) const {
|
||||||
return def_iterator(getRegUseDefListHead(RegNo));
|
return def_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -382,8 +384,8 @@ public:
|
|||||||
|
|
||||||
/// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
|
/// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
|
||||||
/// specified register, stepping by MachineInst.
|
/// specified register, stepping by MachineInst.
|
||||||
typedef defusechain_instr_iterator<false,true,false,false,true,false>
|
using def_instr_iterator =
|
||||||
def_instr_iterator;
|
defusechain_instr_iterator<false, true, false, false, true, false>;
|
||||||
def_instr_iterator def_instr_begin(unsigned RegNo) const {
|
def_instr_iterator def_instr_begin(unsigned RegNo) const {
|
||||||
return def_instr_iterator(getRegUseDefListHead(RegNo));
|
return def_instr_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -398,8 +400,8 @@ public:
|
|||||||
|
|
||||||
/// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
|
/// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
|
||||||
/// specified register, stepping by bundle.
|
/// specified register, stepping by bundle.
|
||||||
typedef defusechain_instr_iterator<false,true,false,false,false,true>
|
using def_bundle_iterator =
|
||||||
def_bundle_iterator;
|
defusechain_instr_iterator<false, true, false, false, false, true>;
|
||||||
def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
|
def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
|
||||||
return def_bundle_iterator(getRegUseDefListHead(RegNo));
|
return def_bundle_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -425,8 +427,8 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
|
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
|
||||||
typedef defusechain_iterator<true,false,false,true,false,false>
|
using use_iterator =
|
||||||
use_iterator;
|
defusechain_iterator<true, false, false, true, false, false>;
|
||||||
use_iterator use_begin(unsigned RegNo) const {
|
use_iterator use_begin(unsigned RegNo) const {
|
||||||
return use_iterator(getRegUseDefListHead(RegNo));
|
return use_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -438,8 +440,8 @@ public:
|
|||||||
|
|
||||||
/// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
|
/// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
|
||||||
/// specified register, stepping by MachineInstr.
|
/// specified register, stepping by MachineInstr.
|
||||||
typedef defusechain_instr_iterator<true,false,false,false,true,false>
|
using use_instr_iterator =
|
||||||
use_instr_iterator;
|
defusechain_instr_iterator<true, false, false, false, true, false>;
|
||||||
use_instr_iterator use_instr_begin(unsigned RegNo) const {
|
use_instr_iterator use_instr_begin(unsigned RegNo) const {
|
||||||
return use_instr_iterator(getRegUseDefListHead(RegNo));
|
return use_instr_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -454,8 +456,8 @@ public:
|
|||||||
|
|
||||||
/// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
|
/// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
|
||||||
/// specified register, stepping by bundle.
|
/// specified register, stepping by bundle.
|
||||||
typedef defusechain_instr_iterator<true,false,false,false,false,true>
|
using use_bundle_iterator =
|
||||||
use_bundle_iterator;
|
defusechain_instr_iterator<true, false, false, false, false, true>;
|
||||||
use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
|
use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
|
||||||
return use_bundle_iterator(getRegUseDefListHead(RegNo));
|
return use_bundle_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -482,8 +484,8 @@ public:
|
|||||||
|
|
||||||
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
|
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
|
||||||
/// specified register, skipping those marked as Debug.
|
/// specified register, skipping those marked as Debug.
|
||||||
typedef defusechain_iterator<true,false,true,true,false,false>
|
using use_nodbg_iterator =
|
||||||
use_nodbg_iterator;
|
defusechain_iterator<true, false, true, true, false, false>;
|
||||||
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
|
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
|
||||||
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -499,8 +501,8 @@ public:
|
|||||||
/// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
|
/// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
|
||||||
/// all uses of the specified register, stepping by MachineInstr, skipping
|
/// all uses of the specified register, stepping by MachineInstr, skipping
|
||||||
/// those marked as Debug.
|
/// those marked as Debug.
|
||||||
typedef defusechain_instr_iterator<true,false,true,false,true,false>
|
using use_instr_nodbg_iterator =
|
||||||
use_instr_nodbg_iterator;
|
defusechain_instr_iterator<true, false, true, false, true, false>;
|
||||||
use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
|
use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
|
||||||
return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -516,8 +518,8 @@ public:
|
|||||||
/// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
|
/// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
|
||||||
/// all uses of the specified register, stepping by bundle, skipping
|
/// all uses of the specified register, stepping by bundle, skipping
|
||||||
/// those marked as Debug.
|
/// those marked as Debug.
|
||||||
typedef defusechain_instr_iterator<true,false,true,false,false,true>
|
using use_bundle_nodbg_iterator =
|
||||||
use_bundle_nodbg_iterator;
|
defusechain_instr_iterator<true, false, true, false, false, true>;
|
||||||
use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
|
use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
|
||||||
return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
|
return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
|
||||||
}
|
}
|
||||||
@ -593,7 +595,6 @@ public:
|
|||||||
/// Return the register class of the specified virtual register.
|
/// Return the register class of the specified virtual register.
|
||||||
/// This shouldn't be used directly unless \p Reg has a register class.
|
/// This shouldn't be used directly unless \p Reg has a register class.
|
||||||
/// \see getRegClassOrNull when this might happen.
|
/// \see getRegClassOrNull when this might happen.
|
||||||
///
|
|
||||||
const TargetRegisterClass *getRegClass(unsigned Reg) const {
|
const TargetRegisterClass *getRegClass(unsigned Reg) const {
|
||||||
assert(VRegInfo[Reg].first.is<const TargetRegisterClass *>() &&
|
assert(VRegInfo[Reg].first.is<const TargetRegisterClass *>() &&
|
||||||
"Register class not set, wrong accessor");
|
"Register class not set, wrong accessor");
|
||||||
@ -620,7 +621,6 @@ public:
|
|||||||
/// a register bank or has been assigned a register class.
|
/// a register bank or has been assigned a register class.
|
||||||
/// \note It is possible to get the register bank from the register class via
|
/// \note It is possible to get the register bank from the register class via
|
||||||
/// RegisterBankInfo::getRegBankFromRegClass.
|
/// RegisterBankInfo::getRegBankFromRegClass.
|
||||||
///
|
|
||||||
const RegisterBank *getRegBankOrNull(unsigned Reg) const {
|
const RegisterBank *getRegBankOrNull(unsigned Reg) const {
|
||||||
const RegClassOrRegBank &Val = VRegInfo[Reg].first;
|
const RegClassOrRegBank &Val = VRegInfo[Reg].first;
|
||||||
return Val.dyn_cast<const RegisterBank *>();
|
return Val.dyn_cast<const RegisterBank *>();
|
||||||
@ -629,17 +629,14 @@ public:
|
|||||||
/// Return the register bank or register class of \p Reg.
|
/// Return the register bank or register class of \p Reg.
|
||||||
/// \note Before the register bank gets assigned (i.e., before the
|
/// \note Before the register bank gets assigned (i.e., before the
|
||||||
/// RegBankSelect pass) \p Reg may not have either.
|
/// RegBankSelect pass) \p Reg may not have either.
|
||||||
///
|
|
||||||
const RegClassOrRegBank &getRegClassOrRegBank(unsigned Reg) const {
|
const RegClassOrRegBank &getRegClassOrRegBank(unsigned Reg) const {
|
||||||
return VRegInfo[Reg].first;
|
return VRegInfo[Reg].first;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// setRegClass - Set the register class of the specified virtual register.
|
/// setRegClass - Set the register class of the specified virtual register.
|
||||||
///
|
|
||||||
void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
|
void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
|
||||||
|
|
||||||
/// Set the register bank to \p RegBank for \p Reg.
|
/// Set the register bank to \p RegBank for \p Reg.
|
||||||
///
|
|
||||||
void setRegBank(unsigned Reg, const RegisterBank &RegBank);
|
void setRegBank(unsigned Reg, const RegisterBank &RegBank);
|
||||||
|
|
||||||
void setRegClassOrRegBank(unsigned Reg,
|
void setRegClassOrRegBank(unsigned Reg,
|
||||||
@ -653,7 +650,6 @@ public:
|
|||||||
/// new register class, or NULL if no such class exists.
|
/// new register class, or NULL if no such class exists.
|
||||||
/// This should only be used when the constraint is known to be trivial, like
|
/// This should only be used when the constraint is known to be trivial, like
|
||||||
/// GR32 -> GR32_NOSP. Beware of increasing register pressure.
|
/// GR32 -> GR32_NOSP. Beware of increasing register pressure.
|
||||||
///
|
|
||||||
const TargetRegisterClass *constrainRegClass(unsigned Reg,
|
const TargetRegisterClass *constrainRegClass(unsigned Reg,
|
||||||
const TargetRegisterClass *RC,
|
const TargetRegisterClass *RC,
|
||||||
unsigned MinNumRegs = 0);
|
unsigned MinNumRegs = 0);
|
||||||
@ -665,12 +661,10 @@ public:
|
|||||||
/// This method can be used after constraints have been removed from a
|
/// This method can be used after constraints have been removed from a
|
||||||
/// virtual register, for example after removing instructions or splitting
|
/// virtual register, for example after removing instructions or splitting
|
||||||
/// the live range.
|
/// the live range.
|
||||||
///
|
|
||||||
bool recomputeRegClass(unsigned Reg);
|
bool recomputeRegClass(unsigned Reg);
|
||||||
|
|
||||||
/// createVirtualRegister - Create and return a new virtual register in the
|
/// createVirtualRegister - Create and return a new virtual register in the
|
||||||
/// function with the specified register class.
|
/// function with the specified register class.
|
||||||
///
|
|
||||||
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
|
unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
|
||||||
|
|
||||||
/// Accessor for VRegToType. This accessor should only be used
|
/// Accessor for VRegToType. This accessor should only be used
|
||||||
@ -704,7 +698,6 @@ public:
|
|||||||
unsigned createIncompleteVirtualRegister();
|
unsigned createIncompleteVirtualRegister();
|
||||||
|
|
||||||
/// getNumVirtRegs - Return the number of virtual registers created.
|
/// getNumVirtRegs - Return the number of virtual registers created.
|
||||||
///
|
|
||||||
unsigned getNumVirtRegs() const { return VRegInfo.size(); }
|
unsigned getNumVirtRegs() const { return VRegInfo.size(); }
|
||||||
|
|
||||||
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
|
/// clearVirtRegs - Remove all virtual registers (after physreg assignment).
|
||||||
@ -810,7 +803,6 @@ public:
|
|||||||
///
|
///
|
||||||
/// Reserved registers may belong to an allocatable register class, but the
|
/// Reserved registers may belong to an allocatable register class, but the
|
||||||
/// target has explicitly requested that they are not used.
|
/// target has explicitly requested that they are not used.
|
||||||
///
|
|
||||||
bool isReserved(unsigned PhysReg) const {
|
bool isReserved(unsigned PhysReg) const {
|
||||||
return getReservedRegs().test(PhysReg);
|
return getReservedRegs().test(PhysReg);
|
||||||
}
|
}
|
||||||
@ -838,8 +830,8 @@ public:
|
|||||||
|
|
||||||
// Iteration support for the live-ins set. It's kept in sorted order
|
// Iteration support for the live-ins set. It's kept in sorted order
|
||||||
// by register number.
|
// by register number.
|
||||||
typedef std::vector<std::pair<unsigned,unsigned>>::const_iterator
|
using livein_iterator =
|
||||||
livein_iterator;
|
std::vector<std::pair<unsigned,unsigned>>::const_iterator;
|
||||||
livein_iterator livein_begin() const { return LiveIns.begin(); }
|
livein_iterator livein_begin() const { return LiveIns.begin(); }
|
||||||
livein_iterator livein_end() const { return LiveIns.end(); }
|
livein_iterator livein_end() const { return LiveIns.end(); }
|
||||||
bool livein_empty() const { return LiveIns.empty(); }
|
bool livein_empty() const { return LiveIns.empty(); }
|
||||||
@ -910,10 +902,10 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
typedef std::iterator<std::forward_iterator_tag,
|
using reference = std::iterator<std::forward_iterator_tag,
|
||||||
MachineInstr, ptrdiff_t>::reference reference;
|
MachineInstr, ptrdiff_t>::reference;
|
||||||
typedef std::iterator<std::forward_iterator_tag,
|
using pointer = std::iterator<std::forward_iterator_tag,
|
||||||
MachineInstr, ptrdiff_t>::pointer pointer;
|
MachineInstr, ptrdiff_t>::pointer;
|
||||||
|
|
||||||
defusechain_iterator() = default;
|
defusechain_iterator() = default;
|
||||||
|
|
||||||
@ -1016,10 +1008,10 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
typedef std::iterator<std::forward_iterator_tag,
|
using reference = std::iterator<std::forward_iterator_tag,
|
||||||
MachineInstr, ptrdiff_t>::reference reference;
|
MachineInstr, ptrdiff_t>::reference;
|
||||||
typedef std::iterator<std::forward_iterator_tag,
|
using pointer = std::iterator<std::forward_iterator_tag,
|
||||||
MachineInstr, ptrdiff_t>::pointer pointer;
|
MachineInstr, ptrdiff_t>::pointer;
|
||||||
|
|
||||||
defusechain_instr_iterator() = default;
|
defusechain_instr_iterator() = default;
|
||||||
|
|
||||||
|
@ -104,10 +104,15 @@ extern cl::opt<bool> ForceBottomUp;
|
|||||||
|
|
||||||
class LiveIntervals;
|
class LiveIntervals;
|
||||||
class MachineDominatorTree;
|
class MachineDominatorTree;
|
||||||
|
class MachineFunction;
|
||||||
|
class MachineInstr;
|
||||||
class MachineLoopInfo;
|
class MachineLoopInfo;
|
||||||
class RegisterClassInfo;
|
class RegisterClassInfo;
|
||||||
class SchedDFSResult;
|
class SchedDFSResult;
|
||||||
class ScheduleHazardRecognizer;
|
class ScheduleHazardRecognizer;
|
||||||
|
class TargetInstrInfo;
|
||||||
|
class TargetPassConfig;
|
||||||
|
class TargetRegisterInfo;
|
||||||
|
|
||||||
/// MachineSchedContext provides enough context from the MachineScheduler pass
|
/// MachineSchedContext provides enough context from the MachineScheduler pass
|
||||||
/// for the target to instantiate a scheduler.
|
/// for the target to instantiate a scheduler.
|
||||||
@ -129,10 +134,10 @@ struct MachineSchedContext {
|
|||||||
/// schedulers.
|
/// schedulers.
|
||||||
class MachineSchedRegistry : public MachinePassRegistryNode {
|
class MachineSchedRegistry : public MachinePassRegistryNode {
|
||||||
public:
|
public:
|
||||||
typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
|
using ScheduleDAGCtor = ScheduleDAGInstrs *(*)(MachineSchedContext *);
|
||||||
|
|
||||||
// RegisterPassParser requires a (misnamed) FunctionPassCtor type.
|
// RegisterPassParser requires a (misnamed) FunctionPassCtor type.
|
||||||
typedef ScheduleDAGCtor FunctionPassCtor;
|
using FunctionPassCtor = ScheduleDAGCtor;
|
||||||
|
|
||||||
static MachinePassRegistry Registry;
|
static MachinePassRegistry Registry;
|
||||||
|
|
||||||
@ -527,7 +532,7 @@ public:
|
|||||||
|
|
||||||
unsigned size() const { return Queue.size(); }
|
unsigned size() const { return Queue.size(); }
|
||||||
|
|
||||||
typedef std::vector<SUnit*>::iterator iterator;
|
using iterator = std::vector<SUnit*>::iterator;
|
||||||
|
|
||||||
iterator begin() { return Queue.begin(); }
|
iterator begin() { return Queue.begin(); }
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
//===---------- CostAllocator.h - PBQP Cost Allocator -----------*- C++ -*-===//
|
//===- CostAllocator.h - PBQP Cost Allocator --------------------*- C++ -*-===//
|
||||||
//
|
//
|
||||||
// The LLVM Compiler Infrastructure
|
// The LLVM Compiler Infrastructure
|
||||||
//
|
//
|
||||||
@ -19,26 +19,28 @@
|
|||||||
#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
|
#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
|
||||||
|
|
||||||
#include "llvm/ADT/DenseSet.h"
|
#include "llvm/ADT/DenseSet.h"
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstdint>
|
||||||
#include <memory>
|
#include <memory>
|
||||||
#include <type_traits>
|
|
||||||
|
|
||||||
namespace llvm {
|
namespace llvm {
|
||||||
namespace PBQP {
|
namespace PBQP {
|
||||||
|
|
||||||
template <typename ValueT>
|
template <typename ValueT> class ValuePool {
|
||||||
class ValuePool {
|
|
||||||
public:
|
public:
|
||||||
typedef std::shared_ptr<const ValueT> PoolRef;
|
using PoolRef = std::shared_ptr<const ValueT>;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
|
class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
|
||||||
public:
|
public:
|
||||||
template <typename ValueKeyT>
|
template <typename ValueKeyT>
|
||||||
PoolEntry(ValuePool &Pool, ValueKeyT Value)
|
PoolEntry(ValuePool &Pool, ValueKeyT Value)
|
||||||
: Pool(Pool), Value(std::move(Value)) {}
|
: Pool(Pool), Value(std::move(Value)) {}
|
||||||
|
|
||||||
~PoolEntry() { Pool.removeEntry(this); }
|
~PoolEntry() { Pool.removeEntry(this); }
|
||||||
const ValueT& getValue() const { return Value; }
|
|
||||||
|
const ValueT &getValue() const { return Value; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
ValuePool &Pool;
|
ValuePool &Pool;
|
||||||
ValueT Value;
|
ValueT Value;
|
||||||
@ -46,10 +48,10 @@ private:
|
|||||||
|
|
||||||
class PoolEntryDSInfo {
|
class PoolEntryDSInfo {
|
||||||
public:
|
public:
|
||||||
static inline PoolEntry* getEmptyKey() { return nullptr; }
|
static inline PoolEntry *getEmptyKey() { return nullptr; }
|
||||||
|
|
||||||
static inline PoolEntry* getTombstoneKey() {
|
static inline PoolEntry *getTombstoneKey() {
|
||||||
return reinterpret_cast<PoolEntry*>(static_cast<uintptr_t>(1));
|
return reinterpret_cast<PoolEntry *>(static_cast<uintptr_t>(1));
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename ValueKeyT>
|
template <typename ValueKeyT>
|
||||||
@ -66,8 +68,7 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
template <typename ValueKeyT1, typename ValueKeyT2>
|
template <typename ValueKeyT1, typename ValueKeyT2>
|
||||||
static
|
static bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
|
||||||
bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
|
|
||||||
return C1 == C2;
|
return C1 == C2;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -83,10 +84,9 @@ private:
|
|||||||
return P1 == P2;
|
return P1 == P2;
|
||||||
return isEqual(P1->getValue(), P2);
|
return isEqual(P1->getValue(), P2);
|
||||||
}
|
}
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef DenseSet<PoolEntry*, PoolEntryDSInfo> EntrySetT;
|
using EntrySetT = DenseSet<PoolEntry *, PoolEntryDSInfo>;
|
||||||
|
|
||||||
EntrySetT EntrySet;
|
EntrySetT EntrySet;
|
||||||
|
|
||||||
@ -105,28 +105,31 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
template <typename VectorT, typename MatrixT>
|
template <typename VectorT, typename MatrixT> class PoolCostAllocator {
|
||||||
class PoolCostAllocator {
|
|
||||||
private:
|
private:
|
||||||
typedef ValuePool<VectorT> VectorCostPool;
|
using VectorCostPool = ValuePool<VectorT>;
|
||||||
typedef ValuePool<MatrixT> MatrixCostPool;
|
using MatrixCostPool = ValuePool<MatrixT>;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
typedef VectorT Vector;
|
using Vector = VectorT;
|
||||||
typedef MatrixT Matrix;
|
using Matrix = MatrixT;
|
||||||
typedef typename VectorCostPool::PoolRef VectorPtr;
|
using VectorPtr = typename VectorCostPool::PoolRef;
|
||||||
typedef typename MatrixCostPool::PoolRef MatrixPtr;
|
using MatrixPtr = typename MatrixCostPool::PoolRef;
|
||||||
|
|
||||||
template <typename VectorKeyT>
|
template <typename VectorKeyT> VectorPtr getVector(VectorKeyT v) {
|
||||||
VectorPtr getVector(VectorKeyT v) { return VectorPool.getValue(std::move(v)); }
|
return VectorPool.getValue(std::move(v));
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename MatrixKeyT> MatrixPtr getMatrix(MatrixKeyT m) {
|
||||||
|
return MatrixPool.getValue(std::move(m));
|
||||||
|
}
|
||||||
|
|
||||||
template <typename MatrixKeyT>
|
|
||||||
MatrixPtr getMatrix(MatrixKeyT m) { return MatrixPool.getValue(std::move(m)); }
|
|
||||||
private:
|
private:
|
||||||
VectorCostPool VectorPool;
|
VectorCostPool VectorPool;
|
||||||
MatrixCostPool MatrixPool;
|
MatrixCostPool MatrixPool;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace PBQP
|
} // end namespace PBQP
|
||||||
} // namespace llvm
|
} // end namespace llvm
|
||||||
|
|
||||||
#endif
|
#endif // LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
//===-------------------- Graph.h - PBQP Graph ------------------*- C++ -*-===//
|
//===- Graph.h - PBQP Graph -------------------------------------*- C++ -*-===//
|
||||||
//
|
//
|
||||||
// The LLVM Compiler Infrastructure
|
// The LLVM Compiler Infrastructure
|
||||||
//
|
//
|
||||||
@ -11,16 +11,14 @@
|
|||||||
//
|
//
|
||||||
//===----------------------------------------------------------------------===//
|
//===----------------------------------------------------------------------===//
|
||||||
|
|
||||||
|
|
||||||
#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
|
#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
|
||||||
#define LLVM_CODEGEN_PBQP_GRAPH_H
|
#define LLVM_CODEGEN_PBQP_GRAPH_H
|
||||||
|
|
||||||
#include "llvm/ADT/STLExtras.h"
|
#include "llvm/ADT/STLExtras.h"
|
||||||
#include "llvm/Support/Debug.h"
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
#include <iterator>
|
||||||
#include <limits>
|
#include <limits>
|
||||||
#include <utility>
|
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
namespace llvm {
|
namespace llvm {
|
||||||
@ -28,8 +26,8 @@ namespace PBQP {
|
|||||||
|
|
||||||
class GraphBase {
|
class GraphBase {
|
||||||
public:
|
public:
|
||||||
typedef unsigned NodeId;
|
using NodeId = unsigned;
|
||||||
typedef unsigned EdgeId;
|
using EdgeId = unsigned;
|
||||||
|
|
||||||
/// @brief Returns a value representing an invalid (non-existent) node.
|
/// @brief Returns a value representing an invalid (non-existent) node.
|
||||||
static NodeId invalidNodeId() {
|
static NodeId invalidNodeId() {
|
||||||
@ -48,32 +46,32 @@ namespace PBQP {
|
|||||||
template <typename SolverT>
|
template <typename SolverT>
|
||||||
class Graph : public GraphBase {
|
class Graph : public GraphBase {
|
||||||
private:
|
private:
|
||||||
typedef typename SolverT::CostAllocator CostAllocator;
|
using CostAllocator = typename SolverT::CostAllocator;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
typedef typename SolverT::RawVector RawVector;
|
using RawVector = typename SolverT::RawVector;
|
||||||
typedef typename SolverT::RawMatrix RawMatrix;
|
using RawMatrix = typename SolverT::RawMatrix;
|
||||||
typedef typename SolverT::Vector Vector;
|
using Vector = typename SolverT::Vector;
|
||||||
typedef typename SolverT::Matrix Matrix;
|
using Matrix = typename SolverT::Matrix;
|
||||||
typedef typename CostAllocator::VectorPtr VectorPtr;
|
using VectorPtr = typename CostAllocator::VectorPtr;
|
||||||
typedef typename CostAllocator::MatrixPtr MatrixPtr;
|
using MatrixPtr = typename CostAllocator::MatrixPtr;
|
||||||
typedef typename SolverT::NodeMetadata NodeMetadata;
|
using NodeMetadata = typename SolverT::NodeMetadata;
|
||||||
typedef typename SolverT::EdgeMetadata EdgeMetadata;
|
using EdgeMetadata = typename SolverT::EdgeMetadata;
|
||||||
typedef typename SolverT::GraphMetadata GraphMetadata;
|
using GraphMetadata = typename SolverT::GraphMetadata;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
|
||||||
class NodeEntry {
|
class NodeEntry {
|
||||||
public:
|
public:
|
||||||
typedef std::vector<EdgeId> AdjEdgeList;
|
using AdjEdgeList = std::vector<EdgeId>;
|
||||||
typedef AdjEdgeList::size_type AdjEdgeIdx;
|
using AdjEdgeIdx = AdjEdgeList::size_type;
|
||||||
typedef AdjEdgeList::const_iterator AdjEdgeItr;
|
using AdjEdgeItr = AdjEdgeList::const_iterator;
|
||||||
|
|
||||||
|
NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
|
||||||
|
|
||||||
static AdjEdgeIdx getInvalidAdjEdgeIdx() {
|
static AdjEdgeIdx getInvalidAdjEdgeIdx() {
|
||||||
return std::numeric_limits<AdjEdgeIdx>::max();
|
return std::numeric_limits<AdjEdgeIdx>::max();
|
||||||
}
|
}
|
||||||
|
|
||||||
NodeEntry(VectorPtr Costs) : Costs(std::move(Costs)) {}
|
|
||||||
|
|
||||||
AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
|
AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
|
||||||
AdjEdgeIdx Idx = AdjEdgeIds.size();
|
AdjEdgeIdx Idx = AdjEdgeIds.size();
|
||||||
AdjEdgeIds.push_back(EId);
|
AdjEdgeIds.push_back(EId);
|
||||||
@ -96,6 +94,7 @@ namespace PBQP {
|
|||||||
|
|
||||||
VectorPtr Costs;
|
VectorPtr Costs;
|
||||||
NodeMetadata Metadata;
|
NodeMetadata Metadata;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
AdjEdgeList AdjEdgeIds;
|
AdjEdgeList AdjEdgeIds;
|
||||||
};
|
};
|
||||||
@ -150,8 +149,10 @@ namespace PBQP {
|
|||||||
|
|
||||||
NodeId getN1Id() const { return NIds[0]; }
|
NodeId getN1Id() const { return NIds[0]; }
|
||||||
NodeId getN2Id() const { return NIds[1]; }
|
NodeId getN2Id() const { return NIds[1]; }
|
||||||
|
|
||||||
MatrixPtr Costs;
|
MatrixPtr Costs;
|
||||||
EdgeMetadata Metadata;
|
EdgeMetadata Metadata;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
NodeId NIds[2];
|
NodeId NIds[2];
|
||||||
typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
|
typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
|
||||||
@ -161,18 +162,20 @@ namespace PBQP {
|
|||||||
|
|
||||||
GraphMetadata Metadata;
|
GraphMetadata Metadata;
|
||||||
CostAllocator CostAlloc;
|
CostAllocator CostAlloc;
|
||||||
SolverT *Solver;
|
SolverT *Solver = nullptr;
|
||||||
|
|
||||||
typedef std::vector<NodeEntry> NodeVector;
|
using NodeVector = std::vector<NodeEntry>;
|
||||||
typedef std::vector<NodeId> FreeNodeVector;
|
using FreeNodeVector = std::vector<NodeId>;
|
||||||
NodeVector Nodes;
|
NodeVector Nodes;
|
||||||
FreeNodeVector FreeNodeIds;
|
FreeNodeVector FreeNodeIds;
|
||||||
|
|
||||||
typedef std::vector<EdgeEntry> EdgeVector;
|
using EdgeVector = std::vector<EdgeEntry>;
|
||||||
typedef std::vector<EdgeId> FreeEdgeVector;
|
using FreeEdgeVector = std::vector<EdgeId>;
|
||||||
EdgeVector Edges;
|
EdgeVector Edges;
|
||||||
FreeEdgeVector FreeEdgeIds;
|
FreeEdgeVector FreeEdgeIds;
|
||||||
|
|
||||||
|
Graph(const Graph &Other) {}
|
||||||
|
|
||||||
// ----- INTERNAL METHODS -----
|
// ----- INTERNAL METHODS -----
|
||||||
|
|
||||||
NodeEntry &getNode(NodeId NId) {
|
NodeEntry &getNode(NodeId NId) {
|
||||||
@ -220,20 +223,18 @@ namespace PBQP {
|
|||||||
return EId;
|
return EId;
|
||||||
}
|
}
|
||||||
|
|
||||||
Graph(const Graph &Other) {}
|
|
||||||
void operator=(const Graph &Other) {}
|
void operator=(const Graph &Other) {}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
using AdjEdgeItr = typename NodeEntry::AdjEdgeItr;
|
||||||
typedef typename NodeEntry::AdjEdgeItr AdjEdgeItr;
|
|
||||||
|
|
||||||
class NodeItr {
|
class NodeItr {
|
||||||
public:
|
public:
|
||||||
typedef std::forward_iterator_tag iterator_category;
|
using iterator_category = std::forward_iterator_tag;
|
||||||
typedef NodeId value_type;
|
using value_type = NodeId;
|
||||||
typedef int difference_type;
|
using difference_type = int;
|
||||||
typedef NodeId* pointer;
|
using pointer = NodeId *;
|
||||||
typedef NodeId& reference;
|
using reference = NodeId &;
|
||||||
|
|
||||||
NodeItr(NodeId CurNId, const Graph &G)
|
NodeItr(NodeId CurNId, const Graph &G)
|
||||||
: CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
|
: CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
|
||||||
@ -283,53 +284,65 @@ namespace PBQP {
|
|||||||
|
|
||||||
class NodeIdSet {
|
class NodeIdSet {
|
||||||
public:
|
public:
|
||||||
NodeIdSet(const Graph &G) : G(G) { }
|
NodeIdSet(const Graph &G) : G(G) {}
|
||||||
|
|
||||||
NodeItr begin() const { return NodeItr(0, G); }
|
NodeItr begin() const { return NodeItr(0, G); }
|
||||||
NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
|
NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
|
||||||
|
|
||||||
bool empty() const { return G.Nodes.empty(); }
|
bool empty() const { return G.Nodes.empty(); }
|
||||||
|
|
||||||
typename NodeVector::size_type size() const {
|
typename NodeVector::size_type size() const {
|
||||||
return G.Nodes.size() - G.FreeNodeIds.size();
|
return G.Nodes.size() - G.FreeNodeIds.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const Graph& G;
|
const Graph& G;
|
||||||
};
|
};
|
||||||
|
|
||||||
class EdgeIdSet {
|
class EdgeIdSet {
|
||||||
public:
|
public:
|
||||||
EdgeIdSet(const Graph &G) : G(G) { }
|
EdgeIdSet(const Graph &G) : G(G) {}
|
||||||
|
|
||||||
EdgeItr begin() const { return EdgeItr(0, G); }
|
EdgeItr begin() const { return EdgeItr(0, G); }
|
||||||
EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
|
EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
|
||||||
|
|
||||||
bool empty() const { return G.Edges.empty(); }
|
bool empty() const { return G.Edges.empty(); }
|
||||||
|
|
||||||
typename NodeVector::size_type size() const {
|
typename NodeVector::size_type size() const {
|
||||||
return G.Edges.size() - G.FreeEdgeIds.size();
|
return G.Edges.size() - G.FreeEdgeIds.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const Graph& G;
|
const Graph& G;
|
||||||
};
|
};
|
||||||
|
|
||||||
class AdjEdgeIdSet {
|
class AdjEdgeIdSet {
|
||||||
public:
|
public:
|
||||||
AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) { }
|
AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) {}
|
||||||
|
|
||||||
typename NodeEntry::AdjEdgeItr begin() const {
|
typename NodeEntry::AdjEdgeItr begin() const {
|
||||||
return NE.getAdjEdgeIds().begin();
|
return NE.getAdjEdgeIds().begin();
|
||||||
}
|
}
|
||||||
|
|
||||||
typename NodeEntry::AdjEdgeItr end() const {
|
typename NodeEntry::AdjEdgeItr end() const {
|
||||||
return NE.getAdjEdgeIds().end();
|
return NE.getAdjEdgeIds().end();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool empty() const { return NE.getAdjEdgeIds().empty(); }
|
bool empty() const { return NE.getAdjEdgeIds().empty(); }
|
||||||
|
|
||||||
typename NodeEntry::AdjEdgeList::size_type size() const {
|
typename NodeEntry::AdjEdgeList::size_type size() const {
|
||||||
return NE.getAdjEdgeIds().size();
|
return NE.getAdjEdgeIds().size();
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const NodeEntry &NE;
|
const NodeEntry &NE;
|
||||||
};
|
};
|
||||||
|
|
||||||
/// @brief Construct an empty PBQP graph.
|
/// @brief Construct an empty PBQP graph.
|
||||||
Graph() : Solver(nullptr) {}
|
Graph() = default;
|
||||||
|
|
||||||
/// @brief Construct an empty PBQP graph with the given graph metadata.
|
/// @brief Construct an empty PBQP graph with the given graph metadata.
|
||||||
Graph(GraphMetadata Metadata)
|
Graph(GraphMetadata Metadata) : Metadata(std::move(Metadata)) {}
|
||||||
: Metadata(std::move(Metadata)), Solver(nullptr) {}
|
|
||||||
|
|
||||||
/// @brief Get a reference to the graph metadata.
|
/// @brief Get a reference to the graph metadata.
|
||||||
GraphMetadata& getMetadata() { return Metadata; }
|
GraphMetadata& getMetadata() { return Metadata; }
|
||||||
@ -656,7 +669,7 @@ namespace PBQP {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace PBQP
|
} // end namespace PBQP
|
||||||
} // namespace llvm
|
} // end namespace llvm
|
||||||
|
|
||||||
#endif // LLVM_CODEGEN_PBQP_GRAPH_HPP
|
#endif // LLVM_CODEGEN_PBQP_GRAPH_HPP
|
||||||
|
@@ -1,4 +1,4 @@
-//===------ Math.h - PBQP Vector and Matrix classes -------------*- C++ -*-===//
+//===- Math.h - PBQP Vector and Matrix classes ------------------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -11,20 +11,22 @@
 #define LLVM_CODEGEN_PBQP_MATH_H

 #include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/STLExtras.h"
 #include <algorithm>
 #include <cassert>
 #include <functional>
+#include <memory>

 namespace llvm {
 namespace PBQP {

-typedef float PBQPNum;
+using PBQPNum = float;

 /// \brief PBQP Vector class.
 class Vector {
 friend hash_code hash_value(const Vector &);
-public:

+public:
 /// \brief Construct a PBQP vector of the given size.
 explicit Vector(unsigned Length)
     : Length(Length), Data(llvm::make_unique<PBQPNum []>(Length)) {}
@@ -120,8 +122,8 @@ OStream& operator<<(OStream &OS, const Vector &V) {
 class Matrix {
 private:
 friend hash_code hash_value(const Matrix &);
-public:

+public:
 /// \brief Construct a PBQP Matrix with the given dimensions.
 Matrix(unsigned Rows, unsigned Cols) :
     Rows(Rows), Cols(Cols), Data(llvm::make_unique<PBQPNum []>(Rows * Cols)) {
@@ -253,9 +255,11 @@ OStream& operator<<(OStream &OS, const Matrix &M) {
 template <typename Metadata>
 class MDVector : public Vector {
 public:
-MDVector(const Vector &v) : Vector(v), md(*this) { }
+MDVector(const Vector &v) : Vector(v), md(*this) {}
 MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }

 const Metadata& getMetadata() const { return md; }

 private:
 Metadata md;
 };
@@ -268,9 +272,11 @@ inline hash_code hash_value(const MDVector<Metadata> &V) {
 template <typename Metadata>
 class MDMatrix : public Matrix {
 public:
-MDMatrix(const Matrix &m) : Matrix(m), md(*this) { }
+MDMatrix(const Matrix &m) : Matrix(m), md(*this) {}
 MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }

 const Metadata& getMetadata() const { return md; }

 private:
 Metadata md;
 };
@@ -280,7 +286,7 @@ inline hash_code hash_value(const MDMatrix<Metadata> &M) {
 return hash_value(static_cast<const Matrix&>(M));
 }

-} // namespace PBQP
-} // namespace llvm
+} // end namespace PBQP
+} // end namespace llvm

 #endif // LLVM_CODEGEN_PBQP_MATH_H
@@ -1,4 +1,4 @@
-//===----------- ReductionRules.h - Reduction Rules -------------*- C++ -*-===//
+//===- ReductionRules.h - Reduction Rules -----------------------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -17,6 +17,8 @@
 #include "Graph.h"
 #include "Math.h"
 #include "Solution.h"
+#include <cassert>
+#include <limits>

 namespace llvm {
 namespace PBQP {
@@ -27,11 +29,11 @@ namespace PBQP {
 /// neighbor. Notify the problem domain.
 template <typename GraphT>
 void applyR1(GraphT &G, typename GraphT::NodeId NId) {
-typedef typename GraphT::NodeId NodeId;
-typedef typename GraphT::EdgeId EdgeId;
-typedef typename GraphT::Vector Vector;
-typedef typename GraphT::Matrix Matrix;
-typedef typename GraphT::RawVector RawVector;
+using NodeId = typename GraphT::NodeId;
+using EdgeId = typename GraphT::EdgeId;
+using Vector = typename GraphT::Vector;
+using Matrix = typename GraphT::Matrix;
+using RawVector = typename GraphT::RawVector;

 assert(G.getNodeDegree(NId) == 1 &&
        "R1 applied to node with degree != 1.");
@@ -71,11 +73,11 @@ namespace PBQP {

 template <typename GraphT>
 void applyR2(GraphT &G, typename GraphT::NodeId NId) {
-typedef typename GraphT::NodeId NodeId;
-typedef typename GraphT::EdgeId EdgeId;
-typedef typename GraphT::Vector Vector;
-typedef typename GraphT::Matrix Matrix;
-typedef typename GraphT::RawMatrix RawMatrix;
+using NodeId = typename GraphT::NodeId;
+using EdgeId = typename GraphT::EdgeId;
+using Vector = typename GraphT::Vector;
+using Matrix = typename GraphT::Matrix;
+using RawMatrix = typename GraphT::RawMatrix;

 assert(G.getNodeDegree(NId) == 2 &&
        "R2 applied to node with degree != 2.");
@@ -177,9 +179,9 @@ namespace PBQP {
 // state.
 template <typename GraphT, typename StackT>
 Solution backpropagate(GraphT& G, StackT stack) {
-typedef GraphBase::NodeId NodeId;
-typedef typename GraphT::Matrix Matrix;
-typedef typename GraphT::RawVector RawVector;
+using NodeId = GraphBase::NodeId;
+using Matrix = typename GraphT::Matrix;
+using RawVector = typename GraphT::RawVector;

 Solution s;

@@ -215,7 +217,7 @@ namespace PBQP {
 return s;
 }

-} // namespace PBQP
-} // namespace llvm
+} // end namespace PBQP
+} // end namespace llvm

-#endif
+#endif // LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
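The hunks above mechanically replace C-style typedefs inside applyR1, applyR2 and backpropagate with C++11 alias declarations. A minimal standalone sketch of the same pattern, with hypothetical names rather than LLVM's own:

    // typedef -> using: identical meaning, but the alias reads left-to-right
    // and, unlike typedef, can itself be a template.
    #include <map>
    #include <vector>

    template <typename GraphT>
    struct SolverTraits {
      // Old style:
      //   typedef typename GraphT::NodeId NodeId;
      using NodeId = typename GraphT::NodeId;
      using NodeList = std::vector<NodeId>;
      using CostMap = std::map<NodeId, float>;
    };

    struct TinyGraph { using NodeId = unsigned; };

    int main() {
      SolverTraits<TinyGraph>::NodeList Nodes{0, 1, 2};
      SolverTraits<TinyGraph>::CostMap Costs{{0, 1.5f}};
      return (Nodes.size() == 3 && Costs.count(0)) ? 0 : 1;
    }

The rewrite does not change generated code; it only modernizes the declaration syntax, which is why the hunks touch nothing but the alias lines.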
@@ -26,7 +26,7 @@ namespace PBQP {
 /// To get the selection for each node in the problem use the getSelection method.
 class Solution {
 private:
-typedef std::map<GraphBase::NodeId, unsigned> SelectionsMap;
+using SelectionsMap = std::map<GraphBase::NodeId, unsigned>;
 SelectionsMap selections;

 unsigned r0Reductions = 0;
@@ -1,4 +1,4 @@
-//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
+//===- RegAllocPBQP.h -------------------------------------------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -16,23 +16,22 @@
 #ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
 #define LLVM_CODEGEN_PBQPRACONSTRAINT_H

+#include <algorithm>
 #include <memory>
 #include <vector>

 namespace llvm {

 namespace PBQP {
 namespace RegAlloc {

 // Forward declare PBQP graph class.
 class PBQPRAGraph;
-}
-}

-class LiveIntervals;
-class MachineBlockFrequencyInfo;
-class MachineFunction;
-class TargetRegisterInfo;
+} // end namespace RegAlloc
+} // end namespace PBQP

-typedef PBQP::RegAlloc::PBQPRAGraph PBQPRAGraph;
+using PBQPRAGraph = PBQP::RegAlloc::PBQPRAGraph;

 /// @brief Abstract base for classes implementing PBQP register allocation
 /// constraints (e.g. Spill-costs, interference, coalescing).
@@ -40,6 +39,7 @@ class PBQPRAConstraint {
 public:
 virtual ~PBQPRAConstraint() = 0;
 virtual void apply(PBQPRAGraph &G) = 0;

 private:
 virtual void anchor();
 };
@@ -59,11 +59,13 @@ public:
 if (C)
     Constraints.push_back(std::move(C));
 }

 private:
 std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;

 void anchor() override;
 };

-}
+} // end namespace llvm

-#endif /* LLVM_CODEGEN_PBQPRACONSTRAINT_H */
+#endif // LLVM_CODEGEN_PBQPRACONSTRAINT_H
@@ -140,6 +140,9 @@ namespace llvm {
 /// Greedy register allocator.
 extern char &RAGreedyID;

+/// Basic register allocator.
+extern char &RABasicID;
+
 /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
 /// assigned in VirtRegMap.
 extern char &VirtRegRewriterID;
@@ -130,10 +130,10 @@ inline hash_code hash_value(const AllowedRegVector &OptRegs) {
 /// \brief Holds graph-level metadata relevant to PBQP RA problems.
 class GraphMetadata {
 private:
-typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
+using AllowedRegVecPool = ValuePool<AllowedRegVector>;

 public:
-typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;
+using AllowedRegVecRef = AllowedRegVecPool::PoolRef;

 GraphMetadata(MachineFunction &MF,
     LiveIntervals &LIS,
@@ -167,17 +167,17 @@ private:
 /// \brief Holds solver state and other metadata relevant to each PBQP RA node.
 class NodeMetadata {
 public:
-typedef RegAlloc::AllowedRegVector AllowedRegVector;
+using AllowedRegVector = RegAlloc::AllowedRegVector;

 // The node's reduction state. The order in this enum is important,
 // as it is assumed nodes can only progress up (i.e. towards being
 // optimally reducible) when reducing the graph.
-typedef enum {
+using ReductionState = enum {
 Unprocessed,
 NotProvablyAllocatable,
 ConservativelyAllocatable,
 OptimallyReducible
-} ReductionState;
+};

 NodeMetadata() = default;

@@ -267,23 +267,23 @@ private:

 class RegAllocSolverImpl {
 private:
-typedef MDMatrix<MatrixMetadata> RAMatrix;
+using RAMatrix = MDMatrix<MatrixMetadata>;

 public:
-typedef PBQP::Vector RawVector;
-typedef PBQP::Matrix RawMatrix;
-typedef PBQP::Vector Vector;
-typedef RAMatrix Matrix;
-typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;
+using RawVector = PBQP::Vector;
+using RawMatrix = PBQP::Matrix;
+using Vector = PBQP::Vector;
+using Matrix = RAMatrix;
+using CostAllocator = PBQP::PoolCostAllocator<Vector, Matrix>;

-typedef GraphBase::NodeId NodeId;
-typedef GraphBase::EdgeId EdgeId;
+using NodeId = GraphBase::NodeId;
+using EdgeId = GraphBase::EdgeId;

-typedef RegAlloc::NodeMetadata NodeMetadata;
-struct EdgeMetadata { };
-typedef RegAlloc::GraphMetadata GraphMetadata;
+using NodeMetadata = RegAlloc::NodeMetadata;
+struct EdgeMetadata {};
+using GraphMetadata = RegAlloc::GraphMetadata;

-typedef PBQP::Graph<RegAllocSolverImpl> Graph;
+using Graph = PBQP::Graph<RegAllocSolverImpl>;

 RegAllocSolverImpl(Graph &G) : G(G) {}

@@ -426,7 +426,7 @@ private:
 std::vector<GraphBase::NodeId> reduce() {
 assert(!G.empty() && "Cannot reduce empty graph.");

-typedef GraphBase::NodeId NodeId;
+using NodeId = GraphBase::NodeId;
 std::vector<NodeId> NodeStack;

 // Consume worklists.
@@ -459,7 +459,6 @@ private:
 ConservativelyAllocatableNodes.erase(NItr);
 NodeStack.push_back(NId);
 G.disconnectAllNeighborsFromNode(NId);

 } else if (!NotProvablyAllocatableNodes.empty()) {
 NodeSet::iterator NItr =
     std::min_element(NotProvablyAllocatableNodes.begin(),
@@ -493,7 +492,7 @@ private:
 };

 Graph& G;
-typedef std::set<NodeId> NodeSet;
+using NodeSet = std::set<NodeId>;
 NodeSet OptimallyReducibleNodes;
 NodeSet ConservativelyAllocatableNodes;
 NodeSet NotProvablyAllocatableNodes;
@@ -501,7 +500,7 @@ private:

 class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
 private:
-typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
+using BaseT = PBQP::Graph<RegAllocSolverImpl>;

 public:
 PBQPRAGraph(GraphMetadata Metadata) : BaseT(std::move(Metadata)) {}
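The NodeMetadata comment above notes that the ReductionState enumerators are deliberately ordered so that a node only ever moves "up", toward being optimally reducible. A standalone sketch of code that leans on such ordering; the names below are hypothetical and this is not the LLVM implementation:

    #include <cassert>

    enum ReductionState {
      Unprocessed,
      NotProvablyAllocatable,
      ConservativelyAllocatable,
      OptimallyReducible
    };

    struct Node {
      ReductionState RS = Unprocessed;
      void setReductionState(ReductionState Next) {
        // The enumerator order encodes the allowed direction of progress.
        assert(Next >= RS && "nodes only progress towards reducibility");
        RS = Next;
      }
    };

    int main() {
      Node N;
      N.setReductionState(ConservativelyAllocatable);
      N.setReductionState(OptimallyReducible);
      return N.RS == OptimallyReducible ? 0 : 1;
    }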
@@ -204,6 +204,10 @@ private:
 void setLiveInsUsed(const MachineBasicBlock &MBB);
 };

+/// Replaces all frame index virtual registers with physical registers. Uses the
+/// register scavenger to find an appropriate register to use.
+void scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS);
+
 } // end namespace llvm

 #endif // LLVM_CODEGEN_REGISTERSCAVENGING_H
@@ -1,4 +1,4 @@
-//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
+//===- ScheduleDAGInstrs.h - MachineInstr Scheduling ------------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -15,22 +15,38 @@
 #ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
 #define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H

-#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/SparseMultiSet.h"
 #include "llvm/ADT/SparseSet.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
 #include "llvm/CodeGen/TargetSchedule.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/MC/LaneBitmask.h"
 #include "llvm/Target/TargetRegisterInfo.h"
+#include <cassert>
+#include <cstdint>
 #include <list>
+#include <utility>
+#include <vector>

 namespace llvm {

+class LiveIntervals;
 class MachineFrameInfo;
+class MachineFunction;
+class MachineInstr;
 class MachineLoopInfo;
-class MachineDominatorTree;
-class RegPressureTracker;
+class MachineOperand;
+struct MCSchedClassDesc;
 class PressureDiffs;
+class PseudoSourceValue;
+class RegPressureTracker;
+class UndefValue;
+class Value;

 /// An individual mapping from virtual register number to SUnit.
 struct VReg2SUnit {
@@ -70,31 +86,34 @@ namespace llvm {
 /// Use a SparseMultiSet to track physical registers. Storage is only
 /// allocated once for the pass. It can be cleared in constant time and reused
 /// without any frees.
-typedef SparseMultiSet<PhysRegSUOper, llvm::identity<unsigned>, uint16_t>
-    Reg2SUnitsMap;
+using Reg2SUnitsMap =
+    SparseMultiSet<PhysRegSUOper, identity<unsigned>, uint16_t>;

 /// Use SparseSet as a SparseMap by relying on the fact that it never
 /// compares ValueT's, only unsigned keys. This allows the set to be cleared
 /// between scheduling regions in constant time as long as ValueT does not
 /// require a destructor.
-typedef SparseSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMap;
+using VReg2SUnitMap = SparseSet<VReg2SUnit, VirtReg2IndexFunctor>;

 /// Track local uses of virtual registers. These uses are gathered by the DAG
 /// builder and may be consulted by the scheduler to avoid iterating an entire
 /// vreg use list.
-typedef SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMultiMap;
+using VReg2SUnitMultiMap = SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor>;

-typedef SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>
-    VReg2SUnitOperIdxMultiMap;
+using VReg2SUnitOperIdxMultiMap =
+    SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>;

+using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;

-typedef PointerUnion<const Value *, const PseudoSourceValue *> ValueType;
 struct UnderlyingObject : PointerIntPair<ValueType, 1, bool> {
 UnderlyingObject(ValueType V, bool MayAlias)
     : PointerIntPair<ValueType, 1, bool>(V, MayAlias) {}

 ValueType getValue() const { return getPointer(); }
 bool mayAlias() const { return getInt(); }
 };
-typedef SmallVector<UnderlyingObject, 4> UnderlyingObjectsVector;
+
+using UnderlyingObjectsVector = SmallVector<UnderlyingObject, 4>;

 /// A ScheduleDAG for scheduling lists of MachineInstr.
 class ScheduleDAGInstrs : public ScheduleDAG {
@@ -114,10 +133,10 @@ namespace llvm {
 /// reordering. A specialized scheduler can override
 /// TargetInstrInfo::isSchedulingBoundary then enable this flag to indicate
 /// it has taken responsibility for scheduling the terminator correctly.
-bool CanHandleTerminators;
+bool CanHandleTerminators = false;

 /// Whether lane masks should get tracked.
-bool TrackLaneMasks;
+bool TrackLaneMasks = false;

 // State specific to the current scheduling region.
 // ------------------------------------------------
@@ -155,12 +174,12 @@ namespace llvm {
 /// Tracks the last instructions in this region using each virtual register.
 VReg2SUnitOperIdxMultiMap CurrentVRegUses;

-AliasAnalysis *AAForDep;
+AliasAnalysis *AAForDep = nullptr;

 /// Remember a generic side-effecting instruction as we proceed.
 /// No other SU ever gets scheduled around it (except in the special
 /// case of a huge region that gets reduced).
-SUnit *BarrierChain;
+SUnit *BarrierChain = nullptr;

 public:
 /// A list of SUnits, used in Value2SUsMap, during DAG construction.
@@ -168,7 +187,7 @@ namespace llvm {
 /// implementation of this data structure, such as a singly linked list
 /// with a memory pool (SmallVector was tried but slow and SparseSet is not
 /// applicable).
-typedef std::list<SUnit *> SUList;
+using SUList = std::list<SUnit *>;

 protected:
 /// \brief A map from ValueType to SUList, used during DAG construction, as
@@ -216,13 +235,13 @@ namespace llvm {
 /// For an unanalyzable memory access, this Value is used in maps.
 UndefValue *UnknownValue;

-typedef std::vector<std::pair<MachineInstr *, MachineInstr *>>
-    DbgValueVector;
+using DbgValueVector =
+    std::vector<std::pair<MachineInstr *, MachineInstr *>>;
 /// Remember instruction that precedes DBG_VALUE.
 /// These are generated by buildSchedGraph but persist so they can be
 /// referenced when emitting the final schedule.
 DbgValueVector DbgValues;
-MachineInstr *FirstDbgValue;
+MachineInstr *FirstDbgValue = nullptr;

 /// Set of live physical registers for updating kill flags.
 LivePhysRegs LiveRegs;
@@ -232,7 +251,7 @@ namespace llvm {
     const MachineLoopInfo *mli,
     bool RemoveKillFlags = false);

-~ScheduleDAGInstrs() override {}
+~ScheduleDAGInstrs() override = default;

 /// Gets the machine model for instruction scheduling.
 const TargetSchedModel *getSchedModel() const { return &SchedModel; }
@@ -354,6 +373,7 @@ namespace llvm {
     return nullptr;
 return I->second;
 }

 } // end namespace llvm

-#endif
+#endif // LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
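Several hunks above move member defaults out of the constructors and into the declarations themselves (bool TrackLaneMasks = false; SUnit *BarrierChain = nullptr; and so on). A minimal standalone sketch of that default-member-initializer pattern, using a hypothetical class:

    #include <cstdio>

    class RegionState {
      // Defaults live next to the declarations, so every constructor that
      // does not mention these members still leaves them well defined.
      bool TrackLaneMasks = false;
      bool CanHandleTerminators = false;
      const char *BarrierTag = nullptr;

    public:
      RegionState() = default;                       // uses the defaults above
      explicit RegionState(const char *Tag) : BarrierTag(Tag) {}

      bool tracking() const { return TrackLaneMasks; }
      const char *tag() const { return BarrierTag ? BarrierTag : "<none>"; }
    };

    int main() {
      RegionState A;
      RegionState B("barrier");
      std::printf("%d %s %s\n", A.tracking(), A.tag(), B.tag());
      return 0;
    }

The benefit mirrors what the diff achieves: the defaulted destructor and any future constructors cannot accidentally leave these flags uninitialized.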
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ---------*- C++ -*-===//
+//===- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ----------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -15,35 +15,72 @@
 #ifndef LLVM_CODEGEN_SELECTIONDAG_H
 #define LLVM_CODEGEN_SELECTIONDAG_H

+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/FoldingSet.h"
 #include "llvm/ADT/ilist.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Allocator.h"
 #include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/RecyclingAllocator.h"
-#include "llvm/Target/TargetMachine.h"
+#include <algorithm>
 #include <cassert>
+#include <cstdint>
+#include <functional>
 #include <map>
 #include <string>
+#include <tuple>
+#include <utility>
 #include <vector>

 namespace llvm {

+class BlockAddress;
+class Constant;
+class ConstantFP;
+class ConstantInt;
+class DataLayout;
+struct fltSemantics;
+class GlobalValue;
 struct KnownBits;
+class LLVMContext;
+class MachineBasicBlock;
 class MachineConstantPoolValue;
-class MachineFunction;
-class MDNode;
+class MCSymbol;
 class OptimizationRemarkEmitter;
 class SDDbgValue;
-class TargetLowering;
+class SelectionDAG;
 class SelectionDAGTargetInfo;
+class TargetLowering;
+class TargetMachine;
+class TargetSubtargetInfo;
+class Value;

 class SDVTListNode : public FoldingSetNode {
 friend struct FoldingSetTrait<SDVTListNode>;

 /// A reference to an Interned FoldingSetNodeID for this node.
 /// The Allocator in SelectionDAG holds the data.
 /// SDVTList contains all types which are frequently accessed in SelectionDAG.
@@ -55,11 +92,13 @@ class SDVTListNode : public FoldingSetNode {
 /// The hash value for SDVTList is fixed, so cache it to avoid
 /// hash calculation.
 unsigned HashValue;

 public:
 SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
     FastID(ID), VTs(VT), NumVTs(Num) {
 HashValue = ID.ComputeHash();
 }

 SDVTList getSDVTList() {
 SDVTList result = {VTs, NumVTs};
 return result;
@@ -72,12 +111,14 @@ template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTLis
 static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
 ID = X.FastID;
 }

 static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
     unsigned IDHash, FoldingSetNodeID &TempID) {
 if (X.HashValue != IDHash)
     return false;
 return ID == X.FastID;
 }

 static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
 return X.HashValue;
 }
@@ -104,13 +145,13 @@ class SDDbgInfo {
 BumpPtrAllocator Alloc;
 SmallVector<SDDbgValue*, 32> DbgValues;
 SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
-typedef DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMapType;
+using DbgValMapType = DenseMap<const SDNode *, SmallVector<SDDbgValue *, 2>>;
 DbgValMapType DbgValMap;

-void operator=(const SDDbgInfo&) = delete;
-SDDbgInfo(const SDDbgInfo&) = delete;
 public:
-SDDbgInfo() {}
+SDDbgInfo() = default;
+SDDbgInfo(const SDDbgInfo &) = delete;
+SDDbgInfo &operator=(const SDDbgInfo &) = delete;

 void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
 if (isParameter) {
@@ -144,14 +185,14 @@ public:
 return ArrayRef<SDDbgValue*>();
 }

-typedef SmallVectorImpl<SDDbgValue*>::iterator DbgIterator;
+using DbgIterator = SmallVectorImpl<SDDbgValue*>::iterator;

 DbgIterator DbgBegin() { return DbgValues.begin(); }
 DbgIterator DbgEnd() { return DbgValues.end(); }
 DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
 DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
 };

-class SelectionDAG;
 void checkForCycles(const SelectionDAG *DAG, bool force = false);

 /// This is used to represent a portion of an LLVM function in a low-level
@@ -167,8 +208,8 @@ void checkForCycles(const SelectionDAG *DAG, bool force = false);
 ///
 class SelectionDAG {
 const TargetMachine &TM;
-const SelectionDAGTargetInfo *TSI;
-const TargetLowering *TLI;
+const SelectionDAGTargetInfo *TSI = nullptr;
+const TargetLowering *TLI = nullptr;
 MachineFunction *MF;
 LLVMContext *Context;
 CodeGenOpt::Level OptLevel;
@@ -188,9 +229,9 @@ class SelectionDAG {

 /// The AllocatorType for allocating SDNodes. We use
 /// pool allocation with recycling.
-typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
-    alignof(MostAlignedSDNode)>
-    NodeAllocatorType;
+using NodeAllocatorType = RecyclingAllocator<BumpPtrAllocator, SDNode,
-    sizeof(LargestSDNode),
-    alignof(MostAlignedSDNode)>;

 /// Pool allocation for nodes.
 NodeAllocatorType NodeAllocator;
@@ -243,9 +284,11 @@ public:

 struct DAGNodeDeletedListener : public DAGUpdateListener {
 std::function<void(SDNode *, SDNode *)> Callback;

 DAGNodeDeletedListener(SelectionDAG &DAG,
     std::function<void(SDNode *, SDNode *)> Callback)
     : DAGUpdateListener(DAG), Callback(std::move(Callback)) {}

 void NodeDeleted(SDNode *N, SDNode *E) override { Callback(N, E); }
 };

@@ -254,7 +297,7 @@ public:
 /// have legal types. This is important after type legalization since
 /// any illegally typed nodes generated after this point will not experience
 /// type legalization.
-bool NewNodesMustHaveLegalTypes;
+bool NewNodesMustHaveLegalTypes = false;

 private:
 /// DAGUpdateListener is a friend so it can manipulate the listener stack.
@@ -262,7 +305,7 @@ private:

 /// Linked list of registered DAGUpdateListener instances.
 /// This stack is maintained by DAGUpdateListener RAII.
-DAGUpdateListener *UpdateListeners;
+DAGUpdateListener *UpdateListeners = nullptr;

 /// Implementation of setSubgraphColor.
 /// Return whether we had to truncate the search.
@@ -316,11 +359,10 @@ private:
 Node->OperandList = nullptr;
 }

-void operator=(const SelectionDAG&) = delete;
-SelectionDAG(const SelectionDAG&) = delete;

 public:
-explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level);
+explicit SelectionDAG(const TargetMachine &TM, CodeGenOpt::Level);
+SelectionDAG(const SelectionDAG &) = delete;
+SelectionDAG &operator=(const SelectionDAG &) = delete;
 ~SelectionDAG();

 /// Prepare this SelectionDAG to process code in the given MachineFunction.
@@ -364,12 +406,16 @@ public:
 /// Convenience for setting subgraph color attribute.
 void setSubgraphColor(SDNode *N, const char *Color);

-typedef ilist<SDNode>::const_iterator allnodes_const_iterator;
+using allnodes_const_iterator = ilist<SDNode>::const_iterator;

 allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
 allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
-typedef ilist<SDNode>::iterator allnodes_iterator;
+
+using allnodes_iterator = ilist<SDNode>::iterator;

 allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
 allnodes_iterator allnodes_end() { return AllNodes.end(); }

 ilist<SDNode>::size_type allnodes_size() const {
 return AllNodes.size();
 }
@@ -475,7 +521,6 @@ public:

 //===--------------------------------------------------------------------===//
 // Node creation methods.
-//

 /// \brief Create a ConstantSDNode wrapping a constant value.
 /// If VT is a vector type, the constant is splatted into a BUILD_VECTOR.
@@ -1251,9 +1296,11 @@ public:

 SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
 SDDbgInfo::DbgIterator DbgEnd() { return DbgInfo->DbgEnd(); }

 SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
 return DbgInfo->ByvalParmDbgBegin();
 }

 SDDbgInfo::DbgIterator ByvalParmDbgEnd() {
 return DbgInfo->ByvalParmDbgEnd();
 }
@@ -1479,10 +1526,12 @@ private:
 };

 template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
-typedef pointer_iterator<SelectionDAG::allnodes_iterator> nodes_iterator;
+using nodes_iterator = pointer_iterator<SelectionDAG::allnodes_iterator>;

 static nodes_iterator nodes_begin(SelectionDAG *G) {
 return nodes_iterator(G->allnodes_begin());
 }

 static nodes_iterator nodes_end(SelectionDAG *G) {
 return nodes_iterator(G->allnodes_end());
 }
@@ -1493,7 +1542,6 @@ SDValue SelectionDAG::getTargetMemSDNode(SDVTList VTs,
     ArrayRef<SDValue> Ops,
     const SDLoc &dl, EVT MemVT,
     MachineMemOperand *MMO) {

 /// Compose node ID and try to find an existing node.
 FoldingSetNodeID ID;
 unsigned Opcode =
@@ -1524,6 +1572,6 @@ SDValue SelectionDAG::getTargetMemSDNode(SDVTList VTs,
 return SDValue(N, 0);
 }

 } // end namespace llvm

-#endif
+#endif // LLVM_CODEGEN_SELECTIONDAG_H
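The SDDbgInfo and SelectionDAG hunks above replace private, never-defined copy operations with publicly deleted ones, declared right next to a defaulted constructor. A standalone sketch of the idiom, using a hypothetical class rather than LLVM's:

    class UniqueRegistry {
    public:
      UniqueRegistry() = default;
      UniqueRegistry(const UniqueRegistry &) = delete;
      UniqueRegistry &operator=(const UniqueRegistry &) = delete;
      // With a user-declared copy operation, move operations are not
      // implicitly generated either, so the type stays strictly unique.
    };

    int main() {
      UniqueRegistry R;
      // UniqueRegistry Copy = R;   // would not compile, by design
      (void)R;
      return 0;
    }

Compared with the old private-declaration trick, a misuse now fails at the call site with a clear "deleted function" diagnostic instead of a link error.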
@@ -37,6 +37,7 @@
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/Instruction.h"
 #include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
 #include "llvm/Support/AlignOf.h"
 #include "llvm/Support/AtomicOrdering.h"
 #include "llvm/Support/Casting.h"
@@ -53,14 +54,18 @@

 namespace llvm {

-class SelectionDAG;
+class APInt;
+class Constant;
+template <typename T> struct DenseMapInfo;
 class GlobalValue;
 class MachineBasicBlock;
 class MachineConstantPoolValue;
-class SDNode;
-class Value;
 class MCSymbol;
-template <typename T> struct DenseMapInfo;
+class raw_ostream;
+class SDNode;
+class SelectionDAG;
+class Type;
+class Value;

 void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
     bool force = false);
@@ -229,13 +234,15 @@ template <> struct isPodLike<SDValue> { static const bool value = true; };
 /// Allow casting operators to work directly on
 /// SDValues as if they were SDNode*'s.
 template<> struct simplify_type<SDValue> {
-typedef SDNode* SimpleType;
+using SimpleType = SDNode *;

 static SimpleType getSimplifiedValue(SDValue &Val) {
 return Val.getNode();
 }
 };
 template<> struct simplify_type<const SDValue> {
-typedef /*const*/ SDNode* SimpleType;
+using SimpleType = /*const*/ SDNode *;

 static SimpleType getSimplifiedValue(const SDValue &Val) {
 return Val.getNode();
 }
@@ -330,7 +337,8 @@ private:
 /// simplify_type specializations - Allow casting operators to work directly on
 /// SDValues as if they were SDNode*'s.
 template<> struct simplify_type<SDUse> {
-typedef SDNode* SimpleType;
+using SimpleType = SDNode *;

 static SimpleType getSimplifiedValue(SDUse &Val) {
 return Val.getNode();
 }
@@ -695,10 +703,10 @@ public:
 explicit use_iterator(SDUse *op) : Op(op) {}

 public:
-typedef std::iterator<std::forward_iterator_tag,
-    SDUse, ptrdiff_t>::reference reference;
-typedef std::iterator<std::forward_iterator_tag,
-    SDUse, ptrdiff_t>::pointer pointer;
+using reference = std::iterator<std::forward_iterator_tag,
+    SDUse, ptrdiff_t>::reference;
+using pointer = std::iterator<std::forward_iterator_tag,
+    SDUse, ptrdiff_t>::pointer;

 use_iterator() = default;
 use_iterator(const use_iterator &I) : Op(I.Op) {}
@@ -824,7 +832,7 @@ public:
 return OperandList[Num];
 }

-typedef SDUse* op_iterator;
+using op_iterator = SDUse *;

 op_iterator op_begin() const { return OperandList; }
 op_iterator op_end() const { return OperandList+NumOperands; }
@@ -896,7 +904,8 @@ public:
 return getValueType(ResNo).getSizeInBits();
 }

-typedef const EVT* value_iterator;
+using value_iterator = const EVT *;

 value_iterator value_begin() const { return ValueList; }
 value_iterator value_end() const { return ValueList+NumValues; }

@@ -1822,8 +1831,7 @@ class BlockAddressSDNode : public SDNode {
 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
     int64_t o, unsigned char Flags)
     : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
-    BA(ba), Offset(o), TargetFlags(Flags) {
-}
+    BA(ba), Offset(o), TargetFlags(Flags) {}

 public:
 const BlockAddress *getBlockAddress() const { return BA; }
@@ -2154,7 +2162,7 @@ public:
 /// instruction selection proper phase.
 class MachineSDNode : public SDNode {
 public:
-typedef MachineMemOperand **mmo_iterator;
+using mmo_iterator = MachineMemOperand **;

 private:
 friend class SelectionDAG;
@@ -2226,8 +2234,8 @@ public:
 };

 template <> struct GraphTraits<SDNode*> {
-typedef SDNode *NodeRef;
-typedef SDNodeIterator ChildIteratorType;
+using NodeRef = SDNode *;
+using ChildIteratorType = SDNodeIterator;

 static NodeRef getEntryNode(SDNode *N) { return N; }

@@ -2244,12 +2252,12 @@ template <> struct GraphTraits<SDNode*> {
 ///
 /// This needs to be a union because the largest node differs on 32 bit systems
 /// with 4 and 8 byte pointer alignment, respectively.
-typedef AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
-    BlockAddressSDNode, GlobalAddressSDNode>
-    LargestSDNode;
+using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
+    BlockAddressSDNode,
+    GlobalAddressSDNode>;

 /// The SDNode class with the greatest alignment requirement.
-typedef GlobalAddressSDNode MostAlignedSDNode;
+using MostAlignedSDNode = GlobalAddressSDNode;

 namespace ISD {

@@ -20,17 +20,26 @@
 #define LLVM_CODEGEN_SLOTINDEXES_H

 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ilist.h"
 #include "llvm/ADT/IntervalMap.h"
 #include "llvm/ADT/PointerIntPair.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Pass.h"
 #include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <utility>

 namespace llvm {

+class raw_ostream;

 /// This class represents an entry in the slot index list held in the
 /// SlotIndexes pass. It should not be used directly. See the
 /// SlotIndex & SlotIndexes classes for the public interface to this
@@ -40,7 +49,6 @@ namespace llvm {
 unsigned index;

 public:

 IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}

 MachineInstr* getInstr() const { return mi; }
@@ -301,7 +309,7 @@ namespace llvm {
 return os;
 }

-typedef std::pair<SlotIndex, MachineBasicBlock*> IdxMBBPair;
+using IdxMBBPair = std::pair<SlotIndex, MachineBasicBlock *>;

 inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
 return V < IM.first;
@@ -325,7 +333,7 @@ namespace llvm {
 // IndexListEntry allocator.
 BumpPtrAllocator ileAllocator;

-typedef ilist<IndexListEntry> IndexList;
+using IndexList = ilist<IndexListEntry>;
 IndexList indexList;

 #ifdef EXPENSIVE_CHECKS
@@ -334,7 +342,7 @@ namespace llvm {

 MachineFunction *mf;

-typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
+using Mi2IndexMap = DenseMap<const MachineInstr *, SlotIndex>;
 Mi2IndexMap mi2iMap;

 /// MBBRanges - Map MBB number to (start, stop) indexes.
@@ -436,7 +444,7 @@ namespace llvm {
 const MachineBasicBlock *MBB = MI.getParent();
 assert(MBB && "MI must be inserted inna basic block");
 MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
-for (;;) {
+while (true) {
 if (I == B)
     return getMBBStartIdx(MBB);
 --I;
@@ -453,7 +461,7 @@ namespace llvm {
 const MachineBasicBlock *MBB = MI.getParent();
 assert(MBB && "MI must be inserted inna basic block");
 MachineBasicBlock::const_iterator I = MI, E = MBB->end();
-for (;;) {
+while (true) {
 ++I;
 if (I == E)
     return getMBBEndIdx(MBB);
@@ -497,21 +505,25 @@ namespace llvm {

 /// Iterator over the idx2MBBMap (sorted pairs of slot index of basic block
 /// begin and basic block)
-typedef SmallVectorImpl<IdxMBBPair>::const_iterator MBBIndexIterator;
+using MBBIndexIterator = SmallVectorImpl<IdxMBBPair>::const_iterator;

 /// Move iterator to the next IdxMBBPair where the SlotIndex is greater or
 /// equal to \p To.
 MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const {
 return std::lower_bound(I, idx2MBBMap.end(), To);
 }

 /// Get an iterator pointing to the IdxMBBPair with the biggest SlotIndex
 /// that is greater or equal to \p Idx.
 MBBIndexIterator findMBBIndex(SlotIndex Idx) const {
 return advanceMBBIndex(idx2MBBMap.begin(), Idx);
 }

 /// Returns an iterator for the begin of the idx2MBBMap.
 MBBIndexIterator MBBIndexBegin() const {
 return idx2MBBMap.begin();
 }

 /// Return an iterator for the end of the idx2MBBMap.
 MBBIndexIterator MBBIndexEnd() const {
 return idx2MBBMap.end();
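advanceMBBIndex above binary-searches idx2MBBMap, a vector of (SlotIndex, MachineBasicBlock*) pairs kept sorted by slot index, using std::lower_bound. A self-contained sketch of the same lookup with simplified stand-in types (not LLVM's):

    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using IdxBlockPair = std::pair<unsigned, const char *>;

    // Find the first block whose start index is >= Idx.
    static std::vector<IdxBlockPair>::const_iterator
    findBlockIndex(const std::vector<IdxBlockPair> &Map, unsigned Idx) {
      return std::lower_bound(Map.begin(), Map.end(), Idx,
                              [](const IdxBlockPair &P, unsigned V) {
                                return P.first < V;
                              });
    }

    int main() {
      std::vector<IdxBlockPair> Map = {{0, "entry"}, {16, "loop"}, {48, "exit"}};
      auto It = findBlockIndex(Map, 20);
      if (It != Map.end())
        std::printf("first block starting at or after 20: %s\n", It->second);
      return 0;
    }

In the header above, the mixed-type operator< overloads on IdxMBBPair play the role of the lambda comparator here, keeping the lookup O(log n) per query.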
@@ -145,21 +145,27 @@ public:
 ///
 /// Statepoint operands take the form:
 /// <id>, <num patch bytes >, <num call arguments>, <call target>,
-/// [call arguments], <StackMaps::ConstantOp>, <calling convention>,
+/// [call arguments...],
+/// <StackMaps::ConstantOp>, <calling convention>,
 /// <StackMaps::ConstantOp>, <statepoint flags>,
-/// <StackMaps::ConstantOp>, <num other args>, [other args],
-/// [gc values]
+/// <StackMaps::ConstantOp>, <num deopt args>, [deopt args...],
+/// <gc base/derived pairs...> <gc allocas...>
+/// Note that the last two sets of arguments are not currently length
+/// prefixed.
 class StatepointOpers {
-private:
+// TODO:: we should change the STATEPOINT representation so that CC and
+// Flags should be part of meta operands, with args and deopt operands, and
+// gc operands all prefixed by their length and a type code. This would be
+// much more consistent.
+public:
 // These values are aboolute offsets into the operands of the statepoint
 // instruction.
 enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };
 
 // These values are relative offests from the start of the statepoint meta
 // arguments (i.e. the end of the call arguments).
-enum { CCOffset = 1, FlagsOffset = 3, NumVMSArgsOffset = 5 };
+enum { CCOffset = 1, FlagsOffset = 3, NumDeoptOperandsOffset = 5 };
 
-public:
 explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {}
 
 /// Get starting index of non call related arguments
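Illustrative only: a hedged sketch of how the absolute and relative offsets above are meant to be combined; the helper name is hypothetical, and the real accessors live on StatepointOpers itself.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/StackMaps.h"
using namespace llvm;

// Index of the first meta argument inside a STATEPOINT MachineInstr: the
// fixed header ends at MetaEnd and is followed by <num call arguments>
// call arguments, whose count is stored at NCallArgsPos.
static unsigned getStatepointMetaArgsStart(const MachineInstr &MI) {
  int64_t NumCallArgs = MI.getOperand(StatepointOpers::NCallArgsPos).getImm();
  return StatepointOpers::MetaEnd + static_cast<unsigned>(NumCallArgs);
}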
@@ -220,7 +226,7 @@ public:
 // OpTypes are used to encode information about the following logical
 // operand (which may consist of several MachineOperands) for the
 // OpParser.
-typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
+using OpType = enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp };
 
 StackMaps(AsmPrinter &AP);
 
@@ -248,9 +254,10 @@ public:
 
 private:
 static const char *WSMP;
-typedef SmallVector<Location, 8> LocationVec;
-typedef SmallVector<LiveOutReg, 8> LiveOutVec;
-typedef MapVector<uint64_t, uint64_t> ConstantPool;
+using LocationVec = SmallVector<Location, 8>;
+using LiveOutVec = SmallVector<LiveOutReg, 8>;
+using ConstantPool = MapVector<uint64_t, uint64_t>;
 
 struct FunctionInfo {
 uint64_t StackSize = 0;
@@ -273,8 +280,8 @@ private:
 LiveOuts(std::move(LiveOuts)) {}
 };
 
-typedef MapVector<const MCSymbol *, FunctionInfo> FnInfoMap;
-typedef std::vector<CallsiteInfo> CallsiteInfoList;
+using FnInfoMap = MapVector<const MCSymbol *, FunctionInfo>;
+using CallsiteInfoList = std::vector<CallsiteInfo>;
 
 AsmPrinter &AP;
 CallsiteInfoList CSInfos;
@@ -55,6 +55,9 @@ public:
 /// Return the MCSchedClassDesc for this instruction.
 const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
 
+/// \brief TargetSubtargetInfo getter.
+const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }
+
 /// \brief TargetInstrInfo getter.
 const TargetInstrInfo *getInstrInfo() const { return TII; }
 
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/WinEHFuncInfo.h ----------------------------*- C++ -*-===//
+//===- llvm/CodeGen/WinEHFuncInfo.h -----------------------------*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
@@ -17,28 +17,26 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/PointerUnion.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/TinyPtrVector.h"
-#include "llvm/IR/Instructions.h"
+#include <cstdint>
+#include <limits>
+#include <utility>
 
 namespace llvm {
 
 class AllocaInst;
 class BasicBlock;
-class CatchReturnInst;
-class Constant;
+class FuncletPadInst;
 class Function;
 class GlobalVariable;
+class Instruction;
 class InvokeInst;
-class IntrinsicInst;
-class LandingPadInst;
-class MCExpr;
-class MCSymbol;
 class MachineBasicBlock;
-class Value;
+class MCSymbol;
 
 // The following structs respresent the .xdata tables for various
 // Windows-related EH personalities.
 
-typedef PointerUnion<const BasicBlock *, MachineBasicBlock *> MBBOrBasicBlock;
+using MBBOrBasicBlock = PointerUnion<const BasicBlock *, MachineBasicBlock *>;
 
 struct CxxUnwindMapEntry {
 int ToState;
@@ -99,18 +97,18 @@ struct WinEHFuncInfo {
 SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
 SmallVector<SEHUnwindMapEntry, 4> SEHUnwindMap;
 SmallVector<ClrEHUnwindMapEntry, 4> ClrEHUnwindMap;
-int UnwindHelpFrameIdx = INT_MAX;
-int PSPSymFrameIdx = INT_MAX;
+int UnwindHelpFrameIdx = std::numeric_limits<int>::max();
+int PSPSymFrameIdx = std::numeric_limits<int>::max();
 
 int getLastStateNumber() const { return CxxUnwindMap.size() - 1; }
 
 void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin,
 MCSymbol *InvokeEnd);
 
-int EHRegNodeFrameIndex = INT_MAX;
-int EHRegNodeEndOffset = INT_MAX;
-int EHGuardFrameIndex = INT_MAX;
-int SEHSetFrameOffset = INT_MAX;
+int EHRegNodeFrameIndex = std::numeric_limits<int>::max();
+int EHRegNodeEndOffset = std::numeric_limits<int>::max();
+int EHGuardFrameIndex = std::numeric_limits<int>::max();
+int SEHSetFrameOffset = std::numeric_limits<int>::max();
 
 WinEHFuncInfo();
 };
@@ -125,5 +123,7 @@ void calculateSEHStateNumbers(const Function *ParentFn,
 WinEHFuncInfo &FuncInfo);
 
 void calculateClrEHStateNumbers(const Function *Fn, WinEHFuncInfo &FuncInfo);
-}
+
+} // end namespace llvm
 
 #endif // LLVM_CODEGEN_WINEHFUNCINFO_H
@@ -574,6 +574,14 @@ struct FrameData {
 IsFunctionStart = 1 << 2,
 };
 };
+
+enum class CodeViewContainer { ObjectFile, Pdb };
+
+inline uint32_t alignOf(CodeViewContainer Container) {
+if (Container == CodeViewContainer::ObjectFile)
+return 1;
+return 4;
+}
 }
 }
 
@@ -136,6 +136,7 @@ public:
 Error mapByteVectorTail(ArrayRef<uint8_t> &Bytes);
 Error mapByteVectorTail(std::vector<uint8_t> &Bytes);
 
+Error padToAlignment(uint32_t Align);
 Error skipPadding();
 
 private:
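Illustrative only: a sketch combining the container-specific alignment with the new padToAlignment(); it assumes padToAlignment() is the codeview::CodeViewRecordIO member added above, since PDB payloads are 4-byte aligned while object-file payloads are not.

#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/CodeViewRecordIO.h"
using namespace llvm;

// Pad the record currently being mapped to its container's alignment.
Error padRecord(codeview::CodeViewRecordIO &IO,
                codeview::CodeViewContainer Container) {
  return IO.padToAlignment(codeview::alignOf(Container));
}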
@@ -60,8 +60,8 @@ public:
 Error initialize(BinaryStreamReader Reader);
 Error initialize(BinaryStreamRef Stream);
 
-Iterator begin() { return Checksums.begin(); }
-Iterator end() { return Checksums.end(); }
+Iterator begin() const { return Checksums.begin(); }
+Iterator end() const { return Checksums.end(); }
 
 const FileChecksumArray &getArray() const { return Checksums; }
 
@@ -74,8 +74,13 @@ private:
 
 class DebugInlineeLinesSubsection final : public DebugSubsection {
 public:
+struct Entry {
+std::vector<support::ulittle32_t> ExtraFiles;
+InlineeSourceLineHeader Header;
+};
+
 DebugInlineeLinesSubsection(DebugChecksumsSubsection &Checksums,
-bool HasExtraFiles);
+bool HasExtraFiles = false);
 
 static bool classof(const DebugSubsection *S) {
 return S->kind() == DebugSubsectionKind::InlineeLines;
@@ -87,16 +92,18 @@ public:
 void addInlineSite(TypeIndex FuncId, StringRef FileName, uint32_t SourceLine);
 void addExtraFile(StringRef FileName);
 
+bool hasExtraFiles() const { return HasExtraFiles; }
+void setHasExtraFiles(bool Has) { HasExtraFiles = Has; }
+
+std::vector<Entry>::const_iterator begin() const { return Entries.begin(); }
+std::vector<Entry>::const_iterator end() const { return Entries.end(); }
+
 private:
 DebugChecksumsSubsection &Checksums;
 
 bool HasExtraFiles = false;
 uint32_t ExtraFileCount = 0;
 
-struct Entry {
-std::vector<support::ulittle32_t> ExtraFiles;
-InlineeSourceLineHeader Header;
-};
 std::vector<Entry> Entries;
 };
 }
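Illustrative only: a small usage sketch of the interface above; it assumes the named files were already registered with the checksums subsection, and the function id, file names, and line number are placeholders.

#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
using namespace llvm;
using namespace llvm::codeview;

void recordInlinee(DebugChecksumsSubsection &Checksums, TypeIndex InlineeId) {
  // HasExtraFiles now defaults to false; pass true only when extra files follow.
  DebugInlineeLinesSubsection Inlinees(Checksums, /*HasExtraFiles=*/true);
  Inlinees.addInlineSite(InlineeId, "a.cpp", /*SourceLine=*/42);
  Inlinees.addExtraFile("a.inl");
  // Entry is public now, so consumers can walk what was recorded.
  for (const DebugInlineeLinesSubsection::Entry &E : Inlinees)
    (void)E.ExtraFiles.size();
}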
@@ -31,28 +31,32 @@ struct DebugSubsectionHeader {
 class DebugSubsectionRecord {
 public:
 DebugSubsectionRecord();
-DebugSubsectionRecord(DebugSubsectionKind Kind, BinaryStreamRef Data);
+DebugSubsectionRecord(DebugSubsectionKind Kind, BinaryStreamRef Data,
+CodeViewContainer Container);
 
-static Error initialize(BinaryStreamRef Stream, DebugSubsectionRecord &Info);
+static Error initialize(BinaryStreamRef Stream, DebugSubsectionRecord &Info,
+CodeViewContainer Container);
 
 uint32_t getRecordLength() const;
 DebugSubsectionKind kind() const;
 BinaryStreamRef getRecordData() const;
 
 private:
+CodeViewContainer Container;
 DebugSubsectionKind Kind;
 BinaryStreamRef Data;
 };
 
 class DebugSubsectionRecordBuilder {
 public:
-DebugSubsectionRecordBuilder(DebugSubsectionKind Kind, DebugSubsection &Frag);
+DebugSubsectionRecordBuilder(std::unique_ptr<DebugSubsection> Subsection,
+CodeViewContainer Container);
 uint32_t calculateSerializedLength();
 Error commit(BinaryStreamWriter &Writer);
 
 private:
-DebugSubsectionKind Kind;
-DebugSubsection &Frag;
+std::unique_ptr<DebugSubsection> Subsection;
+CodeViewContainer Container;
 };
 
 } // namespace codeview
@@ -62,7 +66,12 @@ template <> struct VarStreamArrayExtractor<codeview::DebugSubsectionRecord> {
 
 static Error extract(BinaryStreamRef Stream, uint32_t &Length,
 codeview::DebugSubsectionRecord &Info) {
-if (auto EC = codeview::DebugSubsectionRecord::initialize(Stream, Info))
+// FIXME: We need to pass the container type through to this function, but
+// VarStreamArray doesn't easily support stateful contexts. In practice
+// this isn't super important since the subsection header describes its
+// length and we can just skip it. It's more important when writing.
+if (auto EC = codeview::DebugSubsectionRecord::initialize(
+Stream, Info, codeview::CodeViewContainer::Pdb))
 return EC;
 Length = Info.getRecordLength();
 return Error::success();
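Illustrative only: a hedged sketch of the new builder shape, which owns its subsection and carries the container kind so commit() can apply the right padding; writeSubsection is a hypothetical helper.

#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/Support/BinaryStreamWriter.h"
#include <memory>
using namespace llvm;
using namespace llvm::codeview;

Error writeSubsection(std::unique_ptr<DebugSubsection> Subsection,
                      BinaryStreamWriter &Writer) {
  DebugSubsectionRecordBuilder Builder(std::move(Subsection),
                                       CodeViewContainer::Pdb);
  // The serialized length can be queried up front, e.g. to reserve space.
  (void)Builder.calculateSerializedLength();
  return Builder.commit(Writer);
}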
@@ -24,9 +24,9 @@ namespace codeview {
 class SymbolVisitorDelegate;
 class SymbolDeserializer : public SymbolVisitorCallbacks {
 struct MappingInfo {
-explicit MappingInfo(ArrayRef<uint8_t> RecordData)
+MappingInfo(ArrayRef<uint8_t> RecordData, CodeViewContainer Container)
 : Stream(RecordData, llvm::support::little), Reader(Stream),
-Mapping(Reader) {}
+Mapping(Reader, Container) {}
 
 BinaryByteStream Stream;
 BinaryStreamReader Reader;
@@ -35,7 +35,9 @@ class SymbolDeserializer : public SymbolVisitorCallbacks {
 
 public:
 template <typename T> static Error deserializeAs(CVSymbol Symbol, T &Record) {
-SymbolDeserializer S(nullptr);
+// If we're just deserializing one record, then don't worry about alignment
+// as there's nothing that comes after.
+SymbolDeserializer S(nullptr, CodeViewContainer::ObjectFile);
 if (auto EC = S.visitSymbolBegin(Symbol))
 return EC;
 if (auto EC = S.visitKnownRecord(Symbol, Record))
@@ -45,12 +47,13 @@ public:
 return Error::success();
 }
 
-explicit SymbolDeserializer(SymbolVisitorDelegate *Delegate)
-: Delegate(Delegate) {}
+explicit SymbolDeserializer(SymbolVisitorDelegate *Delegate,
+CodeViewContainer Container)
+: Delegate(Delegate), Container(Container) {}
 
 Error visitSymbolBegin(CVSymbol &Record) override {
 assert(!Mapping && "Already in a symbol mapping!");
-Mapping = llvm::make_unique<MappingInfo>(Record.content());
+Mapping = llvm::make_unique<MappingInfo>(Record.content(), Container);
 return Mapping->Mapping.visitSymbolBegin(Record);
 }
 Error visitSymbolEnd(CVSymbol &Record) override {
@@ -78,6 +81,7 @@ private:
 }
 
 SymbolVisitorDelegate *Delegate;
+CodeViewContainer Container;
 std::unique_ptr<MappingInfo> Mapping;
 };
 }
@@ -26,9 +26,11 @@ class TypeCollection;
 class CVSymbolDumper {
 public:
 CVSymbolDumper(ScopedPrinter &W, TypeCollection &Types,
+CodeViewContainer Container,
 std::unique_ptr<SymbolDumpDelegate> ObjDelegate,
 bool PrintRecordBytes)
-: W(W), Types(Types), ObjDelegate(std::move(ObjDelegate)),
+: W(W), Types(Types), Container(Container),
+ObjDelegate(std::move(ObjDelegate)),
 PrintRecordBytes(PrintRecordBytes) {}
 
 /// Dumps one type record. Returns false if there was a type parsing error,
@@ -44,6 +46,7 @@ public:
 private:
 ScopedPrinter &W;
 TypeCollection &Types;
+CodeViewContainer Container;
 std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
 
 bool PrintRecordBytes;
@@ -20,8 +20,12 @@ class BinaryStreamWriter;
 namespace codeview {
 class SymbolRecordMapping : public SymbolVisitorCallbacks {
 public:
-explicit SymbolRecordMapping(BinaryStreamReader &Reader) : IO(Reader) {}
-explicit SymbolRecordMapping(BinaryStreamWriter &Writer) : IO(Writer) {}
+explicit SymbolRecordMapping(BinaryStreamReader &Reader,
+CodeViewContainer Container)
+: IO(Reader), Container(Container) {}
+explicit SymbolRecordMapping(BinaryStreamWriter &Writer,
+CodeViewContainer Container)
+: IO(Writer), Container(Container) {}
 
 Error visitSymbolBegin(CVSymbol &Record) override;
 Error visitSymbolEnd(CVSymbol &Record) override;
@@ -35,6 +39,7 @@ private:
 Optional<SymbolKind> Kind;
 
 CodeViewRecordIO IO;
+CodeViewContainer Container;
 };
 }
 }
@@ -46,17 +46,18 @@ class SymbolSerializer : public SymbolVisitorCallbacks {
 
 public:
 template <typename SymType>
-static CVSymbol writeOneSymbol(SymType &Sym, BumpPtrAllocator &Storage) {
+static CVSymbol writeOneSymbol(SymType &Sym, BumpPtrAllocator &Storage,
+CodeViewContainer Container) {
 CVSymbol Result;
 Result.Type = static_cast<SymbolKind>(Sym.Kind);
-SymbolSerializer Serializer(Storage);
+SymbolSerializer Serializer(Storage, Container);
 consumeError(Serializer.visitSymbolBegin(Result));
 consumeError(Serializer.visitKnownRecord(Result, Sym));
 consumeError(Serializer.visitSymbolEnd(Result));
 return Result;
 }
 
-explicit SymbolSerializer(BumpPtrAllocator &Storage);
+SymbolSerializer(BumpPtrAllocator &Storage, CodeViewContainer Container);
 
 virtual Error visitSymbolBegin(CVSymbol &Record) override;
 virtual Error visitSymbolEnd(CVSymbol &Record) override;
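Illustrative only: a hedged round-trip sketch through the serializer above and the deserializer from the earlier hunk; RecordT stands for any concrete SymbolRecord.h record type, and the copy of the input record exists only to give deserializeAs something to fill in.

#include "llvm/DebugInfo/CodeView/SymbolDeserializer.h"
#include "llvm/DebugInfo/CodeView/SymbolSerializer.h"
#include "llvm/Support/Allocator.h"
using namespace llvm;
using namespace llvm::codeview;

template <typename RecordT>
Error roundTrip(RecordT &Record, BumpPtrAllocator &Storage) {
  // Serialization now needs the destination container, since PDB and
  // object-file payloads are padded differently.
  CVSymbol Sym = SymbolSerializer::writeOneSymbol(
      Record, Storage, CodeViewContainer::ObjectFile);
  RecordT RoundTripped = Record;
  return SymbolDeserializer::deserializeAs<RecordT>(Sym, RoundTripped);
}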
@@ -44,17 +44,19 @@ class MappedBlockStream : public BinaryStream {
 public:
 static std::unique_ptr<MappedBlockStream>
 createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
-BinaryStreamRef MsfData);
+BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<MappedBlockStream>
 createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
-uint32_t StreamIndex);
+uint32_t StreamIndex, BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<MappedBlockStream>
-createFpmStream(const MSFLayout &Layout, BinaryStreamRef MsfData);
+createFpmStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
+BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<MappedBlockStream>
-createDirectoryStream(const MSFLayout &Layout, BinaryStreamRef MsfData);
+createDirectoryStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
+BumpPtrAllocator &Allocator);
 
 llvm::support::endianness getEndian() const override {
 return llvm::support::little;
@@ -67,9 +69,7 @@ public:
 
 uint32_t getLength() override;
 
-uint32_t getNumBytesCopied() const;
-
-llvm::BumpPtrAllocator &getAllocator() { return Pool; }
+llvm::BumpPtrAllocator &getAllocator() { return Allocator; }
 
 void invalidateCache();
 
@@ -79,7 +79,7 @@ public:
 
 protected:
 MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
-BinaryStreamRef MsfData);
+BinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
 
 private:
 const MSFStreamLayout &getStreamLayout() const { return StreamLayout; }
@@ -94,7 +94,15 @@ private:
 BinaryStreamRef MsfData;
 
 typedef MutableArrayRef<uint8_t> CacheEntry;
-llvm::BumpPtrAllocator Pool;
+
+// We just store the allocator by reference. We use this to allocate
+// contiguous memory for things like arrays or strings that cross a block
+// boundary, and this memory is expected to outlive the stream. For example,
+// someone could create a stream, read some stuff, then close the stream, and
+// we would like outstanding references to fields to remain valid since the
+// entire file is mapped anyway. Because of that, the user must supply the
+// allocator to allocate broken records from.
+BumpPtrAllocator &Allocator;
 DenseMap<uint32_t, std::vector<CacheEntry>> CacheMap;
 };
 
@@ -102,18 +110,20 @@ class WritableMappedBlockStream : public WritableBinaryStream {
 public:
 static std::unique_ptr<WritableMappedBlockStream>
 createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
-WritableBinaryStreamRef MsfData);
+WritableBinaryStreamRef MsfData, BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<WritableMappedBlockStream>
 createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
-uint32_t StreamIndex);
+uint32_t StreamIndex, BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<WritableMappedBlockStream>
 createDirectoryStream(const MSFLayout &Layout,
-WritableBinaryStreamRef MsfData);
+WritableBinaryStreamRef MsfData,
+BumpPtrAllocator &Allocator);
 
 static std::unique_ptr<WritableMappedBlockStream>
-createFpmStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData);
+createFpmStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
+BumpPtrAllocator &Allocator);
 
 llvm::support::endianness getEndian() const override {
 return llvm::support::little;
@@ -139,7 +149,8 @@ public:
 protected:
 WritableMappedBlockStream(uint32_t BlockSize,
 const MSFStreamLayout &StreamLayout,
-WritableBinaryStreamRef MsfData);
+WritableBinaryStreamRef MsfData,
+BumpPtrAllocator &Allocator);
 
 private:
 MappedBlockStream ReadInterface;
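Illustrative only: a sketch of opening one numbered MSF stream with the caller-supplied allocator required by the new signatures; openStream is a hypothetical wrapper.

#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/Support/Allocator.h"
using namespace llvm;
using namespace llvm::msf;

std::unique_ptr<MappedBlockStream>
openStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
           uint32_t StreamIndex, BumpPtrAllocator &Allocator) {
  // Records that straddle an MSF block boundary are copied into Allocator,
  // so references into them stay valid after the stream object is gone.
  return MappedBlockStream::createIndexedStream(Layout, MsfData, StreamIndex,
                                                Allocator);
}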
@@ -49,11 +49,8 @@ public:
 void setObjFileName(StringRef Name);
 void addSymbol(codeview::CVSymbol Symbol);
 
-void addC13Fragment(std::unique_ptr<codeview::DebugLinesSubsection> Lines);
-void addC13Fragment(
-std::unique_ptr<codeview::DebugInlineeLinesSubsection> Inlinees);
-void setC13FileChecksums(
-std::unique_ptr<codeview::DebugChecksumsSubsection> Checksums);
+void
+addDebugSubsection(std::unique_ptr<codeview::DebugSubsection> Subsection);
 
 uint16_t getStreamIndex() const;
 StringRef getModuleName() const { return ModuleName; }
@@ -83,10 +80,6 @@ private:
 std::vector<std::string> SourceFiles;
 std::vector<codeview::CVSymbol> Symbols;
 
-std::unique_ptr<codeview::DebugChecksumsSubsection> ChecksumInfo;
-std::vector<std::unique_ptr<codeview::DebugLinesSubsection>> LineInfo;
-std::vector<std::unique_ptr<codeview::DebugInlineeLinesSubsection>> Inlinees;
-
 std::vector<std::unique_ptr<codeview::DebugSubsectionRecordBuilder>>
 C13Builders;
 
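Illustrative only: a sketch of the consolidated entry point; it assumes this hunk belongs to pdb::DbiModuleDescriptorBuilder, and any DebugSubsection subclass (lines, inlinee lines, file checksums) now goes through the same call.

#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h"
#include <memory>
using namespace llvm;
using namespace llvm::pdb;

void addChecksums(
    DbiModuleDescriptorBuilder &Module,
    std::unique_ptr<codeview::DebugChecksumsSubsection> Checksums) {
  // unique_ptr<Derived> converts implicitly to unique_ptr<DebugSubsection>.
  Module.addDebugSubsection(std::move(Checksums));
}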
@@ -12,6 +12,7 @@
 
 #include "llvm/ADT/iterator_range.h"
 #include "llvm/DebugInfo/CodeView/CVRecord.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
 #include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
 #include "llvm/DebugInfo/CodeView/SymbolRecord.h"
 #include "llvm/DebugInfo/MSF/MappedBlockStream.h"
@@ -25,7 +26,7 @@ class PDBFile;
 class DbiModuleDescriptor;
 
 class ModuleDebugStreamRef {
-typedef codeview::DebugSubsectionArray::Iterator LinesAndChecksumsIterator;
+typedef codeview::DebugSubsectionArray::Iterator DebugSubsectionIterator;
 
 public:
 ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
@@ -39,12 +40,15 @@ public:
 iterator_range<codeview::CVSymbolArray::Iterator>
 symbols(bool *HadError) const;
 
-llvm::iterator_range<LinesAndChecksumsIterator> linesAndChecksums() const;
+llvm::iterator_range<DebugSubsectionIterator> subsections() const;
 
-bool hasLineInfo() const;
+bool hasDebugSubsections() const;
 
 Error commit();
 
+Expected<codeview::DebugChecksumsSubsectionRef>
+findChecksumsSubsection() const;
+
 private:
 const DbiModuleDescriptor &Mod;
 
@@ -57,7 +61,7 @@ private:
 BinaryStreamRef C13LinesSubstream;
 BinaryStreamRef GlobalRefsSubstream;
 
-codeview::DebugSubsectionArray LinesAndChecksums;
+codeview::DebugSubsectionArray Subsections;
 };
 }
 }
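Illustrative only: a sketch of a consumer using the renamed subsections() range and the new findChecksumsSubsection(); it assumes this hunk is pdb::ModuleDebugStreamRef from the PDB native reader.

#include "llvm/DebugInfo/PDB/Native/ModuleDebugStream.h"
using namespace llvm;
using namespace llvm::pdb;

Error dumpSubsections(const ModuleDebugStreamRef &ModS) {
  for (const codeview::DebugSubsectionRecord &SS : ModS.subsections())
    (void)SS.kind(); // dispatch on the subsection kind here
  auto Checksums = ModS.findChecksumsSubsection();
  if (!Checksums)
    return Checksums.takeError();
  return Error::success();
}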
@@ -45,6 +45,8 @@ public:
 
 FixedStreamArray<support::ulittle32_t> name_ids() const;
 
+codeview::DebugStringTableSubsectionRef getStringTable() const;
+
 private:
 Error readHeader(BinaryStreamReader &Reader);
 Error readStrings(BinaryStreamReader &Reader);
@@ -34,8 +34,7 @@ class TpiStream {
 friend class TpiStreamBuilder;
 
 public:
-TpiStream(const PDBFile &File,
-std::unique_ptr<msf::MappedBlockStream> Stream);
+TpiStream(PDBFile &File, std::unique_ptr<msf::MappedBlockStream> Stream);
 ~TpiStream();
 Error reload();
 
@@ -61,7 +60,7 @@ public:
 Error commit();
 
 private:
-const PDBFile &Pdb;
+PDBFile &Pdb;
 std::unique_ptr<msf::MappedBlockStream> Stream;
 
 std::unique_ptr<codeview::LazyRandomTypeCollection> Types;
@@ -86,6 +86,10 @@ namespace llvm {
 /// Construct any deferred debug info descriptors.
 void finalize();
 
+/// Finalize a specific subprogram - no new variables may be added to this
+/// subprogram afterwards.
+void finalizeSubprogram(DISubprogram *SP);
+
 /// A CompileUnit provides an anchor for all debugging
 /// information generated during this instance of compilation.
 /// \param Lang Source programming language, eg. dwarf::DW_LANG_C99
@@ -90,12 +90,6 @@ namespace llvm {
 DenseMap<const MDNode *, MDNode *> &Cache,
 bool ReplaceLast = false);
 
-/// Reparent all debug locations referenced by \c I that belong to \c OrigSP
-/// to become (possibly indirect) children of \c NewSP.
-static void reparentDebugInfo(Instruction &I, DISubprogram *OrigSP,
-DISubprogram *NewSP,
-DenseMap<const MDNode *, MDNode *> &Cache);
-
 unsigned getLine() const;
 unsigned getCol() const;
 MDNode *getScope() const;
@@ -134,16 +134,18 @@ public:
 /// be renamed or references something that can't be renamed).
 unsigned NotEligibleToImport : 1;
 
-/// Indicate that the global value must be considered a live root for
-/// index-based liveness analysis. Used for special LLVM values such as
-/// llvm.global_ctors that the linker does not know about.
-unsigned LiveRoot : 1;
+/// In per-module summary, indicate that the global value must be considered
+/// a live root for index-based liveness analysis. Used for special LLVM
+/// values such as llvm.global_ctors that the linker does not know about.
+///
+/// In combined summary, indicate that the global value is live.
+unsigned Live : 1;
 
 /// Convenience Constructors
 explicit GVFlags(GlobalValue::LinkageTypes Linkage,
-bool NotEligibleToImport, bool LiveRoot)
+bool NotEligibleToImport, bool Live)
 : Linkage(Linkage), NotEligibleToImport(NotEligibleToImport),
-LiveRoot(LiveRoot) {}
+Live(Live) {}
 };
 
 private:
@@ -172,6 +174,8 @@ private:
 /// are listed in the derived FunctionSummary object.
 std::vector<ValueInfo> RefEdgeList;
 
+bool isLive() const { return Flags.Live; }
+
 protected:
 GlobalValueSummary(SummaryKind K, GVFlags Flags, std::vector<ValueInfo> Refs)
 : Kind(K), Flags(Flags), RefEdgeList(std::move(Refs)) {}
@@ -213,19 +217,17 @@ public:
 /// Return true if this global value can't be imported.
 bool notEligibleToImport() const { return Flags.NotEligibleToImport; }
 
-/// Return true if this global value must be considered a root for live
-/// value analysis on the index.
-bool liveRoot() const { return Flags.LiveRoot; }
-
-/// Flag that this global value must be considered a root for live
-/// value analysis on the index.
-void setLiveRoot() { Flags.LiveRoot = true; }
+void setLive(bool Live) { Flags.Live = Live; }
 
 /// Flag that this global value cannot be imported.
 void setNotEligibleToImport() { Flags.NotEligibleToImport = true; }
 
 /// Return the list of values referenced by this global value definition.
 ArrayRef<ValueInfo> refs() const { return RefEdgeList; }
 
+friend class ModuleSummaryIndex;
+friend void computeDeadSymbols(class ModuleSummaryIndex &,
+const DenseSet<GlobalValue::GUID> &);
 };
 
 /// \brief Alias summary information.
@@ -535,6 +537,11 @@ private:
 /// GUIDs, it will be mapped to 0.
 std::map<GlobalValue::GUID, GlobalValue::GUID> OidGuidMap;
 
+/// Indicates that summary-based GlobalValue GC has run, and values with
+/// GVFlags::Live==false are really dead. Otherwise, all values must be
+/// considered live.
+bool WithGlobalValueDeadStripping = false;
+
 // YAML I/O support.
 friend yaml::MappingTraits<ModuleSummaryIndex>;
 
@@ -550,6 +557,17 @@ public:
 const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
 size_t size() const { return GlobalValueMap.size(); }
 
+bool withGlobalValueDeadStripping() const {
+return WithGlobalValueDeadStripping;
+}
+void setWithGlobalValueDeadStripping() {
+WithGlobalValueDeadStripping = true;
+}
+
+bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
+return !WithGlobalValueDeadStripping || GVS->isLive();
+}
+
 /// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
 ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
 auto I = GlobalValueMap.find(GUID);
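Illustrative only: how a ThinLTO consumer is expected to query liveness after this change; until computeDeadSymbols() has set the index-wide flag, every summary is conservatively treated as live.

#include "llvm/IR/ModuleSummaryIndex.h"
using namespace llvm;

bool shouldKeep(const ModuleSummaryIndex &Index,
                const GlobalValueSummary *GVS) {
  // Returns true unconditionally while WithGlobalValueDeadStripping is false.
  return Index.isGlobalValueLive(GVS);
}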
@@ -128,6 +128,8 @@ template <> struct MappingTraits<TypeIdSummary> {
 };
 
 struct FunctionSummaryYaml {
+unsigned Linkage;
+bool NotEligibleToImport, Live;
 std::vector<uint64_t> TypeTests;
 std::vector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
 TypeCheckedLoadVCalls;
@@ -168,6 +170,9 @@ namespace yaml {
 
 template <> struct MappingTraits<FunctionSummaryYaml> {
 static void mapping(IO &io, FunctionSummaryYaml& summary) {
+io.mapOptional("Linkage", summary.Linkage);
+io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport);
+io.mapOptional("Live", summary.Live);
 io.mapOptional("TypeTests", summary.TypeTests);
 io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls);
 io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls);
@@ -199,12 +204,12 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
 }
 auto &Elem = V[KeyInt];
 for (auto &FSum : FSums) {
-GlobalValueSummary::GVFlags GVFlags(GlobalValue::ExternalLinkage, false,
-false);
 Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
-GVFlags, 0, ArrayRef<ValueInfo>{},
-ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
-std::move(FSum.TypeTestAssumeVCalls),
+GlobalValueSummary::GVFlags(
+static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
+FSum.NotEligibleToImport, FSum.Live),
+0, ArrayRef<ValueInfo>{}, ArrayRef<FunctionSummary::EdgeTy>{},
+std::move(FSum.TypeTests), std::move(FSum.TypeTestAssumeVCalls),
 std::move(FSum.TypeCheckedLoadVCalls),
 std::move(FSum.TypeTestAssumeConstVCalls),
 std::move(FSum.TypeCheckedLoadConstVCalls)));
@@ -216,8 +221,10 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
 for (auto &Sum : P.second.SummaryList) {
 if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get()))
 FSums.push_back(FunctionSummaryYaml{
-FSum->type_tests(), FSum->type_test_assume_vcalls(),
-FSum->type_checked_load_vcalls(),
+FSum->flags().Linkage,
+static_cast<bool>(FSum->flags().NotEligibleToImport),
+static_cast<bool>(FSum->flags().Live), FSum->type_tests(),
+FSum->type_test_assume_vcalls(), FSum->type_checked_load_vcalls(),
 FSum->type_test_assume_const_vcalls(),
 FSum->type_checked_load_const_vcalls()});
 }
@@ -231,6 +238,8 @@ template <> struct MappingTraits<ModuleSummaryIndex> {
 static void mapping(IO &io, ModuleSummaryIndex& index) {
 io.mapOptional("GlobalValueMap", index.GlobalValueMap);
 io.mapOptional("TypeIdMap", index.TypeIdMap);
+io.mapOptional("WithGlobalValueDeadStripping",
+index.WithGlobalValueDeadStripping);
 }
 };
 
@@ -228,24 +228,24 @@ public:
 return cast<ConstantInt>(NumVMSArgs)->getZExtValue();
 }
 
-typename CallSiteTy::arg_iterator vm_state_begin() const {
+typename CallSiteTy::arg_iterator deopt_begin() const {
 auto I = gc_transition_args_end() + 1;
 assert((getCallSite().arg_end() - I) >= 0);
 return I;
 }
-typename CallSiteTy::arg_iterator vm_state_end() const {
-auto I = vm_state_begin() + getNumTotalVMSArgs();
+typename CallSiteTy::arg_iterator deopt_end() const {
+auto I = deopt_begin() + getNumTotalVMSArgs();
 assert((getCallSite().arg_end() - I) >= 0);
 return I;
 }
 
 /// range adapter for vm state arguments
-iterator_range<arg_iterator> vm_state_args() const {
-return make_range(vm_state_begin(), vm_state_end());
+iterator_range<arg_iterator> deopt_operands() const {
+return make_range(deopt_begin(), deopt_end());
 }
 
 typename CallSiteTy::arg_iterator gc_args_begin() const {
-return vm_state_end();
+return deopt_end();
 }
 typename CallSiteTy::arg_iterator gc_args_end() const {
 return getCallSite().arg_end();
@@ -289,8 +289,8 @@ public:
 (void)arg_end();
 (void)gc_transition_args_begin();
 (void)gc_transition_args_end();
-(void)vm_state_begin();
-(void)vm_state_end();
+(void)deopt_begin();
+(void)deopt_end();
 (void)gc_args_begin();
 (void)gc_args_end();
 }
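Illustrative only: a sketch iterating the renamed deoptimization ("vm state") operand range; ImmutableStatepoint is the read-only wrapper declared in this header, and countDeoptOperands is a hypothetical helper.

#include "llvm/IR/Statepoint.h"
using namespace llvm;

unsigned countDeoptOperands(const ImmutableStatepoint &SP) {
  unsigned N = 0;
  // deopt_operands() replaces the old vm_state_args() range adapter.
  for (const Use &U : SP.deopt_operands()) {
    (void)U;
    ++N;
  }
  return N;
}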
@@ -86,7 +86,6 @@ void initializeCFGOnlyPrinterLegacyPassPass(PassRegistry&);
 void initializeCFGOnlyViewerLegacyPassPass(PassRegistry&);
 void initializeCFGPrinterLegacyPassPass(PassRegistry&);
 void initializeCFGSimplifyPassPass(PassRegistry&);
-void initializeLateCFGSimplifyPassPass(PassRegistry&);
 void initializeCFGViewerLegacyPassPass(PassRegistry&);
 void initializeCFLAndersAAWrapperPassPass(PassRegistry&);
 void initializeCFLSteensAAWrapperPassPass(PassRegistry&);
@@ -144,8 +143,8 @@ void initializeGCMachineCodeAnalysisPass(PassRegistry&);
 void initializeGCModuleInfoPass(PassRegistry&);
 void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
 void initializeGVNHoistLegacyPassPass(PassRegistry&);
-void initializeGVNSinkLegacyPassPass(PassRegistry&);
 void initializeGVNLegacyPassPass(PassRegistry&);
+void initializeGVNSinkLegacyPassPass(PassRegistry&);
 void initializeGlobalDCELegacyPassPass(PassRegistry&);
 void initializeGlobalMergePass(PassRegistry&);
 void initializeGlobalOptLegacyPassPass(PassRegistry&);
@@ -175,13 +174,14 @@ void initializeIntervalPartitionPass(PassRegistry&);
 void initializeJumpThreadingPass(PassRegistry&);
 void initializeLCSSAVerificationPassPass(PassRegistry&);
 void initializeLCSSAWrapperPassPass(PassRegistry&);
+void initializeLateCFGSimplifyPassPass(PassRegistry&);
 void initializeLazyBlockFrequencyInfoPassPass(PassRegistry&);
 void initializeLazyBranchProbabilityInfoPassPass(PassRegistry&);
 void initializeLazyMachineBlockFrequencyInfoPassPass(PassRegistry&);
+void initializeLazyValueInfoPrinterPass(PassRegistry&);
 void initializeLazyValueInfoWrapperPassPass(PassRegistry&);
 void initializeLegacyLICMPassPass(PassRegistry&);
 void initializeLegacyLoopSinkPassPass(PassRegistry&);
-void initializeLazyValueInfoPrinterPass(PassRegistry&);
 void initializeLegalizerPass(PassRegistry&);
 void initializeLibCallsShrinkWrapLegacyPassPass(PassRegistry&);
 void initializeLintPass(PassRegistry&);
@@ -195,8 +195,8 @@ void initializeLiveVariablesPass(PassRegistry&);
 void initializeLoadCombinePass(PassRegistry&);
 void initializeLoadStoreVectorizerPass(PassRegistry&);
 void initializeLoaderPassPass(PassRegistry&);
-void initializeLocalizerPass(PassRegistry&);
 void initializeLocalStackSlotPassPass(PassRegistry&);
+void initializeLocalizerPass(PassRegistry&);
 void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
 void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
 void initializeLoopDeletionLegacyPassPass(PassRegistry&);
@@ -304,6 +304,7 @@ void initializeProcessImplicitDefsPass(PassRegistry&);
 void initializeProfileSummaryInfoWrapperPassPass(PassRegistry&);
 void initializePromoteLegacyPassPass(PassRegistry&);
 void initializePruneEHPass(PassRegistry&);
+void initializeRABasicPass(PassRegistry&);
 void initializeRAGreedyPass(PassRegistry&);
 void initializeReassociateLegacyPassPass(PassRegistry&);
 void initializeRegBankSelectPass(PassRegistry&);
@@ -327,8 +328,9 @@ void initializeSafeStackLegacyPassPass(PassRegistry&);
 void initializeSampleProfileLoaderLegacyPassPass(PassRegistry&);
 void initializeSanitizerCoverageModulePass(PassRegistry&);
 void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
-void initializeScalarizerPass(PassRegistry&);
 void initializeScalarizeMaskedMemIntrinPass(PassRegistry&);
+void initializeScalarizerPass(PassRegistry&);
+void initializeScavengerTestPass(PassRegistry&);
 void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
 void initializeSeparateConstOffsetFromGEPPass(PassRegistry&);
 void initializeShadowStackGCLoweringPass(PassRegistry&);
@@ -46,6 +46,9 @@ struct Config {
 unsigned OptLevel = 2;
 bool DisableVerify = false;
 
+/// Use the new pass manager
+bool UseNewPM = false;
+
 /// Disable entirely the optimizer, including importing for ThinLTO
 bool CodeGenOnly = false;
 
@@ -17,12 +17,20 @@
 
 #include "llvm/DebugInfo/CodeView/CodeView.h"
 #include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
 #include "llvm/ObjectYAML/YAML.h"
 
 namespace llvm {
 
+namespace codeview {
+class DebugStringTableSubsection;
+class DebugStringTableSubsectionRef;
+class DebugChecksumsSubsectionRef;
+}
 namespace CodeViewYAML {
 
 namespace detail {
-struct C13FragmentBase;
+struct YAMLSubsectionBase;
 }
 
 struct SourceLineEntry {
@@ -74,18 +82,24 @@ struct InlineeInfo {
 std::vector<InlineeSite> Sites;
 };
 
-struct SourceFileInfo {
-std::vector<SourceFileChecksumEntry> FileChecksums;
-std::vector<SourceLineInfo> LineFragments;
-std::vector<InlineeInfo> Inlinees;
+struct YAMLDebugSubsection {
+static Expected<YAMLDebugSubsection>
+fromCodeViewSubection(const codeview::DebugStringTableSubsectionRef &Strings,
+const codeview::DebugChecksumsSubsectionRef &Checksums,
+const codeview::DebugSubsectionRecord &SS);
+
+std::shared_ptr<detail::YAMLSubsectionBase> Subsection;
 };
 
-struct C13DebugSection {
-std::vector<detail::C13FragmentBase> Fragments;
-};
+Expected<std::vector<std::unique_ptr<codeview::DebugSubsection>>>
+convertSubsectionList(ArrayRef<YAMLDebugSubsection> Subsections,
+codeview::DebugStringTableSubsection &Strings);
 
 } // namespace CodeViewYAML
 } // namespace llvm
 
-LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::SourceFileInfo)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::YAMLDebugSubsection)
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(CodeViewYAML::YAMLDebugSubsection)
 
 #endif
@@ -28,7 +28,9 @@ struct SymbolRecordBase;
 struct SymbolRecord {
 std::shared_ptr<detail::SymbolRecordBase> Symbol;
 
-codeview::CVSymbol toCodeViewSymbol(BumpPtrAllocator &Allocator) const;
+codeview::CVSymbol
+toCodeViewSymbol(BumpPtrAllocator &Allocator,
+codeview::CodeViewContainer Container) const;
 static Expected<SymbolRecord> fromCodeViewSymbol(codeview::CVSymbol Symbol);
 };
 
@@ -1361,10 +1361,6 @@ public:
 return false;
 }
 
-bool isTemplateArg(StringRef Name) const {
-return isTemplateArg(StringInit::get(Name));
-}
-
 const RecordVal *getValue(const Init *Name) const {
 for (const RecordVal &Val : Values)
 if (Val.Name == Name) return &Val;
@@ -1388,10 +1384,6 @@ public:
 TemplateArgs.push_back(Name);
 }
 
-void addTemplateArg(StringRef Name) {
-addTemplateArg(StringInit::get(Name));
-}
-
 void addValue(const RecordVal &RV) {
 assert(getValue(RV.getNameInit()) == nullptr && "Value already added!");
 Values.push_back(RV);
@ -81,15 +81,11 @@ public:
|
|||||||
/// \p ExportLists contains for each Module the set of globals (GUID) that will
|
/// \p ExportLists contains for each Module the set of globals (GUID) that will
|
||||||
/// be imported by another module, or referenced by such a function. I.e. this
|
/// be imported by another module, or referenced by such a function. I.e. this
|
||||||
/// is the set of globals that need to be promoted/renamed appropriately.
|
/// is the set of globals that need to be promoted/renamed appropriately.
|
||||||
///
|
|
||||||
/// \p DeadSymbols (optional) contains a list of GUID that are deemed "dead" and
|
|
||||||
/// will be ignored for the purpose of importing.
|
|
||||||
void ComputeCrossModuleImport(
|
void ComputeCrossModuleImport(
|
||||||
const ModuleSummaryIndex &Index,
|
const ModuleSummaryIndex &Index,
|
||||||
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
|
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
|
||||||
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
|
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
|
||||||
StringMap<FunctionImporter::ExportSetTy> &ExportLists,
|
StringMap<FunctionImporter::ExportSetTy> &ExportLists);
|
||||||
const DenseSet<GlobalValue::GUID> *DeadSymbols = nullptr);
|
|
||||||
|
|
||||||
/// Compute all the imports for the given module using the Index.
|
/// Compute all the imports for the given module using the Index.
|
||||||
///
|
///
|
||||||
@ -102,9 +98,9 @@ void ComputeCrossModuleImportForModule(
 /// Compute all the symbols that are "dead": i.e these that can't be reached
 /// in the graph from any of the given symbols listed in
 /// \p GUIDPreservedSymbols.
-DenseSet<GlobalValue::GUID>
-computeDeadSymbols(const ModuleSummaryIndex &Index,
+void computeDeadSymbols(
+    ModuleSummaryIndex &Index,
     const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols);
 
 /// Compute the set of summaries needed for a ThinLTO backend compilation of
 /// \p ModulePath.
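The two FunctionImport.h hunks above change the dead-symbol flow: computeDeadSymbols now records liveness on the index itself instead of returning a GUID set, and ComputeCrossModuleImport drops its DeadSymbols parameter. A minimal caller sketch of the new shape, assuming an index, a preserved-GUID set, and the per-module summary map are already built (the function name and surrounding glue here are illustrative, not from the patch):

#include "llvm/Transforms/IPO/FunctionImport.h"
using namespace llvm;

// Hypothetical driver showing the new call sequence.
void planThinLTOImports(
    ModuleSummaryIndex &Index,
    const DenseSet<GlobalValue::GUID> &Preserved,
    const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries) {
  // Liveness is now stored on the summaries themselves ...
  computeDeadSymbols(Index, Preserved);

  StringMap<FunctionImporter::ImportMapTy> ImportLists;
  StringMap<FunctionImporter::ExportSetTy> ExportLists;
  // ... so the import computation no longer takes a DeadSymbols set.
  ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
                           ExportLists);
}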
@ -177,6 +177,7 @@ struct SanitizerCoverageOptions {
   bool Use8bitCounters = false;
   bool TracePC = false;
   bool TracePCGuard = false;
+  bool Inline8bitCounters = false;
   bool NoPrune = false;
 
   SanitizerCoverageOptions() = default;
@ -36,6 +36,7 @@ class BasicBlock;
 class BlockFrequencyInfo;
 class CallInst;
 class CallGraph;
+class DebugInfoFinder;
 class DominatorTree;
 class Function;
 class Instruction;
@ -110,7 +111,8 @@ struct ClonedCodeInfo {
 ///
 BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
                             const Twine &NameSuffix = "", Function *F = nullptr,
-                            ClonedCodeInfo *CodeInfo = nullptr);
+                            ClonedCodeInfo *CodeInfo = nullptr,
+                            DebugInfoFinder *DIFinder = nullptr);
 
 /// CloneFunction - Return a copy of the specified function and add it to that
 /// function's module. Also, any references specified in the VMap are changed
@ -1170,7 +1170,9 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                 const DataLayout &DL,
                                                 const TargetLibraryInfo *TLI) {
   // fold: icmp (inttoptr x), null -> icmp x, 0
+  // fold: icmp null, (inttoptr x) -> icmp 0, x
   // fold: icmp (ptrtoint x), 0 -> icmp x, null
+  // fold: icmp 0, (ptrtoint x) -> icmp null, x
   // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
   // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
   //
@ -1240,6 +1242,11 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
           Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
       return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
     }
+  } else if (isa<ConstantExpr>(Ops1)) {
+    // If RHS is a constant expression, but the left side isn't, swap the
+    // operands and try again.
+    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
+    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
   }
 
   return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
@ -43,7 +43,7 @@ static cl::opt<unsigned>
 // The percent threshold for the direct-call target (this call site vs the
 // total call count) for it to be considered as the promotion target.
 static cl::opt<unsigned>
-    ICPPercentThreshold("icp-percent-threshold", cl::init(33), cl::Hidden,
+    ICPPercentThreshold("icp-percent-threshold", cl::init(30), cl::Hidden,
                         cl::ZeroOrMore,
                         cl::desc("The percentage threshold for the promotion"));
 
@ -54,11 +54,6 @@ static cl::opt<int>
                           cl::init(45),
                           cl::desc("Threshold for inlining cold callsites"));
 
-static cl::opt<bool>
-    EnableGenericSwitchCost("inline-generic-switch-cost", cl::Hidden,
-                            cl::init(false),
-                            cl::desc("Enable generic switch cost model"));
-
 // We introduce this threshold to help performance of instrumentation based
 // PGO before we actually hook up inliner with analysis passes such as BPI and
 // BFI.
@ -1015,83 +1010,68 @@ bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
   if (isa<ConstantInt>(V))
     return true;
 
-  if (EnableGenericSwitchCost) {
-    // Assume the most general case where the swith is lowered into
-    // either a jump table, bit test, or a balanced binary tree consisting of
-    // case clusters without merging adjacent clusters with the same
-    // destination. We do not consider the switches that are lowered with a mix
-    // of jump table/bit test/binary search tree. The cost of the switch is
-    // proportional to the size of the tree or the size of jump table range.
-    //
-    // Exit early for a large switch, assuming one case needs at least one
-    // instruction.
-    // FIXME: This is not true for a bit test, but ignore such case for now to
-    // save compile-time.
-    int64_t CostLowerBound =
-        std::min((int64_t)INT_MAX,
-                 (int64_t)SI.getNumCases() * InlineConstants::InstrCost + Cost);
-
-    if (CostLowerBound > Threshold) {
-      Cost = CostLowerBound;
-      return false;
-    }
-
-    unsigned JumpTableSize = 0;
-    unsigned NumCaseCluster =
-        TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);
-
-    // If suitable for a jump table, consider the cost for the table size and
-    // branch to destination.
-    if (JumpTableSize) {
-      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
-                       4 * InlineConstants::InstrCost;
-      Cost = std::min((int64_t)INT_MAX, JTCost + Cost);
-      return false;
-    }
-
-    // Considering forming a binary search, we should find the number of nodes
-    // which is same as the number of comparisons when lowered. For a given
-    // number of clusters, n, we can define a recursive function, f(n), to find
-    // the number of nodes in the tree. The recursion is :
-    // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
-    // and f(n) = n, when n <= 3.
-    // This will lead a binary tree where the leaf should be either f(2) or f(3)
-    // when n > 3. So, the number of comparisons from leaves should be n, while
-    // the number of non-leaf should be :
-    // 2^(log2(n) - 1) - 1
-    // = 2^log2(n) * 2^-1 - 1
-    // = n / 2 - 1.
-    // Considering comparisons from leaf and non-leaf nodes, we can estimate the
-    // number of comparisons in a simple closed form :
-    // n + n / 2 - 1 = n * 3 / 2 - 1
-    if (NumCaseCluster <= 3) {
-      // Suppose a comparison includes one compare and one conditional branch.
-      Cost += NumCaseCluster * 2 * InlineConstants::InstrCost;
-      return false;
-    }
-    int64_t ExpectedNumberOfCompare = 3 * (uint64_t)NumCaseCluster / 2 - 1;
-    uint64_t SwitchCost =
-        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
-    Cost = std::min((uint64_t)INT_MAX, SwitchCost + Cost);
-    return false;
-  }
-
-  // Use a simple switch cost model where we accumulate a cost proportional to
-  // the number of distinct successor blocks. This fan-out in the CFG cannot
-  // be represented for free even if we can represent the core switch as a
-  // jumptable that takes a single instruction.
-  ///
+  // Assume the most general case where the swith is lowered into
+  // either a jump table, bit test, or a balanced binary tree consisting of
+  // case clusters without merging adjacent clusters with the same
+  // destination. We do not consider the switches that are lowered with a mix
+  // of jump table/bit test/binary search tree. The cost of the switch is
+  // proportional to the size of the tree or the size of jump table range.
+  //
   // NB: We convert large switches which are just used to initialize large phi
   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
   // inlining those. It will prevent inlining in cases where the optimization
   // does not (yet) fire.
-  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
-  SuccessorBlocks.insert(SI.getDefaultDest());
-  for (auto Case : SI.cases())
-    SuccessorBlocks.insert(Case.getCaseSuccessor());
-  // Add cost corresponding to the number of distinct destinations. The first
-  // we model as free because of fallthrough.
-  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
+  // Exit early for a large switch, assuming one case needs at least one
+  // instruction.
+  // FIXME: This is not true for a bit test, but ignore such case for now to
+  // save compile-time.
+  int64_t CostLowerBound =
+      std::min((int64_t)INT_MAX,
+               (int64_t)SI.getNumCases() * InlineConstants::InstrCost + Cost);
+
+  if (CostLowerBound > Threshold) {
+    Cost = CostLowerBound;
+    return false;
+  }
+
+  unsigned JumpTableSize = 0;
+  unsigned NumCaseCluster =
+      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);
+
+  // If suitable for a jump table, consider the cost for the table size and
+  // branch to destination.
+  if (JumpTableSize) {
+    int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
+                     4 * InlineConstants::InstrCost;
+    Cost = std::min((int64_t)INT_MAX, JTCost + Cost);
+    return false;
+  }
+
+  // Considering forming a binary search, we should find the number of nodes
+  // which is same as the number of comparisons when lowered. For a given
+  // number of clusters, n, we can define a recursive function, f(n), to find
+  // the number of nodes in the tree. The recursion is :
+  // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
+  // and f(n) = n, when n <= 3.
+  // This will lead a binary tree where the leaf should be either f(2) or f(3)
+  // when n > 3. So, the number of comparisons from leaves should be n, while
+  // the number of non-leaf should be :
+  // 2^(log2(n) - 1) - 1
+  // = 2^log2(n) * 2^-1 - 1
+  // = n / 2 - 1.
+  // Considering comparisons from leaf and non-leaf nodes, we can estimate the
+  // number of comparisons in a simple closed form :
+  // n + n / 2 - 1 = n * 3 / 2 - 1
+  if (NumCaseCluster <= 3) {
+    // Suppose a comparison includes one compare and one conditional branch.
+    Cost += NumCaseCluster * 2 * InlineConstants::InstrCost;
+    return false;
+  }
+  int64_t ExpectedNumberOfCompare = 3 * (uint64_t)NumCaseCluster / 2 - 1;
+  uint64_t SwitchCost =
+      ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
+  Cost = std::min((uint64_t)INT_MAX, SwitchCost + Cost);
   return false;
 }
 
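The comments in the new visitSwitchInst body derive the "n * 3 / 2 - 1" comparison estimate for a balanced binary search tree and a size-plus-overhead estimate for a jump table. A standalone sketch of just that arithmetic, assuming an InlineConstants::InstrCost of 5 (an assumption, not quoted from the patch); it is meant to illustrate the model, not reproduce the real CallAnalyzer:

#include <algorithm>
#include <climits>
#include <cstdint>

// Hypothetical stand-in for InlineConstants::InstrCost.
static const int64_t InstrCost = 5;

// Estimate the cost a lowered switch contributes: a jump table costs its
// size plus a fixed branch overhead; up to three clusters cost one
// compare+branch each; otherwise a balanced search tree costs roughly
// n * 3 / 2 - 1 compare+branch pairs.
int64_t estimateSwitchCost(uint64_t NumCaseCluster, uint64_t JumpTableSize) {
  if (JumpTableSize)
    return std::min<int64_t>(
        INT_MAX, (int64_t)JumpTableSize * InstrCost + 4 * InstrCost);
  if (NumCaseCluster <= 3)
    return (int64_t)NumCaseCluster * 2 * InstrCost;
  uint64_t ExpectedNumberOfCompare = 3 * NumCaseCluster / 2 - 1;
  return (int64_t)std::min<uint64_t>(INT_MAX,
                                     ExpectedNumberOfCompare * 2 * InstrCost);
}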
@ -662,13 +662,13 @@ namespace {
   bool solveBlockValuePHINode(LVILatticeVal &BBLV, PHINode *PN, BasicBlock *BB);
   bool solveBlockValueSelect(LVILatticeVal &BBLV, SelectInst *S,
                              BasicBlock *BB);
-  bool solveBlockValueBinaryOp(LVILatticeVal &BBLV, Instruction *BBI,
+  bool solveBlockValueBinaryOp(LVILatticeVal &BBLV, BinaryOperator *BBI,
                                BasicBlock *BB);
-  bool solveBlockValueCast(LVILatticeVal &BBLV, Instruction *BBI,
+  bool solveBlockValueCast(LVILatticeVal &BBLV, CastInst *CI,
                            BasicBlock *BB);
   void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                      LVILatticeVal &BBLV,
                                                      Instruction *BBI);
 
   void solve();
 
@ -849,12 +849,12 @@ bool LazyValueInfoImpl::solveBlockValueImpl(LVILatticeVal &Res,
     return true;
   }
   if (BBI->getType()->isIntegerTy()) {
-    if (isa<CastInst>(BBI))
-      return solveBlockValueCast(Res, BBI, BB);
+    if (auto *CI = dyn_cast<CastInst>(BBI))
+      return solveBlockValueCast(Res, CI, BB);
 
     BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
     if (BO && isa<ConstantInt>(BO->getOperand(1)))
-      return solveBlockValueBinaryOp(Res, BBI, BB);
+      return solveBlockValueBinaryOp(Res, BO, BB);
   }
 
   DEBUG(dbgs() << " compute BB '" << BB->getName()
@ -1168,9 +1168,9 @@ bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
 }
 
 bool LazyValueInfoImpl::solveBlockValueCast(LVILatticeVal &BBLV,
-                                            Instruction *BBI,
+                                            CastInst *CI,
                                             BasicBlock *BB) {
-  if (!BBI->getOperand(0)->getType()->isSized()) {
+  if (!CI->getOperand(0)->getType()->isSized()) {
     // Without knowing how wide the input is, we can't analyze it in any useful
     // way.
     BBLV = LVILatticeVal::getOverdefined();
@ -1180,7 +1180,7 @@ bool LazyValueInfoImpl::solveBlockValueCast(LVILatticeVal &BBLV,
   // Filter out casts we don't know how to reason about before attempting to
   // recurse on our operand. This can cut a long search short if we know we're
   // not going to be able to get any useful information anways.
-  switch (BBI->getOpcode()) {
+  switch (CI->getOpcode()) {
   case Instruction::Trunc:
   case Instruction::SExt:
   case Instruction::ZExt:
@ -1197,44 +1197,43 @@ bool LazyValueInfoImpl::solveBlockValueCast(LVILatticeVal &BBLV,
   // Figure out the range of the LHS. If that fails, we still apply the
   // transfer rule on the full set since we may be able to locally infer
   // interesting facts.
-  if (!hasBlockValue(BBI->getOperand(0), BB))
-    if (pushBlockValue(std::make_pair(BB, BBI->getOperand(0))))
+  if (!hasBlockValue(CI->getOperand(0), BB))
+    if (pushBlockValue(std::make_pair(BB, CI->getOperand(0))))
       // More work to do before applying this transfer rule.
       return false;
 
   const unsigned OperandBitWidth =
-      DL.getTypeSizeInBits(BBI->getOperand(0)->getType());
+      DL.getTypeSizeInBits(CI->getOperand(0)->getType());
   ConstantRange LHSRange = ConstantRange(OperandBitWidth);
-  if (hasBlockValue(BBI->getOperand(0), BB)) {
-    LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
-    intersectAssumeOrGuardBlockValueConstantRange(BBI->getOperand(0), LHSVal,
-                                                  BBI);
+  if (hasBlockValue(CI->getOperand(0), BB)) {
+    LVILatticeVal LHSVal = getBlockValue(CI->getOperand(0), BB);
+    intersectAssumeOrGuardBlockValueConstantRange(CI->getOperand(0), LHSVal,
+                                                  CI);
     if (LHSVal.isConstantRange())
       LHSRange = LHSVal.getConstantRange();
   }
 
-  const unsigned ResultBitWidth =
-      cast<IntegerType>(BBI->getType())->getBitWidth();
+  const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
 
   // NOTE: We're currently limited by the set of operations that ConstantRange
   // can evaluate symbolically. Enhancing that set will allows us to analyze
   // more definitions.
-  auto CastOp = (Instruction::CastOps) BBI->getOpcode();
-  BBLV = LVILatticeVal::getRange(LHSRange.castOp(CastOp, ResultBitWidth));
+  BBLV = LVILatticeVal::getRange(LHSRange.castOp(CI->getOpcode(),
+                                                 ResultBitWidth));
   return true;
 }
 
 bool LazyValueInfoImpl::solveBlockValueBinaryOp(LVILatticeVal &BBLV,
-                                                Instruction *BBI,
+                                                BinaryOperator *BO,
                                                 BasicBlock *BB) {
 
-  assert(BBI->getOperand(0)->getType()->isSized() &&
+  assert(BO->getOperand(0)->getType()->isSized() &&
          "all operands to binary operators are sized");
 
   // Filter out operators we don't know how to reason about before attempting to
   // recurse on our operand(s). This can cut a long search short if we know
-  // we're not going to be able to get any useful information anways.
-  switch (BBI->getOpcode()) {
+  // we're not going to be able to get any useful information anyways.
+  switch (BO->getOpcode()) {
   case Instruction::Add:
   case Instruction::Sub:
   case Instruction::Mul:
@ -1256,29 +1255,29 @@ bool LazyValueInfoImpl::solveBlockValueBinaryOp(LVILatticeVal &BBLV,
   // Figure out the range of the LHS. If that fails, use a conservative range,
   // but apply the transfer rule anyways. This lets us pick up facts from
   // expressions like "and i32 (call i32 @foo()), 32"
-  if (!hasBlockValue(BBI->getOperand(0), BB))
-    if (pushBlockValue(std::make_pair(BB, BBI->getOperand(0))))
+  if (!hasBlockValue(BO->getOperand(0), BB))
+    if (pushBlockValue(std::make_pair(BB, BO->getOperand(0))))
       // More work to do before applying this transfer rule.
       return false;
 
   const unsigned OperandBitWidth =
-      DL.getTypeSizeInBits(BBI->getOperand(0)->getType());
+      DL.getTypeSizeInBits(BO->getOperand(0)->getType());
   ConstantRange LHSRange = ConstantRange(OperandBitWidth);
-  if (hasBlockValue(BBI->getOperand(0), BB)) {
-    LVILatticeVal LHSVal = getBlockValue(BBI->getOperand(0), BB);
-    intersectAssumeOrGuardBlockValueConstantRange(BBI->getOperand(0), LHSVal,
-                                                  BBI);
+  if (hasBlockValue(BO->getOperand(0), BB)) {
+    LVILatticeVal LHSVal = getBlockValue(BO->getOperand(0), BB);
+    intersectAssumeOrGuardBlockValueConstantRange(BO->getOperand(0), LHSVal,
+                                                  BO);
     if (LHSVal.isConstantRange())
       LHSRange = LHSVal.getConstantRange();
   }
 
-  ConstantInt *RHS = cast<ConstantInt>(BBI->getOperand(1));
+  ConstantInt *RHS = cast<ConstantInt>(BO->getOperand(1));
   ConstantRange RHSRange = ConstantRange(RHS->getValue());
 
   // NOTE: We're currently limited by the set of operations that ConstantRange
   // can evaluate symbolically. Enhancing that set will allows us to analyze
   // more definitions.
-  auto BinOp = (Instruction::BinaryOps) BBI->getOpcode();
+  Instruction::BinaryOps BinOp = BO->getOpcode();
   BBLV = LVILatticeVal::getRange(LHSRange.binaryOp(BinOp, RHSRange));
   return true;
 }
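Both LazyValueInfo hunks lean on ConstantRange's symbolic transfer functions, now driven through the more precise CastInst/BinaryOperator opcodes. A small illustration of the two ConstantRange entry points they call, castOp and binaryOp (a sketch for orientation, not part of the patch):

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

void constantRangeTransferExample() {
  // Input known to lie in [0, 10) as an 8-bit range.
  ConstantRange LHSRange(APInt(8, 0), APInt(8, 10));

  // castOp applies a cast's transfer function: zext i8 -> i32 keeps [0, 10).
  ConstantRange Widened = LHSRange.castOp(Instruction::ZExt, 32);

  // binaryOp applies a binary operator against another range:
  // adding the constant 5 shifts the range to [5, 15).
  ConstantRange RHSRange(APInt(32, 5));
  ConstantRange Sum = Widened.binaryOp(Instruction::Add, RHSRange);
  (void)Sum;
}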
@ -275,7 +275,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
       // FIXME: refactor this to use the same code that inliner is using.
       F.isVarArg();
   GlobalValueSummary::GVFlags Flags(F.getLinkage(), NotEligibleForImport,
-                                    /* LiveRoot = */ false);
+                                    /* Live = */ false);
   auto FuncSummary = llvm::make_unique<FunctionSummary>(
       Flags, NumInsts, RefEdges.takeVector(), CallGraphEdges.takeVector(),
       TypeTests.takeVector(), TypeTestAssumeVCalls.takeVector(),
@ -295,7 +295,7 @@ computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
   findRefEdges(Index, &V, RefEdges, Visited);
   bool NonRenamableLocal = isNonRenamableLocal(V);
   GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
-                                    /* LiveRoot = */ false);
+                                    /* Live = */ false);
   auto GVarSummary =
       llvm::make_unique<GlobalVarSummary>(Flags, RefEdges.takeVector());
   if (NonRenamableLocal)
@ -308,7 +308,7 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
                     DenseSet<GlobalValue::GUID> &CantBePromoted) {
   bool NonRenamableLocal = isNonRenamableLocal(A);
   GlobalValueSummary::GVFlags Flags(A.getLinkage(), NonRenamableLocal,
-                                    /* LiveRoot = */ false);
+                                    /* Live = */ false);
   auto AS = llvm::make_unique<AliasSummary>(Flags, ArrayRef<ValueInfo>{});
   auto *Aliasee = A.getBaseObject();
   auto *AliaseeSummary = Index.getGlobalValueSummary(*Aliasee);
@ -323,7 +323,7 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
 static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
   if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
     for (auto &Summary : VI.getSummaryList())
-      Summary->setLiveRoot();
+      Summary->setLive(true);
 }
 
 ModuleSummaryIndex llvm::buildModuleSummaryIndex(
@ -423,8 +423,8 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
           return;
         assert(GV->isDeclaration() && "Def in module asm already has definition");
         GlobalValueSummary::GVFlags GVFlags(GlobalValue::InternalLinkage,
-                                            /* NotEligibleToImport */ true,
-                                            /* LiveRoot */ true);
+                                            /* NotEligibleToImport = */ true,
+                                            /* Live = */ true);
         CantBePromoted.insert(GlobalValue::getGUID(Name));
         // Create the appropriate summary type.
         if (isa<Function>(GV)) {
@ -55,7 +55,7 @@ bool OrderedBasicBlock::comesBefore(const Instruction *A,
   assert(II != IE && "Instruction not found?");
   assert((Inst == A || Inst == B) && "Should find A or B");
   LastInstFound = II;
-  return Inst == A;
+  return Inst != B;
 }
 
 /// \brief Find out whether \p A dominates \p B, meaning whether \p A
@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 #include "llvm/Analysis/RegionPass.h"
 #include "llvm/Analysis/RegionIterator.h"
+#include "llvm/IR/OptBisect.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/Timer.h"
 #include "llvm/Support/raw_ostream.h"
@ -280,3 +281,18 @@ Pass *RegionPass::createPrinterPass(raw_ostream &O,
                                     const std::string &Banner) const {
   return new PrintRegionPass(Banner, O);
 }
+
+bool RegionPass::skipRegion(Region &R) const {
+  Function &F = *R.getEntry()->getParent();
+  if (!F.getContext().getOptBisect().shouldRunPass(this, R))
+    return true;
+
+  if (F.hasFnAttribute(Attribute::OptimizeNone)) {
+    // Report this only once per function.
+    if (R.getEntry() == &F.getEntryBlock())
+      DEBUG(dbgs() << "Skipping pass '" << getPassName()
+            << "' on function " << F.getName() << "\n");
+    return true;
+  }
+  return false;
+}
@ -865,11 +865,11 @@ static GlobalValueSummary::GVFlags getDecodedGVSummaryFlags(uint64_t RawFlags,
   auto Linkage = GlobalValue::LinkageTypes(RawFlags & 0xF); // 4 bits
   RawFlags = RawFlags >> 4;
   bool NotEligibleToImport = (RawFlags & 0x1) || Version < 3;
-  // The LiveRoot flag wasn't introduced until version 3. For dead stripping
+  // The Live flag wasn't introduced until version 3. For dead stripping
   // to work correctly on earlier versions, we must conservatively treat all
   // values as live.
-  bool LiveRoot = (RawFlags & 0x2) || Version < 3;
-  return GlobalValueSummary::GVFlags(Linkage, NotEligibleToImport, LiveRoot);
+  bool Live = (RawFlags & 0x2) || Version < 3;
+  return GlobalValueSummary::GVFlags(Linkage, NotEligibleToImport, Live);
 }
 
 static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) {
@ -351,7 +351,8 @@ public:
   /// Calls the callback for each value GUID and summary to be written to
   /// bitcode. This hides the details of whether they are being pulled from the
   /// entire index or just those in a provided ModuleToSummariesForIndex map.
-  void forEachSummary(std::function<void(GVInfo)> Callback) {
+  template<typename Functor>
+  void forEachSummary(Functor Callback) {
     if (ModuleToSummariesForIndex) {
       for (auto &M : *ModuleToSummariesForIndex)
         for (auto &Summary : M.second)
@ -363,6 +364,29 @@ public:
       }
     }
   }
 
+  /// Calls the callback for each entry in the modulePaths StringMap that
+  /// should be written to the module path string table. This hides the details
+  /// of whether they are being pulled from the entire index or just those in a
+  /// provided ModuleToSummariesForIndex map.
+  template <typename Functor> void forEachModule(Functor Callback) {
+    if (ModuleToSummariesForIndex) {
+      for (const auto &M : *ModuleToSummariesForIndex) {
+        const auto &MPI = Index.modulePaths().find(M.first);
+        if (MPI == Index.modulePaths().end()) {
+          // This should only happen if the bitcode file was empty, in which
+          // case we shouldn't be importing (the ModuleToSummariesForIndex
+          // would only include the module we are writing and index for).
+          assert(ModuleToSummariesForIndex->size() == 1);
+          continue;
+        }
+        Callback(*MPI);
+      }
+    } else {
+      for (const auto &MPSE : Index.modulePaths())
+        Callback(MPSE);
+    }
+  }
+
   /// Main entry point for writing a combined index to bitcode.
   void write();
 
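The new forEachModule helper centralizes the full-index versus subset decision that callers previously made by hand with doIncludeModule. The dispatch pattern itself is easy to try outside the writer; the following is a generic, self-contained sketch of the same idea (names and the uint64_t payload are illustrative, not from the patch):

#include "llvm/ADT/StringMap.h"
#include <cstdint>
using namespace llvm;

// Visit either only the entries selected by an optional subset, or every
// entry of the full table, mirroring the forEachModule dispatch above.
template <typename Functor>
void forEachSelected(const StringMap<uint64_t> &FullTable,
                     const StringMap<uint64_t> *Subset, Functor Callback) {
  if (Subset) {
    for (const auto &S : *Subset) {
      auto It = FullTable.find(S.getKey());
      if (It == FullTable.end())
        continue; // mirrors the empty-bitcode guard in forEachModule
      Callback(*It);
    }
  } else {
    for (const auto &Entry : FullTable)
      Callback(Entry);
  }
}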
@ -370,14 +394,6 @@ private:
   void writeModStrings();
   void writeCombinedGlobalValueSummary();
 
-  /// Indicates whether the provided \p ModulePath should be written into
-  /// the module string table, e.g. if full index written or if it is in
-  /// the provided subset.
-  bool doIncludeModule(StringRef ModulePath) {
-    return !ModuleToSummariesForIndex ||
-           ModuleToSummariesForIndex->count(ModulePath);
-  }
-
   Optional<unsigned> getValueId(GlobalValue::GUID ValGUID) {
     auto VMI = GUIDToValueIdMap.find(ValGUID);
     if (VMI == GUIDToValueIdMap.end())
@ -864,7 +880,7 @@ static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
   uint64_t RawFlags = 0;
 
   RawFlags |= Flags.NotEligibleToImport; // bool
-  RawFlags |= (Flags.LiveRoot << 1);
+  RawFlags |= (Flags.Live << 1);
   // Linkage don't need to be remapped at that time for the summary. Any future
   // change to the getEncodedLinkage() function will need to be taken into
   // account here as well.
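Read together with the reader-side hunk above, the raw flag word ends up with the linkage in the low four bits, NotEligibleToImport at bit 4, and the renamed Live bit at bit 5. A small standalone round-trip sketch of that packing, assuming the linkage is placed in the low four bits the way the decoder expects (this is illustrative, not the bitcode writer's actual helpers):

#include <cstdint>

// Standalone illustration of the summary flag packing:
// bits 0-3 hold the linkage, bit 4 NotEligibleToImport, bit 5 Live.
struct RawGVFlags {
  unsigned Linkage : 4;
  bool NotEligibleToImport : 1;
  bool Live : 1;
};

uint64_t encodeGVFlags(const RawGVFlags &F) {
  uint64_t Raw = 0;
  Raw |= F.NotEligibleToImport; // bit 0 before the linkage shift
  Raw |= (uint64_t)F.Live << 1; // bit 1 before the linkage shift
  return (Raw << 4) | F.Linkage;
}

RawGVFlags decodeGVFlags(uint64_t Raw, unsigned Version) {
  RawGVFlags F;
  F.Linkage = Raw & 0xF;
  Raw >>= 4;
  F.NotEligibleToImport = (Raw & 0x1) || Version < 3;
  // Pre-version-3 summaries carry no Live bit, so treat everything as live.
  F.Live = (Raw & 0x2) || Version < 3;
  return F;
}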
@ -968,19 +984,18 @@ void ModuleBitcodeWriter::writeValueSymbolTableForwardDecl() {
 enum StringEncoding { SE_Char6, SE_Fixed7, SE_Fixed8 };
 
 /// Determine the encoding to use for the given string name and length.
-static StringEncoding getStringEncoding(const char *Str, unsigned StrLen) {
+static StringEncoding getStringEncoding(StringRef Str) {
   bool isChar6 = true;
-  for (const char *C = Str, *E = C + StrLen; C != E; ++C) {
+  for (char C : Str) {
     if (isChar6)
-      isChar6 = BitCodeAbbrevOp::isChar6(*C);
-    if ((unsigned char)*C & 128)
+      isChar6 = BitCodeAbbrevOp::isChar6(C);
+    if ((unsigned char)C & 128)
       // don't bother scanning the rest.
       return SE_Fixed8;
   }
   if (isChar6)
     return SE_Char6;
-  else
-    return SE_Fixed7;
+  return SE_Fixed7;
 }
 
 /// Emit top-level description of module, including target triple, inline asm,
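For reference, the three encodings picked by getStringEncoding trade abbreviation width for coverage: Char6 for the [a-zA-Z0-9._] alphabet, Fixed7 for other 7-bit ASCII, and Fixed8 as soon as any byte has the high bit set. A tiny standalone classifier equivalent to the new StringRef version (illustrative only; the real code defers the Char6 test to BitCodeAbbrevOp::isChar6):

#include <string>

enum StringEncodingKind { Char6, Fixed7, Fixed8 };

// Same decision procedure as getStringEncoding above, written against
// std::string so it can be exercised outside the bitcode writer.
static StringEncodingKind classify(const std::string &Str) {
  bool IsChar6 = true;
  for (unsigned char C : Str) {
    if (IsChar6)
      IsChar6 = (C >= 'a' && C <= 'z') || (C >= 'A' && C <= 'Z') ||
                (C >= '0' && C <= '9') || C == '.' || C == '_';
    if (C & 128)
      return Fixed8; // don't bother scanning the rest
  }
  return IsChar6 ? Char6 : Fixed7;
}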
@ -1073,8 +1088,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
   SmallVector<unsigned, 64> Vals;
   // Emit the module's source file name.
   {
-    StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(),
-                                            M.getSourceFileName().size());
+    StringEncoding Bits = getStringEncoding(M.getSourceFileName());
     BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
     if (Bits == SE_Char6)
       AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
@ -2790,8 +2804,7 @@ void ModuleBitcodeWriter::writeFunctionLevelValueSymbolTable(
 
   for (const ValueName &Name : VST) {
     // Figure out the encoding to use for the name.
-    StringEncoding Bits =
-        getStringEncoding(Name.getKeyData(), Name.getKeyLength());
+    StringEncoding Bits = getStringEncoding(Name.getKey());
 
     unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
     NameVals.push_back(VE.getValueID(Name.getValue()));
@ -3149,41 +3162,33 @@ void IndexBitcodeWriter::writeModStrings() {
   unsigned AbbrevHash = Stream.EmitAbbrev(std::move(Abbv));
 
   SmallVector<unsigned, 64> Vals;
-  for (const auto &MPSE : Index.modulePaths()) {
-    if (!doIncludeModule(MPSE.getKey()))
-      continue;
-    StringEncoding Bits =
-        getStringEncoding(MPSE.getKey().data(), MPSE.getKey().size());
+  forEachModule(
+      [&](const StringMapEntry<std::pair<uint64_t, ModuleHash>> &MPSE) {
+        StringRef Key = MPSE.getKey();
+        const auto &Value = MPSE.getValue();
+        StringEncoding Bits = getStringEncoding(Key);
     unsigned AbbrevToUse = Abbrev8Bit;
     if (Bits == SE_Char6)
       AbbrevToUse = Abbrev6Bit;
     else if (Bits == SE_Fixed7)
       AbbrevToUse = Abbrev7Bit;
 
-    Vals.push_back(MPSE.getValue().first);
+        Vals.push_back(Value.first);
+        Vals.append(Key.begin(), Key.end());
 
-    for (const auto P : MPSE.getKey())
-      Vals.push_back((unsigned char)P);
+        // Emit the finished record.
+        Stream.EmitRecord(bitc::MST_CODE_ENTRY, Vals, AbbrevToUse);
 
-    // Emit the finished record.
-    Stream.EmitRecord(bitc::MST_CODE_ENTRY, Vals, AbbrevToUse);
+        // Emit an optional hash for the module now
+        const auto &Hash = Value.second;
+        if (llvm::any_of(Hash, [](uint32_t H) { return H; })) {
+          Vals.assign(Hash.begin(), Hash.end());
+          // Emit the hash record.
+          Stream.EmitRecord(bitc::MST_CODE_HASH, Vals, AbbrevHash);
+        }
 
     Vals.clear();
-    // Emit an optional hash for the module now
-    auto &Hash = MPSE.getValue().second;
-    bool AllZero = true; // Detect if the hash is empty, and do not generate it
-    for (auto Val : Hash) {
-      if (Val)
-        AllZero = false;
-      Vals.push_back(Val);
-    }
-    if (!AllZero) {
-      // Emit the hash record.
-      Stream.EmitRecord(bitc::MST_CODE_HASH, Vals, AbbrevHash);
-    }
-
-    Vals.clear();
-  }
+      });
   Stream.ExitBlock();
 }
 
@ -194,6 +194,10 @@ void llvm::calculateDbgValueHistory(const MachineFunction *MF,
       // some variables.
       for (const MachineOperand &MO : MI.operands()) {
         if (MO.isReg() && MO.isDef() && MO.getReg()) {
+          // Ignore call instructions that claim to clobber SP. The AArch64
+          // backend does this for aggregate function arguments.
+          if (MI.isCall() && MO.getReg() == SP)
+            continue;
           // If this is a virtual register, only clobber it since it doesn't
           // have aliases.
           if (TRI->isVirtualRegister(MO.getReg()))
@ -77,6 +77,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
   initializePostRASchedulerPass(Registry);
   initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
   initializeProcessImplicitDefsPass(Registry);
+  initializeRABasicPass(Registry);
   initializeRAGreedyPass(Registry);
   initializeRegisterCoalescerPass(Registry);
   initializeRenameIndependentSubregsPass(Registry);
@ -556,6 +556,10 @@ bool GlobalMerge::doInitialization(Module &M) {
     if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasSection())
       continue;
 
+    // It's not safe to merge globals that may be preempted
+    if (TM && !TM->shouldAssumeDSOLocal(M, &GV))
+      continue;
+
     if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
         !GV.hasInternalLinkage())
       continue;
@ -198,13 +198,12 @@ void LivePhysRegs::addLiveOutsNoPristines(const MachineBasicBlock &MBB) {
 }
 
 void LivePhysRegs::addLiveOuts(const MachineBasicBlock &MBB) {
+  const MachineFunction &MF = *MBB.getParent();
   if (!MBB.succ_empty()) {
-    const MachineFunction &MF = *MBB.getParent();
     addPristines(*this, MF);
     addLiveOutsNoPristines(MBB);
   } else if (MBB.isReturnBlock()) {
     // For the return block: Add all callee saved registers.
-    const MachineFunction &MF = *MBB.getParent();
     const MachineFrameInfo &MFI = MF.getFrameInfo();
     if (MFI.isCalleeSavedInfoValid())
       addCalleeSavedRegs(*this, MF);
@ -12,11 +12,13 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/LiveRegUnits.h"
+
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBundle.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 
@ -81,46 +83,50 @@ void LiveRegUnits::accumulateBackward(const MachineInstr &MI) {
 }
 
 /// Add live-in registers of basic block \p MBB to \p LiveUnits.
-static void addLiveIns(LiveRegUnits &LiveUnits, const MachineBasicBlock &MBB) {
+static void addBlockLiveIns(LiveRegUnits &LiveUnits,
+                            const MachineBasicBlock &MBB) {
   for (const auto &LI : MBB.liveins())
     LiveUnits.addRegMasked(LI.PhysReg, LI.LaneMask);
 }
 
-static void addLiveOuts(LiveRegUnits &LiveUnits, const MachineBasicBlock &MBB) {
-  // To get the live-outs we simply merge the live-ins of all successors.
-  for (const MachineBasicBlock *Succ : MBB.successors())
-    addLiveIns(LiveUnits, *Succ);
+/// Adds all callee saved registers to \p LiveUnits.
+static void addCalleeSavedRegs(LiveRegUnits &LiveUnits,
+                               const MachineFunction &MF) {
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  for (const MCPhysReg *CSR = MRI.getCalleeSavedRegs(); CSR && *CSR; ++CSR)
+    LiveUnits.addReg(*CSR);
 }
 
-/// Add pristine registers to the given \p LiveUnits. This function removes
-/// actually saved callee save registers when \p InPrologueEpilogue is false.
-static void removeSavedRegs(LiveRegUnits &LiveUnits, const MachineFunction &MF,
-                            const MachineFrameInfo &MFI,
-                            const TargetRegisterInfo &TRI) {
+/// Adds pristine registers to the given \p LiveUnits. Pristine registers are
+/// callee saved registers that are unused in the function.
+static void addPristines(LiveRegUnits &LiveUnits, const MachineFunction &MF) {
+  const MachineFrameInfo &MFI = MF.getFrameInfo();
+  if (!MFI.isCalleeSavedInfoValid())
+    return;
+  /// Add all callee saved regs, then remove the ones that are saved+restored.
+  addCalleeSavedRegs(LiveUnits, MF);
+  /// Remove the ones that are not saved/restored; they are pristine.
   for (const CalleeSavedInfo &Info : MFI.getCalleeSavedInfo())
     LiveUnits.removeReg(Info.getReg());
 }
 
 void LiveRegUnits::addLiveOuts(const MachineBasicBlock &MBB) {
   const MachineFunction &MF = *MBB.getParent();
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  if (MFI.isCalleeSavedInfoValid()) {
-    for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I)
-      addReg(*I);
-    if (!MBB.isReturnBlock())
-      removeSavedRegs(*this, MF, MFI, *TRI);
+  if (!MBB.succ_empty()) {
+    addPristines(*this, MF);
+    // To get the live-outs we simply merge the live-ins of all successors.
+    for (const MachineBasicBlock *Succ : MBB.successors())
+      addBlockLiveIns(*this, *Succ);
+  } else if (MBB.isReturnBlock()) {
+    // For the return block: Add all callee saved registers.
+    const MachineFrameInfo &MFI = MF.getFrameInfo();
+    if (MFI.isCalleeSavedInfoValid())
+      addCalleeSavedRegs(*this, MF);
   }
-  ::addLiveOuts(*this, MBB);
 }
 
 void LiveRegUnits::addLiveIns(const MachineBasicBlock &MBB) {
   const MachineFunction &MF = *MBB.getParent();
-  const MachineFrameInfo &MFI = MF.getFrameInfo();
-  if (MFI.isCalleeSavedInfoValid()) {
-    for (const MCPhysReg *I = TRI->getCalleeSavedRegs(&MF); *I; ++I)
-      addReg(*I);
-    if (&MBB != &MF.front())
-      removeSavedRegs(*this, MF, MFI, *TRI);
-  }
-  ::addLiveIns(*this, MBB);
+  addPristines(*this, MF);
+  addBlockLiveIns(*this, MBB);
 }
@ -1,7 +1,19 @@
-#include "llvm/CodeGen/MachineRegionInfo.h"
+//===- lib/Codegen/MachineRegionInfo.cpp ----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/RegionInfoImpl.h"
 #include "llvm/CodeGen/MachinePostDominators.h"
+#include "llvm/CodeGen/MachineRegionInfo.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
 
 #define DEBUG_TYPE "machine-region-info"
 
@ -11,36 +23,29 @@ STATISTIC(numMachineRegions, "The # of machine regions");
 STATISTIC(numMachineSimpleRegions, "The # of simple machine regions");
 
 namespace llvm {
 
 template class RegionBase<RegionTraits<MachineFunction>>;
 template class RegionNodeBase<RegionTraits<MachineFunction>>;
 template class RegionInfoBase<RegionTraits<MachineFunction>>;
-}
+
+} // end namespace llvm
 
 //===----------------------------------------------------------------------===//
 // MachineRegion implementation
-//
 
 MachineRegion::MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
                              MachineRegionInfo* RI,
                              MachineDominatorTree *DT, MachineRegion *Parent) :
-  RegionBase<RegionTraits<MachineFunction>>(Entry, Exit, RI, DT, Parent) {
+  RegionBase<RegionTraits<MachineFunction>>(Entry, Exit, RI, DT, Parent) {}
 
-}
+MachineRegion::~MachineRegion() = default;
 
-MachineRegion::~MachineRegion() { }
-
 //===----------------------------------------------------------------------===//
 // MachineRegionInfo implementation
-//
 
-MachineRegionInfo::MachineRegionInfo() :
-  RegionInfoBase<RegionTraits<MachineFunction>>() {
+MachineRegionInfo::MachineRegionInfo() = default;
 
-}
+MachineRegionInfo::~MachineRegionInfo() = default;
 
-MachineRegionInfo::~MachineRegionInfo() {
-
-}
-
 void MachineRegionInfo::updateStatistics(MachineRegion *R) {
   ++numMachineRegions;
@ -73,9 +78,7 @@ MachineRegionInfoPass::MachineRegionInfoPass() : MachineFunctionPass(ID) {
   initializeMachineRegionInfoPassPass(*PassRegistry::getPassRegistry());
 }
 
-MachineRegionInfoPass::~MachineRegionInfoPass() {
-
-}
+MachineRegionInfoPass::~MachineRegionInfoPass() = default;
 
 bool MachineRegionInfoPass::runOnMachineFunction(MachineFunction &F) {
   releaseMemory();
@ -137,8 +140,9 @@ INITIALIZE_PASS_END(MachineRegionInfoPass, DEBUG_TYPE,
 // the link time optimization.
 
 namespace llvm {
-  FunctionPass *createMachineRegionInfoPass() {
-    return new MachineRegionInfoPass();
-  }
+
+FunctionPass *createMachineRegionInfoPass() {
+  return new MachineRegionInfoPass();
 }
 
+} // end namespace llvm
@ -36,6 +36,7 @@
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
 #include "llvm/IR/BasicBlock.h"
 #include "llvm/IR/InlineAsm.h"
 #include "llvm/IR/Instructions.h"
@ -909,17 +910,43 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
     }
   }
 
-  // Generic loads and stores must have a single MachineMemOperand
-  // describing that access.
-  if ((MI->getOpcode() == TargetOpcode::G_LOAD ||
-       MI->getOpcode() == TargetOpcode::G_STORE) &&
-      !MI->hasOneMemOperand())
-    report("Generic instruction accessing memory must have one mem operand",
-           MI);
-
   StringRef ErrorInfo;
   if (!TII->verifyInstruction(*MI, ErrorInfo))
     report(ErrorInfo.data(), MI);
 
+  // Verify properties of various specific instruction types
+  switch(MI->getOpcode()) {
+  default:
+    break;
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_STORE:
+    // Generic loads and stores must have a single MachineMemOperand
+    // describing that access.
+    if (!MI->hasOneMemOperand())
+      report("Generic instruction accessing memory must have one mem operand",
+             MI);
+    break;
+  case TargetOpcode::STATEPOINT:
+    if (!MI->getOperand(StatepointOpers::IDPos).isImm() ||
+        !MI->getOperand(StatepointOpers::NBytesPos).isImm() ||
+        !MI->getOperand(StatepointOpers::NCallArgsPos).isImm())
+      report("meta operands to STATEPOINT not constant!", MI);
+    break;
+
+    auto VerifyStackMapConstant = [&](unsigned Offset) {
+      if (!MI->getOperand(Offset).isImm() ||
+          MI->getOperand(Offset).getImm() != StackMaps::ConstantOp ||
+          !MI->getOperand(Offset + 1).isImm())
+        report("stack map constant to STATEPOINT not well formed!", MI);
+    };
+    const unsigned VarStart = StatepointOpers(MI).getVarIdx();
+    VerifyStackMapConstant(VarStart + StatepointOpers::CCOffset);
+    VerifyStackMapConstant(VarStart + StatepointOpers::FlagsOffset);
+    VerifyStackMapConstant(VarStart + StatepointOpers::NumDeoptOperandsOffset);
+
+    // TODO: verify we have properly encoded deopt arguments
+
+  };
 }
 
 void
@ -54,8 +54,6 @@ static void doSpillCalleeSavedRegs(MachineFunction &MF, RegScavenger *RS,
                                    const MBBVector &SaveBlocks,
                                    const MBBVector &RestoreBlocks);
 
-static void doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS);
-
 namespace {
 class PEI : public MachineFunctionPass {
 public:
@ -84,7 +82,7 @@ private:
                      const MBBVector &SaveBlocks,
                      const MBBVector &RestoreBlocks)>
       SpillCalleeSavedRegisters;
-  std::function<void(MachineFunction &MF, RegScavenger *RS)>
+  std::function<void(MachineFunction &MF, RegScavenger &RS)>
       ScavengeFrameVirtualRegs;
 
   bool UsesCalleeSaves = false;
@ -142,7 +140,6 @@ MachineFunctionPass *llvm::createPrologEpilogInserterPass() {
|
|||||||
return new PEI();
|
return new PEI();
|
||||||
}
|
}
|
||||||
|
|
||||||
STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
|
|
||||||
STATISTIC(NumBytesStackSpace,
|
STATISTIC(NumBytesStackSpace,
|
||||||
"Number of bytes used for stack in all functions");
|
"Number of bytes used for stack in all functions");
|
||||||
|
|
||||||
@ -168,10 +165,10 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
|
|||||||
SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
|
SpillCalleeSavedRegisters = [](MachineFunction &, RegScavenger *,
|
||||||
unsigned &, unsigned &, const MBBVector &,
|
unsigned &, unsigned &, const MBBVector &,
|
||||||
const MBBVector &) {};
|
const MBBVector &) {};
|
||||||
ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger *) {};
|
ScavengeFrameVirtualRegs = [](MachineFunction &, RegScavenger &) {};
|
||||||
} else {
|
} else {
|
||||||
SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
|
SpillCalleeSavedRegisters = doSpillCalleeSavedRegs;
|
||||||
ScavengeFrameVirtualRegs = doScavengeFrameVirtualRegs;
|
ScavengeFrameVirtualRegs = scavengeFrameVirtualRegs;
|
||||||
UsesCalleeSaves = true;
|
UsesCalleeSaves = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -222,7 +219,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
|
|||||||
// post-pass, scavenge the virtual registers that frame index elimination
|
// post-pass, scavenge the virtual registers that frame index elimination
|
||||||
// inserted.
|
// inserted.
|
||||||
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
|
if (TRI->requiresRegisterScavenging(Fn) && FrameIndexVirtualScavenging) {
|
||||||
ScavengeFrameVirtualRegs(Fn, RS);
|
ScavengeFrameVirtualRegs(Fn, *RS);
|
||||||
|
|
||||||
// Clear any vregs created by virtual scavenging.
|
// Clear any vregs created by virtual scavenging.
|
||||||
Fn.getRegInfo().clearVirtRegs();
|
Fn.getRegInfo().clearVirtRegs();
|
||||||
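These hunks switch the ScavengeFrameVirtualRegs hook from taking RegScavenger* to RegScavenger&, with the dereference happening once at the call site. A short sketch of that pattern, using hypothetical Function/Scavenger types rather than the LLVM ones:

#include <functional>

struct Scavenger {};
struct Function {};

struct Pass {
  // Hook that is either a no-op stub or a real implementation; taking the
  // scavenger by reference documents that callers must pass a valid object.
  std::function<void(Function &, Scavenger &)> ScavengeHook;

  void configure(bool Enabled) {
    if (!Enabled)
      ScavengeHook = [](Function &, Scavenger &) {};                 // stub
    else
      ScavengeHook = [](Function &, Scavenger &) { /* real work here */ };
  }

  void run(Function &F, Scavenger *S) {
    ScavengeHook(F, *S);  // dereference once, at the call site
  }
};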
@ -1153,92 +1150,3 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
       RS->forward(MI);
     }
   }
-
-/// doScavengeFrameVirtualRegs - Replace all frame index virtual registers
-/// with physical registers. Use the register scavenger to find an
-/// appropriate register to use.
-///
-/// FIXME: Iterating over the instruction stream is unnecessary. We can simply
-/// iterate over the vreg use list, which at this point only contains machine
-/// operands for which eliminateFrameIndex need a new scratch reg.
-static void
-doScavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger *RS) {
-  // Run through the instructions and find any virtual registers.
-  MachineRegisterInfo &MRI = MF.getRegInfo();
-  for (MachineBasicBlock &MBB : MF) {
-    RS->enterBasicBlock(MBB);
-
-    int SPAdj = 0;
-
-    // The instruction stream may change in the loop, so check MBB.end()
-    // directly.
-    for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
-      // We might end up here again with a NULL iterator if we scavenged a
-      // register for which we inserted spill code for definition by what was
-      // originally the first instruction in MBB.
-      if (I == MachineBasicBlock::iterator(nullptr))
-        I = MBB.begin();
-
-      const MachineInstr &MI = *I;
-      MachineBasicBlock::iterator J = std::next(I);
-      MachineBasicBlock::iterator P =
-                         I == MBB.begin() ? MachineBasicBlock::iterator(nullptr)
-                                          : std::prev(I);
-
-      // RS should process this instruction before we might scavenge at this
-      // location. This is because we might be replacing a virtual register
-      // defined by this instruction, and if so, registers killed by this
-      // instruction are available, and defined registers are not.
-      RS->forward(I);
-
-      for (const MachineOperand &MO : MI.operands()) {
-        if (!MO.isReg())
-          continue;
-        unsigned Reg = MO.getReg();
-        if (!TargetRegisterInfo::isVirtualRegister(Reg))
-          continue;
-
-        // When we first encounter a new virtual register, it
-        // must be a definition.
-        assert(MO.isDef() && "frame index virtual missing def!");
-        // Scavenge a new scratch register
-        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
-        unsigned ScratchReg = RS->scavengeRegister(RC, J, SPAdj);
-
-        ++NumScavengedRegs;
-
-        // Replace this reference to the virtual register with the
-        // scratch register.
-        assert(ScratchReg && "Missing scratch register!");
-        MRI.replaceRegWith(Reg, ScratchReg);
-
-        // Because this instruction was processed by the RS before this
-        // register was allocated, make sure that the RS now records the
-        // register as being used.
-        RS->setRegUsed(ScratchReg);
-      }
-
-      // If the scavenger needed to use one of its spill slots, the
-      // spill code will have been inserted in between I and J. This is a
-      // problem because we need the spill code before I: Move I to just
-      // prior to J.
-      if (I != std::prev(J)) {
-        MBB.splice(J, &MBB, I);
-
-        // Before we move I, we need to prepare the RS to visit I again.
-        // Specifically, RS will assert if it sees uses of registers that
-        // it believes are undefined. Because we have already processed
-        // register kills in I, when it visits I again, it will believe that
-        // those registers are undefined. To avoid this situation, unprocess
-        // the instruction I.
-        assert(RS->getCurrentPosition() == I &&
-               "The register scavenger has an unexpected position");
-        I = P;
-        RS->unprocess(P);
-      } else
-        ++I;
-    }
-  }
-
-  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
-}
@ -58,8 +58,9 @@
 /// whenever a register is unavailable. This is not practical in production but
 /// provides a useful baseline both for measuring other allocators and comparing
 /// the speed of the basic algorithm against other styles of allocators.
-class RABasic : public MachineFunctionPass, public RegAllocBase
-{
+class RABasic : public MachineFunctionPass,
+                public RegAllocBase,
+                private LiveRangeEdit::Delegate {
   // context
   MachineFunction *MF;

@ -72,6 +73,9 @@ class RABasic : public MachineFunctionPass, public RegAllocBase
   // selectOrSplit().
   BitVector UsableRegs;

+  bool LRE_CanEraseVirtReg(unsigned) override;
+  void LRE_WillShrinkVirtReg(unsigned) override;
+
 public:
   RABasic();

@ -121,17 +125,46 @@ char RABasic::ID = 0;

 } // end anonymous namespace

+char &llvm::RABasicID = RABasic::ID;
+
+INITIALIZE_PASS_BEGIN(RABasic, "regallocbasic", "Basic Register Allocator",
+                      false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
+INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
+INITIALIZE_PASS_DEPENDENCY(LiveStacks)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
+INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
+INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
+INITIALIZE_PASS_END(RABasic, "regallocbasic", "Basic Register Allocator", false,
+                    false)
+
+bool RABasic::LRE_CanEraseVirtReg(unsigned VirtReg) {
+  if (VRM->hasPhys(VirtReg)) {
+    LiveInterval &LI = LIS->getInterval(VirtReg);
+    Matrix->unassign(LI);
+    aboutToRemoveInterval(LI);
+    return true;
+  }
+  // Unassigned virtreg is probably in the priority queue.
+  // RegAllocBase will erase it after dequeueing.
+  return false;
+}
+
+void RABasic::LRE_WillShrinkVirtReg(unsigned VirtReg) {
+  if (!VRM->hasPhys(VirtReg))
+    return;
+
+  // Register is assigned, put it back on the queue for reassignment.
+  LiveInterval &LI = LIS->getInterval(VirtReg);
+  Matrix->unassign(LI);
+  enqueue(&LI);
+}
+
 RABasic::RABasic(): MachineFunctionPass(ID) {
-  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
-  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
-  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
-  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
-  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
-  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
-  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
-  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
-  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
-  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
 }

 void RABasic::getAnalysisUsage(AnalysisUsage &AU) const {
@ -200,7 +233,7 @@ bool RABasic::spillInterferences(LiveInterval &VirtReg, unsigned PhysReg,
     Matrix->unassign(Spill);

     // Spill the extracted interval.
-    LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM, nullptr, &DeadRemats);
+    LiveRangeEdit LRE(&Spill, SplitVRegs, *MF, *LIS, VRM, this, &DeadRemats);
     spiller().spill(LRE);
   }
   return true;
@ -259,7 +292,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg,
   DEBUG(dbgs() << "spilling: " << VirtReg << '\n');
   if (!VirtReg.isSpillable())
     return ~0u;
-  LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM, nullptr, &DeadRemats);
+  LiveRangeEdit LRE(&VirtReg, SplitVRegs, *MF, *LIS, VRM, this, &DeadRemats);
   spiller().spill(LRE);

   // The live virtual register requesting allocation was spilled, so tell
@ -49,9 +49,11 @@
 #include "llvm/CodeGen/MachineDominators.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PBQP/Graph.h"
+#include "llvm/CodeGen/PBQP/Math.h"
 #include "llvm/CodeGen/PBQP/Solution.h"
 #include "llvm/CodeGen/PBQPRAConstraint.h"
 #include "llvm/CodeGen/RegAllocPBQP.h"
@ -139,13 +141,13 @@ public:
   }

 private:
-  typedef std::map<const LiveInterval*, unsigned> LI2NodeMap;
-  typedef std::vector<const LiveInterval*> Node2LIMap;
-  typedef std::vector<unsigned> AllowedSet;
-  typedef std::vector<AllowedSet> AllowedSetMap;
-  typedef std::pair<unsigned, unsigned> RegPair;
-  typedef std::map<RegPair, PBQP::PBQPNum> CoalesceMap;
-  typedef std::set<unsigned> RegSet;
+  using LI2NodeMap = std::map<const LiveInterval *, unsigned>;
+  using Node2LIMap = std::vector<const LiveInterval *>;
+  using AllowedSet = std::vector<unsigned>;
+  using AllowedSetMap = std::vector<AllowedSet>;
+  using RegPair = std::pair<unsigned, unsigned>;
+  using CoalesceMap = std::map<RegPair, PBQP::PBQPNum>;
+  using RegSet = std::set<unsigned>;

   char *customPassID;

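The typedef-to-using rewrites in this and the following hunks are mechanical. As a reminder of the equivalence (plain C++, no LLVM types), one sketch covers them all:

#include <map>
#include <type_traits>
#include <vector>

// Both declarations name the same alias; 'using' reads left-to-right and
// also supports alias templates, which 'typedef' cannot express.
typedef std::map<int, unsigned> OldStyleMap;
using NewStyleMap = std::map<int, unsigned>;
static_assert(std::is_same<OldStyleMap, NewStyleMap>::value, "same type");

template <typename T>
using Matrix = std::vector<std::vector<T>>;  // no typedef equivalent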
@ -212,12 +214,12 @@ public:
 /// @brief Add interference edges between overlapping vregs.
 class Interference : public PBQPRAConstraint {
 private:
-  typedef const PBQP::RegAlloc::AllowedRegVector* AllowedRegVecPtr;
-  typedef std::pair<AllowedRegVecPtr, AllowedRegVecPtr> IKey;
-  typedef DenseMap<IKey, PBQPRAGraph::MatrixPtr> IMatrixCache;
-  typedef DenseSet<IKey> DisjointAllowedRegsCache;
-  typedef std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId> IEdgeKey;
-  typedef DenseSet<IEdgeKey> IEdgeCache;
+  using AllowedRegVecPtr = const PBQP::RegAlloc::AllowedRegVector *;
+  using IKey = std::pair<AllowedRegVecPtr, AllowedRegVecPtr>;
+  using IMatrixCache = DenseMap<IKey, PBQPRAGraph::MatrixPtr>;
+  using DisjointAllowedRegsCache = DenseSet<IKey>;
+  using IEdgeKey = std::pair<PBQP::GraphBase::NodeId, PBQP::GraphBase::NodeId>;
+  using IEdgeCache = DenseSet<IEdgeKey>;

   bool haveDisjointAllowedRegs(const PBQPRAGraph &G, PBQPRAGraph::NodeId NId,
                                PBQPRAGraph::NodeId MId,
@ -252,8 +254,8 @@ private:
   // for the fast interference graph construction algorithm. The last is there
   // to save us from looking up node ids via the VRegToNode map in the graph
   // metadata.
-  typedef std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>
-    IntervalInfo;
+  using IntervalInfo =
+      std::tuple<LiveInterval*, size_t, PBQP::GraphBase::NodeId>;

   static SlotIndex getStartPoint(const IntervalInfo &I) {
     return std::get<0>(I)->segments[std::get<1>(I)].start;
@ -320,9 +322,10 @@ public:
     // Cache known disjoint allowed registers pairs
     DisjointAllowedRegsCache D;

-    typedef std::set<IntervalInfo, decltype(&lowestEndPoint)> IntervalSet;
-    typedef std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
-                                decltype(&lowestStartPoint)> IntervalQueue;
+    using IntervalSet = std::set<IntervalInfo, decltype(&lowestEndPoint)>;
+    using IntervalQueue =
+        std::priority_queue<IntervalInfo, std::vector<IntervalInfo>,
+                            decltype(&lowestStartPoint)>;
     IntervalSet Active(lowestEndPoint);
     IntervalQueue Inactive(lowestStartPoint);

@ -658,7 +661,6 @@ void RegAllocPBQP::spillVReg(unsigned VReg,
                              SmallVectorImpl<unsigned> &NewIntervals,
                              MachineFunction &MF, LiveIntervals &LIS,
                              VirtRegMap &VRM, Spiller &VRegSpiller) {
-
   VRegsToAlloc.erase(VReg);
   LiveRangeEdit LRE(&LIS.getInterval(VReg), NewIntervals, MF, LIS, &VRM,
                     nullptr, &DeadRemats);
@ -15,18 +15,23 @@
 //
 //===----------------------------------------------------------------------===//

+#include "llvm/CodeGen/RegisterScavenging.h"
+
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/CodeGen/RegisterScavenging.h"
+#include "llvm/ADT/Statistic.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/PassSupport.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
@ -39,6 +44,8 @@ using namespace llvm;

 #define DEBUG_TYPE "reg-scavenging"

+STATISTIC(NumScavengedRegs, "Number of frame index regs scavenged");
+
 void RegScavenger::setRegUsed(unsigned Reg, LaneBitmask LaneMask) {
   LiveUnits.addRegMasked(Reg, LaneMask);
 }
@ -469,3 +476,120 @@ unsigned RegScavenger::scavengeRegister(const TargetRegisterClass *RC,

   return SReg;
 }
+
+void llvm::scavengeFrameVirtualRegs(MachineFunction &MF, RegScavenger &RS) {
+  // FIXME: Iterating over the instruction stream is unnecessary. We can simply
+  // iterate over the vreg use list, which at this point only contains machine
+  // operands for which eliminateFrameIndex need a new scratch reg.
+
+  // Run through the instructions and find any virtual registers.
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  for (MachineBasicBlock &MBB : MF) {
+    RS.enterBasicBlock(MBB);
+
+    int SPAdj = 0;
+
+    // The instruction stream may change in the loop, so check MBB.end()
+    // directly.
+    for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
+      // We might end up here again with a NULL iterator if we scavenged a
+      // register for which we inserted spill code for definition by what was
+      // originally the first instruction in MBB.
+      if (I == MachineBasicBlock::iterator(nullptr))
+        I = MBB.begin();
+
+      const MachineInstr &MI = *I;
+      MachineBasicBlock::iterator J = std::next(I);
+      MachineBasicBlock::iterator P =
+                         I == MBB.begin() ? MachineBasicBlock::iterator(nullptr)
+                                          : std::prev(I);
+
+      // RS should process this instruction before we might scavenge at this
+      // location. This is because we might be replacing a virtual register
+      // defined by this instruction, and if so, registers killed by this
+      // instruction are available, and defined registers are not.
+      RS.forward(I);
+
+      for (const MachineOperand &MO : MI.operands()) {
+        if (!MO.isReg())
+          continue;
+        unsigned Reg = MO.getReg();
+        if (!TargetRegisterInfo::isVirtualRegister(Reg))
+          continue;
+
+        // When we first encounter a new virtual register, it
+        // must be a definition.
+        assert(MO.isDef() && "frame index virtual missing def!");
+        // Scavenge a new scratch register
+        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
+        unsigned ScratchReg = RS.scavengeRegister(RC, J, SPAdj);
+
+        ++NumScavengedRegs;
+
+        // Replace this reference to the virtual register with the
+        // scratch register.
+        assert(ScratchReg && "Missing scratch register!");
+        MRI.replaceRegWith(Reg, ScratchReg);
+
+        // Because this instruction was processed by the RS before this
+        // register was allocated, make sure that the RS now records the
+        // register as being used.
+        RS.setRegUsed(ScratchReg);
+      }
+
+      // If the scavenger needed to use one of its spill slots, the
+      // spill code will have been inserted in between I and J. This is a
+      // problem because we need the spill code before I: Move I to just
+      // prior to J.
+      if (I != std::prev(J)) {
+        MBB.splice(J, &MBB, I);
+
+        // Before we move I, we need to prepare the RS to visit I again.
+        // Specifically, RS will assert if it sees uses of registers that
+        // it believes are undefined. Because we have already processed
+        // register kills in I, when it visits I again, it will believe that
+        // those registers are undefined. To avoid this situation, unprocess
+        // the instruction I.
+        assert(RS.getCurrentPosition() == I &&
+               "The register scavenger has an unexpected position");
+        I = P;
+        RS.unprocess(P);
+      } else
+        ++I;
+    }
+  }
+
+  MRI.clearVirtRegs();
+  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);
+}
+
+namespace {
+/// This class runs register scavenging independ of the PrologEpilogInserter.
+/// This is used in for testing.
+class ScavengerTest : public MachineFunctionPass {
+public:
+  static char ID;
+  ScavengerTest() : MachineFunctionPass(ID) {}
+  bool runOnMachineFunction(MachineFunction &MF) {
+    const TargetSubtargetInfo &STI = MF.getSubtarget();
+    const TargetFrameLowering &TFL = *STI.getFrameLowering();
+
+    RegScavenger RS;
+    // Let's hope that calling those outside of PrologEpilogueInserter works
+    // well enough to initialize the scavenger with some emergency spillslots
+    // for the target.
+    BitVector SavedRegs;
+    TFL.determineCalleeSaves(MF, SavedRegs, &RS);
+    TFL.processFunctionBeforeFrameFinalized(MF, &RS);
+
+    // Let's scavenge the current function
+    scavengeFrameVirtualRegs(MF, RS);
+    return true;
+  }
+};
+char ScavengerTest::ID;
+
+} // end anonymous namespace
+
+INITIALIZE_PASS(ScavengerTest, "scavenger-test",
+                "Scavenge virtual registers inside basic blocks", false, false)
@ -12,32 +12,54 @@
 //
 //===----------------------------------------------------------------------===//

-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
 #include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/MapVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
 #include "llvm/CodeGen/ScheduleDFS.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/Function.h"
-#include "llvm/IR/Type.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
 #include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/Casting.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/Format.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <string>
+#include <utility>
+#include <vector>

 using namespace llvm;

@ -90,11 +112,9 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                      const MachineLoopInfo *mli,
                                      bool RemoveKillFlags)
     : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
-      RemoveKillFlags(RemoveKillFlags), CanHandleTerminators(false),
-      TrackLaneMasks(false), AAForDep(nullptr), BarrierChain(nullptr),
+      RemoveKillFlags(RemoveKillFlags),
       UnknownValue(UndefValue::get(
-                  Type::getVoidTy(mf.getFunction()->getContext()))),
-      FirstDbgValue(nullptr) {
+                  Type::getVoidTy(mf.getFunction()->getContext()))) {
   DbgValues.clear();

   const TargetSubtargetInfo &ST = mf.getSubtarget();
@ -126,7 +146,7 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) {
       return V;
     }
     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
-  } while (1);
+  } while (true);
 }

 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
@ -563,7 +583,7 @@ void ScheduleDAGInstrs::initSUnits() {
   // which is contained within a basic block.
   SUnits.reserve(NumRegionInstrs);

-  for (MachineInstr &MI : llvm::make_range(RegionBegin, RegionEnd)) {
+  for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) {
     if (MI.isDebugValue())
       continue;

@ -606,13 +626,13 @@ void ScheduleDAGInstrs::initSUnits() {

 class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> {
   /// Current total number of SUs in map.
-  unsigned NumNodes;
+  unsigned NumNodes = 0;

   /// 1 for loads, 0 for stores. (see comment in SUList)
   unsigned TrueMemOrderLatency;

 public:
-  Value2SUsMap(unsigned lat = 0) : NumNodes(0), TrueMemOrderLatency(lat) {}
+  Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {}

   /// To keep NumNodes up to date, insert() is used instead of
   /// this operator w/ push_back().
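The NumNodes change above swaps an init-list entry for an in-class default member initializer. A plain C++ illustration, independent of the LLVM class:

struct CounterOld {
  unsigned NumNodes;
  unsigned Latency;
  CounterOld(unsigned Lat = 0) : NumNodes(0), Latency(Lat) {}
};

struct CounterNew {
  unsigned NumNodes = 0;  // default applies to every constructor
  unsigned Latency;
  CounterNew(unsigned Lat = 0) : Latency(Lat) {}
};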
@ -630,7 +650,7 @@ public:
   void inline clearList(ValueType V) {
     iterator Itr = find(V);
     if (Itr != end()) {
-      assert (NumNodes >= Itr->second.size());
+      assert(NumNodes >= Itr->second.size());
       NumNodes -= Itr->second.size();

       Itr->second.clear();
@ -646,7 +666,7 @@ public:
   unsigned inline size() const { return NumNodes; }

   /// Counts the number of SUs in this map after a reduction.
-  void reComputeSize(void) {
+  void reComputeSize() {
     NumNodes = 0;
     for (auto &I : *this)
       NumNodes += I.second.size();
@ -676,7 +696,7 @@ void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
 }

 void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
-  assert (BarrierChain != nullptr);
+  assert(BarrierChain != nullptr);

   for (auto &I : map) {
     SUList &sus = I.second;
@ -687,7 +707,7 @@ void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
 }

 void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
-  assert (BarrierChain != nullptr);
+  assert(BarrierChain != nullptr);

   // Go through all lists of SUs.
   for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
@ -1028,7 +1048,7 @@ void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
   // The N last elements in NodeNums will be removed, and the SU with
   // the lowest NodeNum of them will become the new BarrierChain to
   // let the not yet seen SUs have a dependency to the removed SUs.
-  assert (N <= NodeNums.size());
+  assert(N <= NodeNums.size());
   SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
   if (BarrierChain) {
     // The aliasing and non-aliasing maps reduce independently of each
@ -1156,6 +1176,7 @@ std::string ScheduleDAGInstrs::getDAGName() const {
 //===----------------------------------------------------------------------===//

 namespace llvm {
+
 /// Internal state used to compute SchedDFSResult.
 class SchedDFSImpl {
   SchedDFSResult &R;
@ -1163,16 +1184,16 @@ class SchedDFSImpl {
   /// Join DAG nodes into equivalence classes by their subtree.
   IntEqClasses SubtreeClasses;
   /// List PredSU, SuccSU pairs that represent data edges between subtrees.
-  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;
+  std::vector<std::pair<const SUnit *, const SUnit*>> ConnectionPairs;

   struct RootData {
     unsigned NodeID;
     unsigned ParentNodeID;  ///< Parent node (member of the parent subtree).
-    unsigned SubInstrCount; ///< Instr count in this tree only, not children.
+    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not
+                                /// children.

     RootData(unsigned id): NodeID(id),
-                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
-                           SubInstrCount(0) {}
+                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

     unsigned getSparseSetIndex() const { return NodeID; }
   };
@ -1340,12 +1361,15 @@ protected:
     } while (FromTree != SchedDFSResult::InvalidSubtreeID);
   }
 };

 } // end namespace llvm

 namespace {

 /// Manage the stack used by a reverse depth-first search over the DAG.
 class SchedDAGReverseDFS {
-  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
+  std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack;

 public:
   bool isComplete() const { return DFSStack.empty(); }

@ -1367,7 +1391,8 @@ public:
     return getCurr()->Preds.end();
   }
 };
-} // anonymous
+
+} // end anonymous namespace

 static bool hasDataSucc(const SUnit *SU) {
   for (const SDep &SuccDep : SU->Succs) {
@ -1392,7 +1417,7 @@ void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
     SchedDAGReverseDFS DFS;
     Impl.visitPreorder(&SU);
     DFS.follow(&SU);
-    for (;;) {
+    while (true) {
       // Traverse the leftmost path as far as possible.
       while (DFS.getPred() != DFS.getPredEnd()) {
         const SDep &PredDep = *DFS.getPred();
@ -1457,4 +1482,5 @@ raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
 }

 } // end namespace llvm
+
 #endif
@ -225,6 +225,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
       }
       return TranslateLegalizeResults(Op, Lowered);
     }
+      LLVM_FALLTHROUGH;
     case TargetLowering::Expand:
       Changed = true;
       return LegalizeOp(ExpandLoad(Op));
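The LLVM_FALLTHROUGH added above marks the missing break as intentional; as far as I can tell it is the project's portability wrapper (llvm/Support/Compiler.h) around the standard attribute. A plain C++17 sketch of the same idea:

#include <cstdio>

void classify(int Kind) {
  switch (Kind) {
  case 0:
    std::puts("zero is also handled as small");
    [[fallthrough]];  // silences -Wimplicit-fallthrough
  case 1:
    std::puts("small");
    break;
  default:
    std::puts("other");
    break;
  }
}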
@ -1,4 +1,4 @@
-//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
+//===- SelectionDAG.cpp - Implement the SelectionDAG data structures ------===//
 //
 // The LLVM Compiler Infrastructure
 //
@ -11,29 +11,46 @@
 //
 //===----------------------------------------------------------------------===//

-#include "llvm/CodeGen/SelectionDAG.h"
 #include "SDNodeDbgValue.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
 #include "llvm/ADT/APSInt.h"
-#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/None.h"
 #include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
 #include "llvm/CodeGen/MachineBasicBlock.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
-#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
-#include "llvm/IR/CallingConv.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Constant.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalAlias.h"
-#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/KnownBits.h"
@ -41,16 +58,20 @@
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Support/Mutex.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetIntrinsicInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
 #include <algorithm>
-#include <cmath>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <limits>
+#include <set>
+#include <string>
 #include <utility>
+#include <vector>

 using namespace llvm;

@ -269,7 +290,6 @@ ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
   return ISD::CondCode(Operation);
 }

-
 /// For an integer comparison, return 1 if the comparison is a signed operation
 /// and 2 if the result is an unsigned comparison. Return zero if the operation
 /// does not depend on the sign of the input (setne and seteq).
@ -338,7 +358,6 @@ ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
 //===----------------------------------------------------------------------===//

 /// AddNodeIDOpcode - Add the node opcode to the NodeID data.
-///
 static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
   ID.AddInteger(OpC);
 }
@ -350,7 +369,6 @@ static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
 }

 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
-///
 static void AddNodeIDOperands(FoldingSetNodeID &ID,
                               ArrayRef<SDValue> Ops) {
   for (auto& Op : Ops) {
@ -360,7 +378,6 @@ static void AddNodeIDOperands(FoldingSetNodeID &ID,
 }

 /// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
-///
 static void AddNodeIDOperands(FoldingSetNodeID &ID,
                               ArrayRef<SDUse> Ops) {
   for (auto& Op : Ops) {
@ -392,10 +409,9 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
     break;
   }
   case ISD::TargetConstantFP:
-  case ISD::ConstantFP: {
+  case ISD::ConstantFP:
     ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
     break;
-  }
   case ISD::TargetGlobalAddress:
   case ISD::GlobalAddress:
   case ISD::TargetGlobalTLSAddress:
@ -770,7 +786,6 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
 /// maps and modified in place. Add it back to the CSE maps, unless an identical
 /// node already exists, in which case transfer all its users to the existing
 /// node. This transfer can potentially trigger recursive merging.
-///
 void
 SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
   // For node types that aren't CSE'd, just act as if no identical node
@ -835,7 +850,6 @@ SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
   return Node;
 }

-
 /// FindModifiedNodeSlot - Find a slot for the specified node if its operands
 /// were replaced with those specified. If this node is never memoized,
 /// return null, otherwise return a pointer to the slot it would take. If a
@ -864,10 +878,9 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {

 // EntryNode could meaningfully have debug info if we can find it...
 SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
-    : TM(tm), TSI(nullptr), TLI(nullptr), OptLevel(OL),
+    : TM(tm), OptLevel(OL),
       EntryNode(ISD::EntryToken, 0, DebugLoc(), getVTList(MVT::Other)),
-      Root(getEntryNode()), NewNodesMustHaveLegalTypes(false),
-      UpdateListeners(nullptr) {
+      Root(getEntryNode()) {
   InsertNode(&EntryNode);
   DbgInfo = new SDDbgInfo();
 }
@ -1038,7 +1051,6 @@ SDValue SelectionDAG::getZeroExtendVectorInReg(SDValue Op, const SDLoc &DL,
 }

 /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
-///
 SDValue SelectionDAG::getNOT(const SDLoc &DL, SDValue Val, EVT VT) {
   EVT EltVT = VT.getScalarType();
   SDValue NegOne =
@ -1317,7 +1329,6 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
   return SDValue(N, 0);
 }

-
 SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                       unsigned Alignment, int Offset,
                                       bool isTarget,
@ -1451,7 +1462,7 @@ SDValue SelectionDAG::getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1,
   // Validate that all indices in Mask are within the range of the elements
   // input to the shuffle.
   int NElts = Mask.size();
-  assert(all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
+  assert(llvm::all_of(Mask, [&](int M) { return M < (NElts * 2); }) &&
          "Index out of range");

   // Copy the mask so we can do any needed cleanup.
@ -2918,7 +2929,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
       else
         DemandedRHS.setBit((unsigned)M % NumElts);
     }
-    Tmp = UINT_MAX;
+    Tmp = std::numeric_limits<unsigned>::max();
     if (!!DemandedLHS)
       Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
     if (!!DemandedRHS) {
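The UINT_MAX-to-numeric_limits substitutions here and in the next hunks are value-preserving; the newer spelling names the type and needs no macro. A small self-contained check (plain C++, requires <limits> and <climits>):

#include <climits>
#include <limits>

static_assert(std::numeric_limits<unsigned>::max() == UINT_MAX,
              "identical maximum value");

unsigned sentinel() {
  return std::numeric_limits<unsigned>::max();  // was: UINT_MAX
}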
@ -3122,7 +3133,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
|
|||||||
unsigned EltIdx = CEltNo->getZExtValue();
|
unsigned EltIdx = CEltNo->getZExtValue();
|
||||||
|
|
||||||
// If we demand the inserted element then get its sign bits.
|
// If we demand the inserted element then get its sign bits.
|
||||||
Tmp = UINT_MAX;
|
Tmp = std::numeric_limits<unsigned>::max();
|
||||||
if (DemandedElts[EltIdx]) {
|
if (DemandedElts[EltIdx]) {
|
||||||
// TODO - handle implicit truncation of inserted elements.
|
// TODO - handle implicit truncation of inserted elements.
|
||||||
if (InVal.getScalarValueSizeInBits() != VTBits)
|
if (InVal.getScalarValueSizeInBits() != VTBits)
|
||||||
@ -3188,7 +3199,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
|
|||||||
case ISD::CONCAT_VECTORS:
|
case ISD::CONCAT_VECTORS:
|
||||||
// Determine the minimum number of sign bits across all demanded
|
// Determine the minimum number of sign bits across all demanded
|
||||||
// elts of the input vectors. Early out if the result is already 1.
|
// elts of the input vectors. Early out if the result is already 1.
|
||||||
Tmp = UINT_MAX;
|
Tmp = std::numeric_limits<unsigned>::max();
|
||||||
EVT SubVectorVT = Op.getOperand(0).getValueType();
|
EVT SubVectorVT = Op.getOperand(0).getValueType();
|
||||||
unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
|
unsigned NumSubVectorElts = SubVectorVT.getVectorNumElements();
|
||||||
unsigned NumSubVectors = Op.getNumOperands();
|
unsigned NumSubVectors = Op.getNumOperands();
|
||||||
@ -3327,7 +3338,7 @@ bool SelectionDAG::haveNoCommonBitsSet(SDValue A, SDValue B) const {
|
|||||||
|
|
||||||
static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
|
static SDValue FoldCONCAT_VECTORS(const SDLoc &DL, EVT VT,
|
||||||
ArrayRef<SDValue> Ops,
|
ArrayRef<SDValue> Ops,
|
||||||
llvm::SelectionDAG &DAG) {
|
SelectionDAG &DAG) {
|
||||||
assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
|
assert(!Ops.empty() && "Can't concatenate an empty list of vectors!");
|
||||||
assert(llvm::all_of(Ops,
|
assert(llvm::all_of(Ops,
|
||||||
[Ops](SDValue Op) {
|
[Ops](SDValue Op) {
|
||||||
@ -3836,8 +3847,9 @@ bool SelectionDAG::isUndef(unsigned Opcode, ArrayRef<SDValue> Ops) {
|
|||||||
return true;
|
return true;
|
||||||
|
|
||||||
return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
|
return ISD::isBuildVectorOfConstantSDNodes(Divisor.getNode()) &&
|
||||||
any_of(Divisor->op_values(),
|
llvm::any_of(Divisor->op_values(),
|
||||||
[](SDValue V) { return V.isUndef() || isNullConstant(V); });
|
[](SDValue V) { return V.isUndef() ||
|
||||||
|
isNullConstant(V); });
|
||||||
// TODO: Handle signed overflow.
|
// TODO: Handle signed overflow.
|
||||||
}
|
}
|
||||||
// TODO: Handle oversized shifts.
|
// TODO: Handle oversized shifts.
|
||||||
@ -3948,8 +3960,8 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
|
|||||||
// All operands must be vector types with the same number of elements as
|
// All operands must be vector types with the same number of elements as
|
||||||
// the result type and must be either UNDEF or a build vector of constant
|
// the result type and must be either UNDEF or a build vector of constant
|
||||||
// or UNDEF scalars.
|
// or UNDEF scalars.
|
||||||
if (!all_of(Ops, IsConstantBuildVectorOrUndef) ||
|
if (!llvm::all_of(Ops, IsConstantBuildVectorOrUndef) ||
|
||||||
!all_of(Ops, IsScalarOrSameVectorSize))
|
!llvm::all_of(Ops, IsScalarOrSameVectorSize))
|
||||||
return SDValue();
|
return SDValue();
|
||||||
|
|
||||||
// If we are comparing vectors, then the result needs to be a i1 boolean
|
// If we are comparing vectors, then the result needs to be a i1 boolean
|
||||||
@ -5550,7 +5562,7 @@ SDValue SelectionDAG::getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl,
|
|||||||
Opcode == ISD::PREFETCH ||
|
Opcode == ISD::PREFETCH ||
|
||||||
Opcode == ISD::LIFETIME_START ||
|
Opcode == ISD::LIFETIME_START ||
|
||||||
Opcode == ISD::LIFETIME_END ||
|
Opcode == ISD::LIFETIME_END ||
|
||||||
(Opcode <= INT_MAX &&
|
((int)Opcode <= std::numeric_limits<int>::max() &&
|
||||||
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
|
(int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
|
||||||
"Opcode is not a memory-accessing opcode!");
|
"Opcode is not a memory-accessing opcode!");
|
||||||
|
|
||||||
@ -5884,7 +5896,6 @@ SDValue SelectionDAG::getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain,
|
|||||||
SDValue Ptr, SDValue Mask, SDValue Src0,
|
SDValue Ptr, SDValue Mask, SDValue Src0,
|
||||||
EVT MemVT, MachineMemOperand *MMO,
|
EVT MemVT, MachineMemOperand *MMO,
|
||||||
ISD::LoadExtType ExtTy, bool isExpanding) {
|
ISD::LoadExtType ExtTy, bool isExpanding) {
|
||||||
|
|
||||||
SDVTList VTs = getVTList(VT, MVT::Other);
|
SDVTList VTs = getVTList(VT, MVT::Other);
|
||||||
SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
|
SDValue Ops[] = { Chain, Ptr, Mask, Src0 };
|
||||||
FoldingSetNodeID ID;
|
FoldingSetNodeID ID;
|
||||||
@ -6038,13 +6049,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,

   switch (Opcode) {
   default: break;
-  case ISD::CONCAT_VECTORS: {
+  case ISD::CONCAT_VECTORS:
     // Attempt to fold CONCAT_VECTORS into BUILD_VECTOR or UNDEF.
     if (SDValue V = FoldCONCAT_VECTORS(DL, VT, Ops, *this))
       return V;
     break;
-  }
-  case ISD::SELECT_CC: {
+  case ISD::SELECT_CC:
     assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
     assert(Ops[0].getValueType() == Ops[1].getValueType() &&
            "LHS and RHS of condition must have same type!");
@ -6053,14 +6063,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     assert(Ops[2].getValueType() == VT &&
            "select_cc node must be of same type as true and false value!");
     break;
-  }
-  case ISD::BR_CC: {
+  case ISD::BR_CC:
     assert(NumOps == 5 && "BR_CC takes 5 operands!");
     assert(Ops[2].getValueType() == Ops[3].getValueType() &&
            "LHS/RHS of comparison should match types!");
     break;
   }
-  }

   // Memoize nodes.
   SDNode *N;
@ -6599,7 +6607,6 @@ SDNode* SelectionDAG::mutateStrictFPToFP(SDNode *Node) {
   return Res;
 }

-
 /// getMachineNode - These are used for target selectors to create a new node
 /// with specified return type(s), MachineInstr opcode, and operands.
 ///
@ -6812,7 +6819,7 @@ public:
     : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
 };

-}
+} // end anonymous namespace

 /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
 /// This can cause recursive merging of nodes in the DAG.
@ -6858,7 +6865,6 @@ void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
     AddModifiedNodeToCSEMaps(User);
   }

-
   // If we just RAUW'd the root, take note.
   if (FromN == getRoot())
     setRoot(To);
@ -7028,6 +7034,7 @@ void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To){
 }

 namespace {
+
 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
 /// to record information about a use.
 struct UseMemo {
@ -7040,7 +7047,8 @@ namespace {
   bool operator<(const UseMemo &L, const UseMemo &R) {
     return (intptr_t)L.User < (intptr_t)R.User;
   }
-}
+
+} // end anonymous namespace

 /// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
 /// uses of other values produced by From.getNode() alone. The same value
@ -7106,7 +7114,6 @@ void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
 /// based on their topological order. It returns the maximum id and a vector
 /// of the SDNodes* in assigned order by reference.
 unsigned SelectionDAG::AssignTopologicalOrder() {
-
   unsigned DAGSize = 0;

   // SortedPos tracks the progress of the algorithm. Nodes before it are
@ -7333,6 +7340,7 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
 }

 namespace {
+
 struct EVTArray {
   std::vector<EVT> VTs;

@ -7342,11 +7350,12 @@ namespace {
       VTs.push_back(MVT((MVT::SimpleValueType)i));
   }
 };
-}

-static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
+} // end anonymous namespace
+
+static ManagedStatic<std::set<EVT, EVT::compareRawBits>> EVTs;
 static ManagedStatic<EVTArray> SimpleVTArray;
-static ManagedStatic<sys::SmartMutex<true> > VTMutex;
+static ManagedStatic<sys::SmartMutex<true>> VTMutex;

 /// getValueTypeList - Return a pointer to the specified value type.
 ///
@ -7380,7 +7389,6 @@ bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
   return NUses == 0;
 }

-
 /// hasAnyUseOfValue - Return true if there are any use of the indicated
 /// value. This method ignores uses of other values defined by this operation.
 bool SDNode::hasAnyUseOfValue(unsigned Value) const {
@ -7393,9 +7401,7 @@ bool SDNode::hasAnyUseOfValue(unsigned Value) const {
   return false;
 }

-
 /// isOnlyUserOf - Return true if this node is the only use of N.
-///
 bool SDNode::isOnlyUserOf(const SDNode *N) const {
   bool Seen = false;
   for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
@ -7425,7 +7431,6 @@ bool SDNode::areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N) {
 }

 /// isOperand - Return true if this node is an operand of N.
-///
 bool SDValue::isOperandOf(const SDNode *N) const {
   for (const SDValue &Op : N->op_values())
     if (*this == Op)
@ -7475,7 +7480,7 @@ bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
   }
   // Next, try a deep search: check whether every operand of the TokenFactor
   // reaches Dest.
-  return all_of((*this)->ops(), [=](SDValue Op) {
+  return llvm::all_of((*this)->ops(), [=](SDValue Op) {
     return Op.reachesChainWithoutSideEffects(Dest, Depth - 1);
   });
 }
@ -7627,7 +7632,6 @@ bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
   return false;
 }

-
 /// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
 /// it cannot be inferred.
 unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
@ -7718,7 +7722,6 @@ unsigned GlobalAddressSDNode::getAddressSpace() const {
   return getGlobal()->getType()->getAddressSpace();
 }

-
 Type *ConstantPoolSDNode::getType() const {
   if (isMachineConstantPoolEntry())
     return Val.MachineCPVal->getType();
@ -2022,7 +2022,7 @@ static SDNode *findGlueUse(SDNode *N) {
 }

 /// findNonImmUse - Return true if "Use" is a non-immediate use of "Def".
-/// This function recursively traverses up the operand chain, ignoring
+/// This function iteratively traverses up the operand chain, ignoring
 /// certain nodes.
 static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                           SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
@ -2035,30 +2035,36 @@ static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
   // The Use may be -1 (unassigned) if it is a newly allocated node. This can
   // happen because we scan down to newly selected nodes in the case of glue
   // uses.
-  if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
-    return false;
-
-  // Don't revisit nodes if we already scanned it and didn't fail, we know we
-  // won't fail if we scan it again.
-  if (!Visited.insert(Use).second)
-    return false;
-
-  for (const SDValue &Op : Use->op_values()) {
-    // Ignore chain uses, they are validated by HandleMergeInputChains.
-    if (Op.getValueType() == MVT::Other && IgnoreChains)
-      continue;
-
-    SDNode *N = Op.getNode();
-    if (N == Def) {
-      if (Use == ImmedUse || Use == Root)
-        continue; // We are not looking for immediate use.
-      assert(N != Root);
-      return true;
-    }
-
-    // Traverse up the operand chain.
-    if (findNonImmUse(N, Def, ImmedUse, Root, Visited, IgnoreChains))
-      return true;
+  std::vector<SDNode *> WorkList;
+  WorkList.push_back(Use);
+
+  while (!WorkList.empty()) {
+    Use = WorkList.back();
+    WorkList.pop_back();
+    if (Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1)
+      continue;
+
+    // Don't revisit nodes if we already scanned it and didn't fail, we know we
+    // won't fail if we scan it again.
+    if (!Visited.insert(Use).second)
+      continue;
+
+    for (const SDValue &Op : Use->op_values()) {
+      // Ignore chain uses, they are validated by HandleMergeInputChains.
+      if (Op.getValueType() == MVT::Other && IgnoreChains)
+        continue;
+
+      SDNode *N = Op.getNode();
+      if (N == Def) {
+        if (Use == ImmedUse || Use == Root)
+          continue; // We are not looking for immediate use.
+        assert(N != Root);
+        return true;
+      }
+
+      // Traverse up the operand chain.
+      WorkList.push_back(N);
+    }
   }
   return false;
 }
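The hunk above turns the recursive operand walk in findNonImmUse into a loop over an explicit worklist, which avoids deep call stacks on large DAGs. A minimal, self-contained sketch of the same recursion-to-worklist shape, with a hypothetical Node type rather than the real SDNode API:

    // Sketch: depth-first reachability over a DAG using an explicit worklist
    // instead of recursion, mirroring the shape of the new findNonImmUse loop.
    #include <unordered_set>
    #include <vector>

    struct Node {
      std::vector<Node *> Operands;
    };

    // Returns true if Target is reachable from Start by following operands.
    bool reaches(Node *Start, Node *Target) {
      std::vector<Node *> WorkList;
      std::unordered_set<Node *> Visited;
      WorkList.push_back(Start);

      while (!WorkList.empty()) {
        Node *N = WorkList.back();
        WorkList.pop_back();
        // Skip nodes we already scanned; revisiting cannot change the result.
        if (!Visited.insert(N).second)
          continue;
        for (Node *Op : N->Operands) {
          if (Op == Target)
            return true;
          WorkList.push_back(Op); // Defer scanning this operand's operands.
        }
      }
      return false;
    }

    int main() {
      Node A, B, C;
      A.Operands = {&B};
      B.Operands = {&C};
      return reaches(&A, &C) ? 0 : 1; // expect 0: C is reachable from A
    }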
@ -818,7 +818,7 @@ SelectionDAGBuilder::LowerStatepoint(ImmutableStatepoint ISP,
   SI.GCTransitionArgs =
       ArrayRef<const Use>(ISP.gc_args_begin(), ISP.gc_args_end());
   SI.ID = ISP.getID();
-  SI.DeoptState = ArrayRef<const Use>(ISP.vm_state_begin(), ISP.vm_state_end());
+  SI.DeoptState = ArrayRef<const Use>(ISP.deopt_begin(), ISP.deopt_end());
   SI.StatepointFlags = ISP.getFlags();
   SI.NumPatchBytes = ISP.getNumPatchBytes();
   SI.EHPadBB = EHPadBB;
@ -1493,8 +1493,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
     }
   }

-  // Ensure that the constant occurs on the RHS, and fold constant
-  // comparisons.
+  // Ensure that the constant occurs on the RHS and fold constant comparisons.
   ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond);
   if (isa<ConstantSDNode>(N0.getNode()) &&
       (DCI.isBeforeLegalizeOps() ||
@ -1638,14 +1637,13 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
           return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0),
                               TopSetCC.getOperand(1),
                               InvCond);
-
         }
       }
     }

-    // If the LHS is '(and load, const)', the RHS is 0,
-    // the test is for equality or unsigned, and all 1 bits of the const are
-    // in the same partial word, see if we can shorten the load.
+    // If the LHS is '(and load, const)', the RHS is 0, the test is for
+    // equality or unsigned, and all 1 bits of the const are in the same
+    // partial word, see if we can shorten the load.
     if (DCI.isBeforeLegalize() &&
         !ISD::isSignedIntSetCC(Cond) &&
         N0.getOpcode() == ISD::AND && C1 == 0 &&
@ -1669,10 +1667,10 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
         APInt newMask = APInt::getLowBitsSet(maskWidth, width);
         for (unsigned offset=0; offset<origWidth/width; offset++) {
           if ((newMask & Mask) == Mask) {
-            if (!DAG.getDataLayout().isLittleEndian())
-              bestOffset = (origWidth/width - offset - 1) * (width/8);
-            else
+            if (DAG.getDataLayout().isLittleEndian())
               bestOffset = (uint64_t)offset * (width/8);
+            else
+              bestOffset = (origWidth/width - offset - 1) * (width/8);
             bestMask = Mask.lshr(offset * (width/8) * 8);
             bestWidth = width;
             break;
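The hunk above only inverts the endianness check and swaps the branches (behavior is unchanged): when a wide load is narrowed to the masked chunk, the chunk's byte offset in memory depends on endianness. A small worked sketch of that offset computation, with a hypothetical helper rather than the SelectionDAG code:

    // Sketch: byte offset of the narrowed load for a value of OrigWidth bits
    // masked down to a Width-bit chunk, for both endiannesses.
    #include <cassert>
    #include <cstdint>

    uint64_t narrowedByteOffset(bool IsLittleEndian, uint64_t OrigWidth,
                                uint64_t Width, uint64_t Offset) {
      // Offset counts Width-bit chunks starting from the least significant end
      // of the value; convert it to a byte offset within the in-memory object.
      if (IsLittleEndian)
        return Offset * (Width / 8);
      return (OrigWidth / Width - Offset - 1) * (Width / 8);
    }

    int main() {
      // Masking bits [15:8] of a 32-bit load (OrigWidth=32, Width=8, Offset=1):
      assert(narrowedByteOffset(true, 32, 8, 1) == 1);  // little endian
      assert(narrowedByteOffset(false, 32, 8, 1) == 2); // big endian
      return 0;
    }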
@ -1713,10 +1711,12 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
       switch (Cond) {
       case ISD::SETUGT:
       case ISD::SETUGE:
-      case ISD::SETEQ: return DAG.getConstant(0, dl, VT);
+      case ISD::SETEQ:
+        return DAG.getConstant(0, dl, VT);
       case ISD::SETULT:
       case ISD::SETULE:
-      case ISD::SETNE: return DAG.getConstant(1, dl, VT);
+      case ISD::SETNE:
+        return DAG.getConstant(1, dl, VT);
       case ISD::SETGT:
       case ISD::SETGE:
         // True if the sign bit of C1 is set.
@ -1816,9 +1816,9 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                    BitWidth-1))) {
         // Okay, get the un-inverted input value.
         SDValue Val;
-        if (N0.getOpcode() == ISD::XOR)
+        if (N0.getOpcode() == ISD::XOR) {
           Val = N0.getOperand(0);
-        else {
+        } else {
           assert(N0.getOpcode() == ISD::AND &&
                  N0.getOperand(0).getOpcode() == ISD::XOR);
           // ((X^1)&1)^1 -> X & 1
@ -1883,7 +1883,10 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,

     // Canonicalize GE/LE comparisons to use GT/LT comparisons.
     if (Cond == ISD::SETGE || Cond == ISD::SETUGE) {
-      if (C1 == MinVal) return DAG.getConstant(1, dl, VT); // X >= MIN --> true
+      // X >= MIN --> true
+      if (C1 == MinVal)
+        return DAG.getConstant(1, dl, VT);
+
       // X >= C0 --> X > (C0 - 1)
       APInt C = C1 - 1;
       ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT;
@ -1898,7 +1901,10 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
     }

     if (Cond == ISD::SETLE || Cond == ISD::SETULE) {
-      if (C1 == MaxVal) return DAG.getConstant(1, dl, VT); // X <= MAX --> true
+      // X <= MAX --> true
+      if (C1 == MaxVal)
+        return DAG.getConstant(1, dl, VT);
+
       // X <= C0 --> X < (C0 + 1)
       APInt C = C1 + 1;
       ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT;
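The two hunks above are formatting-only, but the transformation they wrap is the usual canonicalization of GE/LE against a constant into GT/LT against an adjusted constant (X >= C becomes X > C-1 and X <= C becomes X < C+1, once the C == MIN / C == MAX cases have been folded to true). A tiny sanity check of that identity on 8-bit unsigned values, as a sketch:

    // Sketch: the GE/LE-to-GT/LT canonicalization from the hunks above,
    // exhaustively checked on small unsigned constants (plain C++, not
    // SelectionDAG).
    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint8_t X = 0;; ++X) {
        // X >= 5  <=>  X > 4, and X <= 200  <=>  X < 201, for unsigned values.
        assert((X >= 5) == (X > 4));
        assert((X <= 200) == (X < 201));
        if (X == 255)
          break;
      }
      return 0;
    }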
@ -1456,6 +1456,7 @@ void TargetLoweringBase::computeRegisterProperties(
       }
       if (IsLegalWiderType)
         break;
+      LLVM_FALLTHROUGH;
     }
     case TypeWidenVector: {
       // Try to widen the vector.
@ -1473,6 +1474,7 @@ void TargetLoweringBase::computeRegisterProperties(
       }
       if (IsLegalWiderType)
         break;
+      LLVM_FALLTHROUGH;
     }
     case TypeSplitVector:
     case TypeScalarizeVector: {
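Both hunks add LLVM_FALLTHROUGH where one case in computeRegisterProperties intentionally falls through into the next legalization strategy. A minimal sketch of the annotation, using the C++17 [[fallthrough]] attribute that LLVM's LLVM_FALLTHROUGH macro (from llvm/Support/Compiler.h) expands to when the compiler supports it; the function below is a made-up stand-in, not the real code:

    // Sketch: annotating an intentional case fall-through so warnings such as
    // -Wimplicit-fallthrough stay quiet.
    #include <cstdio>

    void legalize(int Strategy) {
      switch (Strategy) {
      case 0:
        std::puts("try promoting");
        [[fallthrough]]; // deliberately also try widening
      case 1:
        std::puts("try widening");
        break;
      default:
        std::puts("split or scalarize");
        break;
      }
    }

    int main() { legalize(0); }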
@ -27,6 +27,14 @@ Error CodeViewRecordIO::beginRecord(Optional<uint32_t> MaxLength) {
 Error CodeViewRecordIO::endRecord() {
   assert(!Limits.empty() && "Not in a record!");
   Limits.pop_back();
+  // We would like to assert that we actually read / wrote all the bytes that we
+  // expected to for this record, but unfortunately we can't do this. Some
+  // producers such as MASM over-allocate for certain types of records and
+  // commit the extraneous data, so when reading we can't be sure every byte
+  // will have been read. And when writing we over-allocate temporarily since
+  // we don't know how big the record is until we're finished writing it, so
+  // even though we don't commit the extraneous data, we still can't guarantee
+  // we're at the end of the allocated data.
   return Error::success();
 }

@ -49,6 +57,12 @@ uint32_t CodeViewRecordIO::maxFieldLength() const {
   return *Min;
 }

+Error CodeViewRecordIO::padToAlignment(uint32_t Align) {
+  if (isReading())
+    return Reader->padToAlignment(Align);
+  return Writer->padToAlignment(Align);
+}
+
 Error CodeViewRecordIO::skipPadding() {
   assert(!isWriting() && "Cannot skip padding while writing!");

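The new CodeViewRecordIO::padToAlignment simply forwards to the underlying stream reader or writer. The padding amount itself is ordinary alignment arithmetic; a small sketch with a hypothetical free function, not the CodeView stream reader/writer used above:

    // Sketch: number of padding bytes needed to advance Offset to the next
    // Align-byte boundary (Align assumed nonzero).
    #include <cassert>
    #include <cstdint>

    uint32_t bytesToAlignment(uint32_t Offset, uint32_t Align) {
      assert(Align != 0 && "alignment must be nonzero");
      uint32_t Rem = Offset % Align;
      return Rem == 0 ? 0 : Align - Rem;
    }

    int main() {
      assert(bytesToAlignment(0, 4) == 0);
      assert(bytesToAlignment(5, 4) == 3);  // pad 5 up to 8
      assert(bytesToAlignment(12, 4) == 0); // already aligned
      return 0;
    }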
Some files were not shown because too many files have changed in this diff.