Merge llvm, clang, lld, lldb, compiler-rt and libc++ r303571, and update
build glue.
Author: dim
Date:   2017-05-22 21:17:44 +00:00
Commit: 061a9fc919
410 changed files with 7659 additions and 4737 deletions


@ -69,6 +69,10 @@ extern int __xray_remove_handler_arg1();
/// Provide a function to invoke when XRay encounters a custom event.
extern int __xray_set_customevent_handler(void (*entry)(void*, std::size_t));
/// This removes whatever the currently provided custom event handler is.
/// Returns 1 on success, 0 on error.
extern int __xray_remove_customevent_handler();
enum XRayPatchingStatus {
NOT_INITIALIZED = 0,
SUCCESS = 1,


@ -194,10 +194,6 @@ void InitializeFlags() {
Report("WARNING: strchr* interceptors are enabled even though "
"replace_str=0. Use intercept_strchr=0 to disable them.");
}
if (!f->replace_str && common_flags()->intercept_strndup) {
Report("WARNING: strndup* interceptors are enabled even though "
"replace_str=0. Use intercept_strndup=0 to disable them.");
}
}
} // namespace __asan


@ -110,7 +110,8 @@ void ProcessGlobalRegions(Frontier *frontier) {
for (const __sanitizer::LoadedModule::AddressRange &range :
modules[i].ranges()) {
if (range.executable || !range.readable) continue;
// Sections storing global variables are writable and non-executable
if (range.executable || !range.writable) continue;
ScanGlobalRange(range.beg, range.end, frontier);
}


@ -341,6 +341,33 @@ INTERCEPTOR(char *, __strdup, char *src) {
#define MSAN_MAYBE_INTERCEPT___STRDUP
#endif
INTERCEPTOR(char *, strndup, char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
// On FreeBSD strndup() leverages strnlen().
InterceptorScope interceptor_scope;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(strndup)(src, n);
CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(char *, __strndup, char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(__strndup)(src, n);
CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
#define MSAN_MAYBE_INTERCEPT___STRNDUP INTERCEPT_FUNCTION(__strndup)
#else
#define MSAN_MAYBE_INTERCEPT___STRNDUP
#endif
INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
ENSURE_MSAN_INITED();
char *res = REAL(gcvt)(number, ndigit, buf);
@ -1344,13 +1371,6 @@ int OnExit() {
return __msan_memcpy(to, from, size); \
}
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \
do { \
GET_STORE_STACK_TRACE; \
CopyShadowAndOrigin(to, from, size, &stack); \
__msan_unpoison(to + size, 1); \
} while (false)
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_common_interceptors.inc"
@ -1518,6 +1538,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(stpcpy); // NOLINT
INTERCEPT_FUNCTION(strdup);
MSAN_MAYBE_INTERCEPT___STRDUP;
INTERCEPT_FUNCTION(strndup);
MSAN_MAYBE_INTERCEPT___STRNDUP;
INTERCEPT_FUNCTION(strncpy); // NOLINT
INTERCEPT_FUNCTION(gcvt);
INTERCEPT_FUNCTION(strcat); // NOLINT
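For reference, a minimal sketch of what the MSan-specific strndup interception above guarantees: the copied bytes inherit the shadow of the source and the terminating NUL is explicitly unpoisoned. This is a hypothetical test program, not part of the commit; it only uses the public <sanitizer/msan_interface.h> API and assumes a build with -fsanitize=memory.

// Hypothetical check of the strndup shadow propagation shown above.
// Build with: clang++ -fsanitize=memory msan_strndup_check.cpp
#include <sanitizer/msan_interface.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main() {
  char src[8] = "abcdef";            // fully initialized source buffer
  __msan_poison(src + 3, 3);         // make bytes 3..5 uninitialized
  char *dup = strndup(src, 6);       // copies 6 bytes plus the NUL
  // Bytes 0..2 stay initialized, bytes 3..5 keep the poisoned shadow copied
  // by CopyShadowAndOrigin, and dup[6] (the NUL) is unpoisoned explicitly.
  assert(__msan_test_shadow(dup, 3) == -1);      // first 3 bytes clean
  assert(__msan_test_shadow(dup + 3, 3) == 0);   // first poisoned byte at offset 0
  assert(__msan_test_shadow(dup + 6, 1) == -1);  // trailing '\0' is clean
  free(dup);
  return 0;
}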


@ -285,9 +285,9 @@ void LoadedModule::clear() {
}
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
bool readable) {
bool writable) {
void *mem = InternalAlloc(sizeof(AddressRange));
AddressRange *r = new(mem) AddressRange(beg, end, executable, readable);
AddressRange *r = new(mem) AddressRange(beg, end, executable, writable);
ranges_.push_back(r);
if (executable && end > max_executable_address_)
max_executable_address_ = end;


@ -717,7 +717,7 @@ class LoadedModule {
void set(const char *module_name, uptr base_address, ModuleArch arch,
u8 uuid[kModuleUUIDSize], bool instrumented);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable, bool readable);
void addAddressRange(uptr beg, uptr end, bool executable, bool writable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
@ -732,14 +732,14 @@ class LoadedModule {
uptr beg;
uptr end;
bool executable;
bool readable;
bool writable;
AddressRange(uptr beg, uptr end, bool executable, bool readable)
AddressRange(uptr beg, uptr end, bool executable, bool writable)
: next(nullptr),
beg(beg),
end(end),
executable(executable),
readable(readable) {}
writable(writable) {}
};
const IntrusiveList<AddressRange> &ranges() const { return ranges_; }


@ -34,8 +34,6 @@
// COMMON_INTERCEPTOR_MEMSET_IMPL
// COMMON_INTERCEPTOR_MEMMOVE_IMPL
// COMMON_INTERCEPTOR_MEMCPY_IMPL
// COMMON_INTERCEPTOR_COPY_STRING
// COMMON_INTERCEPTOR_STRNDUP_IMPL
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
@ -219,25 +217,6 @@ bool PlatformHasDifferentMemcpyAndMemmove();
}
#endif
#ifndef COMMON_INTERCEPTOR_COPY_STRING
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
#endif
#ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL
#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \
COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \
uptr from_length = internal_strnlen(s, size); \
uptr copy_length = Min(size, from_length); \
char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \
if (common_flags()->intercept_strndup) { \
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, copy_length + 1); \
} \
COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
internal_memcpy(new_mem, s, copy_length); \
new_mem[copy_length] = '\0'; \
return new_mem;
#endif
struct FileMetadata {
// For open_memstream().
char **addr;
@ -321,26 +300,6 @@ INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
#define INIT_STRNLEN
#endif
#if SANITIZER_INTERCEPT_STRNDUP
INTERCEPTOR(char*, strndup, const char *s, uptr size) {
void *ctx;
COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
}
#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)
#else
#define INIT_STRNDUP
#endif // SANITIZER_INTERCEPT_STRNDUP
#if SANITIZER_INTERCEPT___STRNDUP
INTERCEPTOR(char*, __strndup, const char *s, uptr size) {
void *ctx;
COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
}
#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)
#else
#define INIT___STRNDUP
#endif // SANITIZER_INTERCEPT___STRNDUP
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
@ -6204,8 +6163,6 @@ static void InitializeCommonInterceptors() {
INIT_TEXTDOMAIN;
INIT_STRLEN;
INIT_STRNLEN;
INIT_STRNDUP;
INIT___STRNDUP;
INIT_STRCMP;
INIT_STRNCMP;
INIT_STRCASECMP;


@ -34,24 +34,40 @@ class FlagHandler : public FlagHandlerBase {
bool Parse(const char *value) final;
};
template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
inline bool ParseBool(const char *value, bool *b) {
if (internal_strcmp(value, "0") == 0 ||
internal_strcmp(value, "no") == 0 ||
internal_strcmp(value, "false") == 0) {
*t_ = false;
*b = false;
return true;
}
if (internal_strcmp(value, "1") == 0 ||
internal_strcmp(value, "yes") == 0 ||
internal_strcmp(value, "true") == 0) {
*t_ = true;
*b = true;
return true;
}
return false;
}
template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
if (ParseBool(value, t_)) return true;
Printf("ERROR: Invalid value for bool option: '%s'\n", value);
return false;
}
template <>
inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
bool b;
if (ParseBool(value, &b)) {
*t_ = b ? kHandleSignalYes : kHandleSignalNo;
return true;
}
Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
return false;
}
template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
*t_ = internal_strdup(value);
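A minimal standalone sketch of the parsing behaviour introduced here, with the names reimplemented for illustration (the real code lives in __sanitizer::FlagHandler): a handle_* flag now accepts the same spellings as a bool flag and is stored as a HandleSignalMode.

#include <cstdio>
#include <cstring>

enum HandleSignalMode { kHandleSignalNo, kHandleSignalYes };

// Mirrors __sanitizer::ParseBool: accepts 0/no/false and 1/yes/true.
static bool ParseBool(const char *value, bool *b) {
  if (!strcmp(value, "0") || !strcmp(value, "no") || !strcmp(value, "false")) {
    *b = false;
    return true;
  }
  if (!strcmp(value, "1") || !strcmp(value, "yes") || !strcmp(value, "true")) {
    *b = true;
    return true;
  }
  return false;  // unrecognized spelling
}

// Mirrors FlagHandler<HandleSignalMode>::Parse.
static bool ParseHandleSignalMode(const char *value, HandleSignalMode *m) {
  bool b;
  if (!ParseBool(value, &b)) return false;
  *m = b ? kHandleSignalYes : kHandleSignalNo;
  return true;
}

int main() {
  HandleSignalMode segv;
  if (ParseHandleSignalMode("yes", &segv))
    printf("handle_segv=%d\n", segv);  // prints 1 (kHandleSignalYes)
  return 0;
}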


@ -18,6 +18,11 @@
namespace __sanitizer {
enum HandleSignalMode {
kHandleSignalNo,
kHandleSignalYes,
};
struct CommonFlags {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sanitizer_flags.inc"


@ -78,16 +78,20 @@ COMMON_FLAG(int, print_module_map, 0,
"OS X only. 0 = don't print, 1 = print only once before process "
"exits, 2 = print after each report.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, true,
"If set, registers the tool's custom SIGSEGV handler.")
COMMON_FLAG(bool, handle_sigbus, true,
"If set, registers the tool's custom SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
COMMON_FLAG(bool, handle_sigill, false,
"If set, registers the tool's custom SIGILL handler.")
COMMON_FLAG(bool, handle_sigfpe, true,
"If set, registers the tool's custom SIGFPE handler.")
#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
"Controls custom tool's " #signal " handler (0 - do not registers the " \
"handler, 1 - register the handler). "
COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))
COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))
COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))
COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
COMMON_FLAG(bool, allow_user_segv_handler, false,
"If set, allows user to register a SEGV handler even if the tool "
"registers one.")
@ -195,9 +199,6 @@ COMMON_FLAG(bool, intercept_strpbrk, true,
COMMON_FLAG(bool, intercept_strlen, true,
"If set, uses custom wrappers for strlen and strnlen functions "
"to find more errors.")
COMMON_FLAG(bool, intercept_strndup, true,
"If set, uses custom wrappers for strndup functions "
"to find more errors.")
COMMON_FLAG(bool, intercept_strchr, true,
"If set, uses custom wrappers for strchr, strchrnul, and strrchr "
"functions to find more errors.")


@ -1395,15 +1395,19 @@ AndroidApiLevel AndroidGetApiLevel() {
#endif
bool IsHandledDeadlySignal(int signum) {
if (common_flags()->handle_abort && signum == SIGABRT)
return true;
if (common_flags()->handle_sigill && signum == SIGILL)
return true;
if (common_flags()->handle_sigfpe && signum == SIGFPE)
return true;
if (common_flags()->handle_segv && signum == SIGSEGV)
return true;
return common_flags()->handle_sigbus && signum == SIGBUS;
switch (signum) {
case SIGABRT:
return common_flags()->handle_abort;
case SIGILL:
return common_flags()->handle_sigill;
case SIGFPE:
return common_flags()->handle_sigfpe;
case SIGSEGV:
return common_flags()->handle_segv;
case SIGBUS:
return common_flags()->handle_sigbus;
}
return false;
}
#if !SANITIZER_GO


@ -447,9 +447,9 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
bool readable = phdr->p_flags & PF_R;
bool writable = phdr->p_flags & PF_W;
cur_module.addAddressRange(cur_beg, cur_end, executable,
readable);
writable);
}
}
data->modules->push_back(cur_module);


@ -394,18 +394,22 @@ void ListOfModules::init() {
}
bool IsHandledDeadlySignal(int signum) {
// Handling fatal signals on watchOS and tvOS devices is disallowed.
if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
// Handling fatal signals on watchOS and tvOS devices is disallowed.
return false;
if (common_flags()->handle_abort && signum == SIGABRT)
return true;
if (common_flags()->handle_sigill && signum == SIGILL)
return true;
if (common_flags()->handle_sigfpe && signum == SIGFPE)
return true;
if (common_flags()->handle_segv && signum == SIGSEGV)
return true;
return common_flags()->handle_sigbus && signum == SIGBUS;
switch (signum) {
case SIGABRT:
return common_flags()->handle_abort;
case SIGILL:
return common_flags()->handle_sigill;
case SIGFPE:
return common_flags()->handle_sigfpe;
case SIGSEGV:
return common_flags()->handle_segv;
case SIGBUS:
return common_flags()->handle_sigbus;
}
return false;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;


@ -25,12 +25,6 @@
# define SI_NOT_WINDOWS 0
#endif
#if SANITIZER_POSIX
# define SI_POSIX 1
#else
# define SI_POSIX 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define SI_LINUX_NOT_ANDROID 1
#else
@ -75,12 +69,6 @@
# define SI_UNIX_NOT_MAC 0
#endif
#if SANITIZER_LINUX && !SANITIZER_FREEBSD
# define SI_LINUX_NOT_FREEBSD 1
# else
# define SI_LINUX_NOT_FREEBSD 0
#endif
#define SANITIZER_INTERCEPT_STRLEN 1
#define SANITIZER_INTERCEPT_STRNLEN SI_NOT_MAC
#define SANITIZER_INTERCEPT_STRCMP 1
@ -98,8 +86,6 @@
#define SANITIZER_INTERCEPT_MEMMOVE 1
#define SANITIZER_INTERCEPT_MEMCPY 1
#define SANITIZER_INTERCEPT_MEMCMP 1
#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
# define SI_MAC_DEPLOYMENT_BELOW_10_7 1


@ -142,7 +142,7 @@ void MemoryMappingLayout::DumpListOfModules(
LoadedModule cur_module;
cur_module.set(cur_name, base_address);
cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
prot & kProtectionRead);
prot & kProtectionWrite);
modules->push_back(cur_module);
}
}


@ -336,7 +336,7 @@ void MemoryMappingLayout::DumpListOfModules(
current_instrumented_);
}
cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
prot & kProtectionRead);
prot & kProtectionWrite);
}
}


@ -554,7 +554,7 @@ void ListOfModules::init() {
cur_module.set(module_name, adjusted_base);
// We add the whole module as one single address range.
cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
/*readable*/ true);
/*writable*/ true);
modules_.push_back(cur_module);
}
UnmapOrDie(hmodules, modules_buffer_size);


@ -68,7 +68,7 @@ void initFlags() {
// Sanity checks and default settings for the Quarantine parameters.
if (f->QuarantineSizeMb < 0) {
const int DefaultQuarantineSizeMb = FIRST_32_SECOND_64(16, 64);
const int DefaultQuarantineSizeMb = FIRST_32_SECOND_64(4, 16);
f->QuarantineSizeMb = DefaultQuarantineSizeMb;
}
// We enforce an upper limit for the quarantine size of 4Gb.
@ -77,7 +77,7 @@ void initFlags() {
}
if (f->ThreadLocalQuarantineSizeKb < 0) {
const int DefaultThreadLocalQuarantineSizeKb =
FIRST_32_SECOND_64(256, 1024);
FIRST_32_SECOND_64(64, 256);
f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
}
// And an upper limit of 128Mb for the thread quarantine cache.


@ -119,10 +119,15 @@ int __xray_set_customevent_handler(void (*entry)(void *, size_t))
return 0;
}
int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
return __xray_set_handler(nullptr);
}
int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
return __xray_set_customevent_handler(nullptr);
}
__sanitizer::atomic_uint8_t XRayPatching{0};
using namespace __xray;
@ -326,7 +331,14 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
__sanitizer::SpinMutexLock Guard(&XRayInstrMapMutex);
if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
return 0;
return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Address;
return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Address
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
// Always return the global entry.
#ifdef __PPC__
& ~0xf
#endif
;
}
size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
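The PPC special case above reduces to simple mask arithmetic: global function entries are 16-byte aligned and the local entry sits 8 bytes past them, so clearing the low four bits always recovers the global entry. A small illustration with a made-up address (0x10000230 is hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  // Global function entries on PPC are 16-byte aligned; the local entry
  // point sits 8 bytes past the global entry.
  uintptr_t global_entry = 0x10000230;        // hypothetical, 16-byte aligned
  uintptr_t local_entry = global_entry + 8;   // what a sled might record
  // Masking off the low 4 bits recovers the global entry, as the
  // __xray_function_address() change above does with '& ~0xf'.
  assert((local_entry & ~uintptr_t(0xf)) == global_entry);
  return 0;
}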


@ -182,8 +182,9 @@ private:
/// provides a more convenient form of divide for internal use since KnuthDiv
/// has specific constraints on its inputs. If those constraints are not met
/// then it provides a simpler form of divide.
static void divide(const APInt &LHS, unsigned lhsWords, const APInt &RHS,
unsigned rhsWords, APInt *Quotient, APInt *Remainder);
static void divide(const WordType *LHS, unsigned lhsWords,
const WordType *RHS, unsigned rhsWords, WordType *Quotient,
WordType *Remainder);
/// out-of-line slow case for inline constructor
void initSlowCase(uint64_t val, bool isSigned);
@ -1016,11 +1017,13 @@ public:
///
/// \returns a new APInt value containing the division result
APInt udiv(const APInt &RHS) const;
APInt udiv(uint64_t RHS) const;
/// \brief Signed division function for APInt.
///
/// Signed divide this APInt by APInt RHS.
APInt sdiv(const APInt &RHS) const;
APInt sdiv(int64_t RHS) const;
/// \brief Unsigned remainder operation.
///
@ -1032,11 +1035,13 @@ public:
///
/// \returns a new APInt value containing the remainder result
APInt urem(const APInt &RHS) const;
uint64_t urem(uint64_t RHS) const;
/// \brief Function for signed remainder operation.
///
/// Signed remainder operation on APInt.
APInt srem(const APInt &RHS) const;
int64_t srem(int64_t RHS) const;
/// \brief Dual division/remainder interface.
///
@ -1047,9 +1052,13 @@ public:
/// udivrem(X, Y, X, Y), for example.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
APInt &Remainder);
static void udivrem(const APInt &LHS, uint64_t RHS, APInt &Quotient,
uint64_t &Remainder);
static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
APInt &Remainder);
static void sdivrem(const APInt &LHS, int64_t RHS, APInt &Quotient,
int64_t &Remainder);
// Operations that return overflow indicators.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
@ -2015,7 +2024,7 @@ inline APInt operator-(APInt a, const APInt &b) {
}
inline APInt operator-(const APInt &a, APInt &&b) {
b = -std::move(b);
b.negate();
b += a;
return std::move(b);
}
@ -2026,7 +2035,7 @@ inline APInt operator-(APInt a, uint64_t RHS) {
}
inline APInt operator-(uint64_t LHS, APInt b) {
b = -std::move(b);
b.negate();
b += LHS;
return b;
}
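A small usage sketch of the new word-sized division overloads and the negate-based subtraction; it assumes an LLVM tree at this revision providing llvm/ADT/APInt.h and is only illustrative.

#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

int main() {
  APInt A(64, 100);

  // New overloads taking a plain uint64_t avoid constructing a second APInt.
  APInt Q = A.udiv(7);       // 14
  uint64_t R = A.urem(7);    // 2
  assert(Q == 14 && R == 2);

  // operator-(uint64_t, APInt) is now expressed as negate-then-add:
  // 3 - A == (-A) + 3.
  APInt D = 3 - A;           // -97 in 64-bit two's complement
  assert(D.getSExtValue() == -97);
  return 0;
}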


@ -365,6 +365,8 @@ protected:
public:
using iterator = SmallPtrSetIterator<PtrType>;
using const_iterator = SmallPtrSetIterator<PtrType>;
using key_type = ConstPtrType;
using value_type = PtrType;
SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;


@ -101,6 +101,16 @@ public:
return init();
}
void updateMax(unsigned V) {
unsigned PrevMax = Value.load(std::memory_order_relaxed);
// Keep trying to update max until we succeed or another thread produces
// a bigger max than us.
while (V > PrevMax && !Value.compare_exchange_weak(
PrevMax, V, std::memory_order_relaxed)) {
}
init();
}
#else // Statistics are disabled in release builds.
const Statistic &operator=(unsigned Val) {
@ -131,6 +141,8 @@ public:
return *this;
}
void updateMax(unsigned V) {}
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
protected:
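The updateMax() addition uses the standard lock-free maximum pattern: reload the current maximum, and retry the compare-and-swap only while our value is still larger. A self-contained sketch of the same loop with std::atomic, independent of the Statistic machinery:

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

std::atomic<unsigned> MaxSeen{0};

// Keep trying until either we installed V or another thread installed
// something at least as large; the same loop Statistic::updateMax uses.
void updateMax(unsigned V) {
  unsigned Prev = MaxSeen.load(std::memory_order_relaxed);
  while (V > Prev &&
         !MaxSeen.compare_exchange_weak(Prev, V, std::memory_order_relaxed)) {
    // compare_exchange_weak reloads Prev on failure; just retry.
  }
}

int main() {
  std::vector<std::thread> Threads;
  for (unsigned I = 1; I <= 8; ++I)
    Threads.emplace_back([I] { updateMax(I * 10); });
  for (auto &T : Threads) T.join();
  assert(MaxSeen.load() == 80);
  return 0;
}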


@ -252,6 +252,10 @@ public:
ObjectFormat == Other.ObjectFormat;
}
bool operator!=(const Triple &Other) const {
return !(*this == Other);
}
/// @}
/// @name Normalization
/// @{
@ -722,6 +726,12 @@ public:
/// \returns true if the triple is little endian, false otherwise.
bool isLittleEndian() const;
/// Test whether target triples are compatible.
bool isCompatibleWith(const Triple &Other) const;
/// Merge target triples.
std::string merge(const Triple &Other) const;
/// @}
/// @name Static helpers for IDs.
/// @{


@ -84,6 +84,7 @@
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
@ -127,7 +128,7 @@ using const_memoryaccess_def_iterator =
// \brief The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
: public User,
: public DerivedUser,
public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
@ -145,15 +146,14 @@ public:
MemoryAccess(const MemoryAccess &) = delete;
MemoryAccess &operator=(const MemoryAccess &) = delete;
~MemoryAccess() override;
void *operator new(size_t, unsigned) = delete;
void *operator new(size_t) = delete;
BasicBlock *getBlock() const { return Block; }
virtual void print(raw_ostream &OS) const = 0;
virtual void dump() const;
void print(raw_ostream &OS) const;
void dump() const;
/// \brief The user iterators for a memory access
typedef user_iterator iterator;
@ -207,11 +207,12 @@ protected:
/// \brief Used for debugging and tracking things about MemoryAccesses.
/// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
virtual unsigned getID() const = 0;
inline unsigned getID() const;
MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
unsigned NumOperands)
: User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}
MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
BasicBlock *BB, unsigned NumOperands)
: DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
Block(BB) {}
private:
BasicBlock *Block;
@ -248,21 +249,21 @@ public:
// Sadly, these have to be public because they are needed in some of the
// iterators.
virtual bool isOptimized() const = 0;
virtual MemoryAccess *getOptimized() const = 0;
virtual void setOptimized(MemoryAccess *) = 0;
inline bool isOptimized() const;
inline MemoryAccess *getOptimized() const;
inline void setOptimized(MemoryAccess *);
/// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
/// be rewalked by the walker if necessary.
/// This really should only be called by tests.
virtual void resetOptimized() = 0;
inline void resetOptimized();
protected:
friend class MemorySSA;
friend class MemorySSAUpdater;
MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
Instruction *MI, BasicBlock *BB)
: MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB)
: MemoryAccess(C, Vty, DeleteValue, BB, 1), MemoryInst(MI) {
setDefiningAccess(DMA);
}
void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
@ -292,7 +293,8 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
: MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
: MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB),
OptimizedID(0) {}
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
@ -302,32 +304,30 @@ public:
return MA->getValueID() == MemoryUseVal;
}
void print(raw_ostream &OS) const override;
void print(raw_ostream &OS) const;
virtual void setOptimized(MemoryAccess *DMA) override {
void setOptimized(MemoryAccess *DMA) {
OptimizedID = DMA->getID();
setOperand(0, DMA);
}
virtual bool isOptimized() const override {
bool isOptimized() const {
return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
}
virtual MemoryAccess *getOptimized() const override {
MemoryAccess *getOptimized() const {
return getDefiningAccess();
}
virtual void resetOptimized() override {
void resetOptimized() {
OptimizedID = INVALID_MEMORYACCESS_ID;
}
protected:
friend class MemorySSA;
unsigned getID() const override {
llvm_unreachable("MemoryUses do not have IDs");
}
private:
static void deleteMe(DerivedUser *Self);
unsigned int OptimizedID;
};
@ -350,8 +350,8 @@ public:
MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
unsigned Ver)
: MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
: MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB),
ID(Ver), Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
@ -361,27 +361,28 @@ public:
return MA->getValueID() == MemoryDefVal;
}
virtual void setOptimized(MemoryAccess *MA) override {
void setOptimized(MemoryAccess *MA) {
Optimized = MA;
OptimizedID = getDefiningAccess()->getID();
}
virtual MemoryAccess *getOptimized() const override { return Optimized; }
virtual bool isOptimized() const override {
MemoryAccess *getOptimized() const { return Optimized; }
bool isOptimized() const {
return getOptimized() && getDefiningAccess() &&
OptimizedID == getDefiningAccess()->getID();
}
virtual void resetOptimized() override {
void resetOptimized() {
OptimizedID = INVALID_MEMORYACCESS_ID;
}
void print(raw_ostream &OS) const override;
void print(raw_ostream &OS) const;
protected:
friend class MemorySSA;
unsigned getID() const override { return ID; }
unsigned getID() const { return ID; }
private:
static void deleteMe(DerivedUser *Self);
const unsigned ID;
MemoryAccess *Optimized;
unsigned int OptimizedID;
@ -432,7 +433,8 @@ public:
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
: MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
: MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
ReservedSpace(NumPreds) {
allocHungoffUses(ReservedSpace);
}
@ -534,7 +536,9 @@ public:
return V->getValueID() == MemoryPhiVal;
}
void print(raw_ostream &OS) const override;
void print(raw_ostream &OS) const;
unsigned getID() const { return ID; }
protected:
friend class MemorySSA;
@ -546,8 +550,6 @@ protected:
User::allocHungoffUses(N, /* IsPhi */ true);
}
unsigned getID() const final { return ID; }
private:
// For debugging only
const unsigned ID;
@ -561,8 +563,45 @@ private:
ReservedSpace = std::max(E + E / 2, 2u);
growHungoffUses(ReservedSpace, /* IsPhi */ true);
}
static void deleteMe(DerivedUser *Self);
};
inline unsigned MemoryAccess::getID() const {
assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
"only memory defs and phis have ids");
if (const auto *MD = dyn_cast<MemoryDef>(this))
return MD->getID();
return cast<MemoryPhi>(this)->getID();
}
inline bool MemoryUseOrDef::isOptimized() const {
if (const auto *MD = dyn_cast<MemoryDef>(this))
return MD->isOptimized();
return cast<MemoryUse>(this)->isOptimized();
}
inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
if (const auto *MD = dyn_cast<MemoryDef>(this))
return MD->getOptimized();
return cast<MemoryUse>(this)->getOptimized();
}
inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
if (auto *MD = dyn_cast<MemoryDef>(this))
MD->setOptimized(MA);
else
cast<MemoryUse>(this)->setOptimized(MA);
}
inline void MemoryUseOrDef::resetOptimized() {
if (auto *MD = dyn_cast<MemoryDef>(this))
MD->resetOptimized();
else
cast<MemoryUse>(this)->resetOptimized();
}
template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
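The MemorySSA change above replaces virtual dispatch with manual dispatch keyed on the value ID, as the new inline MemoryAccess::getID() and MemoryUseOrDef::isOptimized() show. A stripped-down sketch of that pattern with a hypothetical two-class hierarchy (plain C++, kind tag instead of LLVM's value IDs):

#include <cassert>

// Hypothetical stand-ins for MemoryUse / MemoryDef dispatching on a kind tag
// instead of a vtable, mirroring the devirtualization done above.
struct Access {
  enum Kind { UseKind, DefKind } TheKind;
  explicit Access(Kind K) : TheKind(K) {}
  // Non-virtual: dispatch by inspecting the kind tag.
  inline unsigned getID() const;
};

struct Use : Access {
  Use() : Access(UseKind) {}
  unsigned getID() const { return 0; }  // uses have no real ID
};

struct Def : Access {
  explicit Def(unsigned ID) : Access(DefKind), ID(ID) {}
  unsigned getID() const { return ID; }
  unsigned ID;
};

inline unsigned Access::getID() const {
  if (TheKind == DefKind)
    return static_cast<const Def *>(this)->getID();
  return static_cast<const Use *>(this)->getID();
}

int main() {
  Def D(42);
  Use U;
  Access *A = &D;
  assert(A->getID() == 42);  // resolved without any virtual call
  assert(U.getID() == 0);
  return 0;
}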


@ -656,10 +656,12 @@ private:
/// Test whether this BackedgeTakenInfo contains complete information.
bool hasFullInfo() const { return isComplete(); }
/// Return an expression indicating the exact backedge-taken count of the
/// loop if it is known or SCEVCouldNotCompute otherwise. This is the
/// number of times the loop header can be guaranteed to execute, minus
/// one.
/// Return an expression indicating the exact *backedge-taken*
/// count of the loop if it is known or SCEVCouldNotCompute
/// otherwise. If execution makes it to the backedge on every
/// iteration (i.e. there are no abnormal exists like exception
/// throws and thread exits) then this is the number of times the
/// loop header will execute minus one.
///
/// If the SCEV predicate associated with the answer can be different
/// from AlwaysTrue, we must add a (non null) Predicates argument.
@ -1398,11 +1400,11 @@ public:
const SCEV *getExitCount(const Loop *L, BasicBlock *ExitingBlock);
/// If the specified loop has a predictable backedge-taken count, return it,
/// otherwise return a SCEVCouldNotCompute object. The backedge-taken count
/// is the number of times the loop header will be branched to from within
/// the loop. This is one less than the trip count of the loop, since it
/// doesn't count the first iteration, when the header is branched to from
/// outside the loop.
/// otherwise return a SCEVCouldNotCompute object. The backedge-taken count is
/// the number of times the loop header will be branched to from within the
/// loop, assuming there are no abnormal exists like exception throws. This is
/// one less than the trip count of the loop, since it doesn't count the first
/// iteration, when the header is branched to from outside the loop.
///
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
@ -1417,8 +1419,10 @@ public:
const SCEV *getPredicatedBackedgeTakenCount(const Loop *L,
SCEVUnionPredicate &Predicates);
/// Similar to getBackedgeTakenCount, except return the least SCEV value
/// that is known never to be less than the actual backedge taken count.
/// When successful, this returns a SCEVConstant that is greater than or equal
/// to (i.e. a "conservative over-approximation") of the value returend by
/// getBackedgeTakenCount. If such a value cannot be computed, it returns the
/// SCEVCouldNotCompute object.
const SCEV *getMaxBackedgeTakenCount(const Loop *L);
/// Return true if the backedge taken count is either the value returned by
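To make the backedge-taken versus trip count relationship described in the updated comments concrete, a tiny counting example (plain C++, illustrative numbers only; it does not claim anything about what SCEV computes for particular IR):

#include <cassert>

int main() {
  unsigned n = 7, header_execs = 0, backedges_taken = 0;
  // Each completed body execution is followed by one branch back to the
  // header (the backedge); the final header evaluation exits the loop.
  for (unsigned i = 0; (++header_execs, i != n); ++i)
    ++backedges_taken;
  // Backedge-taken count = 7, header executions = 8, so the trip count
  // (backedge-taken count + 1) equals the number of header executions.
  assert(backedges_taken == n && header_execs == n + 1);
  return 0;
}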


@ -191,6 +191,14 @@ public:
void setShouldSignExtI32Param(bool Val) {
ShouldSignExtI32Param = Val;
}
/// Returns the size of the wchar_t type in bytes.
unsigned getWCharSize(const Module &M) const;
/// Returns size of the default wchar_t type on target \p T. This is mostly
/// intended to verify that the size in the frontend matches LLVM. All other
/// queries should use getWCharSize() instead.
static unsigned getTargetWCharSize(const Triple &T);
};
/// Provides information about what library functions are available for
@ -307,6 +315,11 @@ public:
return Attribute::None;
}
/// \copydoc TargetLibraryInfoImpl::getWCharSize()
unsigned getWCharSize(const Module &M) const {
return Impl->getWCharSize(M);
}
/// Handle invalidation from the pass manager.
///
/// If we try to invalidate this info, just return false. It cannot become


@ -218,9 +218,38 @@ template <typename T> class ArrayRef;
DL);
}
/// Returns true if the GEP is based on a pointer to a string (array of i8),
/// and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP);
/// Returns true if the GEP is based on a pointer to a string (array of
// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
unsigned CharSize = 8);
/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
/// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
/// initializer, it just doesn't fit the ConstantDataArray interface).
const ConstantDataArray *Array;
/// Slice starts at this Offset.
uint64_t Offset;
/// Length of the slice.
uint64_t Length;
/// Moves the Offset and adjusts Length accordingly.
void move(uint64_t Delta) {
assert(Delta < Length);
Offset += Delta;
Length -= Delta;
}
/// Convenience accessor for elements in the slice.
uint64_t operator[](unsigned I) const {
return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset);
}
};
/// Returns true if the value \p V is a pointer into a ContantDataArray.
/// If successfull \p Index will point to a ConstantDataArray info object
/// with an apropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
unsigned ElementSize, uint64_t Offset = 0);
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
@ -233,7 +262,7 @@ template <typename T> class ArrayRef;
/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V);
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);
/// This method strips off any GEP address adjustments and pointer casts from
/// the specified value, returning the original object being addressed. Note
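The slice semantics introduced above are easy to demonstrate with a hypothetical standalone equivalent; this is a plain C++ mirror of ConstantDataArraySlice over a byte array, not the LLVM type itself.

#include <cassert>
#include <cstdint>

// Plain-data mirror of ConstantDataArraySlice: a window [Offset, Offset+Length)
// into a backing array; a null Array models a zeroinitializer.
struct Slice {
  const uint8_t *Array;
  uint64_t Offset;
  uint64_t Length;

  void move(uint64_t Delta) {  // slide the window forward
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array[I + Offset];
  }
};

int main() {
  const uint8_t Data[] = {'h', 'e', 'l', 'l', 'o', 0};
  Slice S{Data, 0, 6};
  assert(S[0] == 'h');
  S.move(2);                    // now covers "llo\0"
  assert(S.Length == 4 && S[0] == 'l');

  Slice Zero{nullptr, 0, 4};    // zeroinitializer: every element reads as 0
  assert(Zero[3] == 0);
  return 0;
}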


@ -189,7 +189,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
void pruneValue(LiveRange &LR, SlotIndex Kill,
SmallVectorImpl<SlotIndex> *EndPoints);
/// This function should be used. Its intend is to tell you that
/// This function should not be used. Its intend is to tell you that
/// you are doing something wrong if you call pruveValue directly on a
/// LiveInterval. Indeed, you are supposed to call pruneValue on the main
/// LiveRange and all the LiveRange of the subranges if any.


@ -56,117 +56,119 @@ class MVT {
FIRST_FP_VALUETYPE = f16,
LAST_FP_VALUETYPE = ppcf128,
v2i1 = 14, // 2 x i1
v4i1 = 15, // 4 x i1
v8i1 = 16, // 8 x i1
v16i1 = 17, // 16 x i1
v32i1 = 18, // 32 x i1
v64i1 = 19, // 64 x i1
v512i1 = 20, // 512 x i1
v1024i1 = 21, // 1024 x i1
v1i1 = 14, // 1 x i1
v2i1 = 15, // 2 x i1
v4i1 = 16, // 4 x i1
v8i1 = 17, // 8 x i1
v16i1 = 18, // 16 x i1
v32i1 = 19, // 32 x i1
v64i1 = 20, // 64 x i1
v512i1 = 21, // 512 x i1
v1024i1 = 22, // 1024 x i1
v1i8 = 22, // 1 x i8
v2i8 = 23, // 2 x i8
v4i8 = 24, // 4 x i8
v8i8 = 25, // 8 x i8
v16i8 = 26, // 16 x i8
v32i8 = 27, // 32 x i8
v64i8 = 28, // 64 x i8
v128i8 = 29, //128 x i8
v256i8 = 30, //256 x i8
v1i8 = 23, // 1 x i8
v2i8 = 24, // 2 x i8
v4i8 = 25, // 4 x i8
v8i8 = 26, // 8 x i8
v16i8 = 27, // 16 x i8
v32i8 = 28, // 32 x i8
v64i8 = 29, // 64 x i8
v128i8 = 30, //128 x i8
v256i8 = 31, //256 x i8
v1i16 = 31, // 1 x i16
v2i16 = 32, // 2 x i16
v4i16 = 33, // 4 x i16
v8i16 = 34, // 8 x i16
v16i16 = 35, // 16 x i16
v32i16 = 36, // 32 x i16
v64i16 = 37, // 64 x i16
v128i16 = 38, //128 x i16
v1i16 = 32, // 1 x i16
v2i16 = 33, // 2 x i16
v4i16 = 34, // 4 x i16
v8i16 = 35, // 8 x i16
v16i16 = 36, // 16 x i16
v32i16 = 37, // 32 x i16
v64i16 = 38, // 64 x i16
v128i16 = 39, //128 x i16
v1i32 = 39, // 1 x i32
v2i32 = 40, // 2 x i32
v4i32 = 41, // 4 x i32
v8i32 = 42, // 8 x i32
v16i32 = 43, // 16 x i32
v32i32 = 44, // 32 x i32
v64i32 = 45, // 64 x i32
v1i32 = 40, // 1 x i32
v2i32 = 41, // 2 x i32
v4i32 = 42, // 4 x i32
v8i32 = 43, // 8 x i32
v16i32 = 44, // 16 x i32
v32i32 = 45, // 32 x i32
v64i32 = 46, // 64 x i32
v1i64 = 46, // 1 x i64
v2i64 = 47, // 2 x i64
v4i64 = 48, // 4 x i64
v8i64 = 49, // 8 x i64
v16i64 = 50, // 16 x i64
v32i64 = 51, // 32 x i64
v1i64 = 47, // 1 x i64
v2i64 = 48, // 2 x i64
v4i64 = 49, // 4 x i64
v8i64 = 50, // 8 x i64
v16i64 = 51, // 16 x i64
v32i64 = 52, // 32 x i64
v1i128 = 52, // 1 x i128
v1i128 = 53, // 1 x i128
// Scalable integer types
nxv2i1 = 53, // n x 2 x i1
nxv4i1 = 54, // n x 4 x i1
nxv8i1 = 55, // n x 8 x i1
nxv16i1 = 56, // n x 16 x i1
nxv32i1 = 57, // n x 32 x i1
nxv1i1 = 54, // n x 1 x i1
nxv2i1 = 55, // n x 2 x i1
nxv4i1 = 56, // n x 4 x i1
nxv8i1 = 57, // n x 8 x i1
nxv16i1 = 58, // n x 16 x i1
nxv32i1 = 59, // n x 32 x i1
nxv1i8 = 58, // n x 1 x i8
nxv2i8 = 59, // n x 2 x i8
nxv4i8 = 60, // n x 4 x i8
nxv8i8 = 61, // n x 8 x i8
nxv16i8 = 62, // n x 16 x i8
nxv32i8 = 63, // n x 32 x i8
nxv1i8 = 60, // n x 1 x i8
nxv2i8 = 61, // n x 2 x i8
nxv4i8 = 62, // n x 4 x i8
nxv8i8 = 63, // n x 8 x i8
nxv16i8 = 64, // n x 16 x i8
nxv32i8 = 65, // n x 32 x i8
nxv1i16 = 64, // n x 1 x i16
nxv2i16 = 65, // n x 2 x i16
nxv4i16 = 66, // n x 4 x i16
nxv8i16 = 67, // n x 8 x i16
nxv16i16 = 68, // n x 16 x i16
nxv32i16 = 69, // n x 32 x i16
nxv1i16 = 66, // n x 1 x i16
nxv2i16 = 67, // n x 2 x i16
nxv4i16 = 68, // n x 4 x i16
nxv8i16 = 69, // n x 8 x i16
nxv16i16 = 70, // n x 16 x i16
nxv32i16 = 71, // n x 32 x i16
nxv1i32 = 70, // n x 1 x i32
nxv2i32 = 71, // n x 2 x i32
nxv4i32 = 72, // n x 4 x i32
nxv8i32 = 73, // n x 8 x i32
nxv16i32 = 74, // n x 16 x i32
nxv32i32 = 75, // n x 32 x i32
nxv1i32 = 72, // n x 1 x i32
nxv2i32 = 73, // n x 2 x i32
nxv4i32 = 74, // n x 4 x i32
nxv8i32 = 75, // n x 8 x i32
nxv16i32 = 76, // n x 16 x i32
nxv32i32 = 77, // n x 32 x i32
nxv1i64 = 76, // n x 1 x i64
nxv2i64 = 77, // n x 2 x i64
nxv4i64 = 78, // n x 4 x i64
nxv8i64 = 79, // n x 8 x i64
nxv16i64 = 80, // n x 16 x i64
nxv32i64 = 81, // n x 32 x i64
nxv1i64 = 78, // n x 1 x i64
nxv2i64 = 79, // n x 2 x i64
nxv4i64 = 80, // n x 4 x i64
nxv8i64 = 81, // n x 8 x i64
nxv16i64 = 82, // n x 16 x i64
nxv32i64 = 83, // n x 32 x i64
FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
FIRST_INTEGER_VECTOR_VALUETYPE = v1i1,
LAST_INTEGER_VECTOR_VALUETYPE = nxv32i64,
FIRST_INTEGER_SCALABLE_VALUETYPE = nxv2i1,
FIRST_INTEGER_SCALABLE_VALUETYPE = nxv1i1,
LAST_INTEGER_SCALABLE_VALUETYPE = nxv32i64,
v2f16 = 82, // 2 x f16
v4f16 = 83, // 4 x f16
v8f16 = 84, // 8 x f16
v1f32 = 85, // 1 x f32
v2f32 = 86, // 2 x f32
v4f32 = 87, // 4 x f32
v8f32 = 88, // 8 x f32
v16f32 = 89, // 16 x f32
v1f64 = 90, // 1 x f64
v2f64 = 91, // 2 x f64
v4f64 = 92, // 4 x f64
v8f64 = 93, // 8 x f64
v2f16 = 84, // 2 x f16
v4f16 = 85, // 4 x f16
v8f16 = 86, // 8 x f16
v1f32 = 87, // 1 x f32
v2f32 = 88, // 2 x f32
v4f32 = 89, // 4 x f32
v8f32 = 90, // 8 x f32
v16f32 = 91, // 16 x f32
v1f64 = 92, // 1 x f64
v2f64 = 93, // 2 x f64
v4f64 = 94, // 4 x f64
v8f64 = 95, // 8 x f64
nxv2f16 = 94, // n x 2 x f16
nxv4f16 = 95, // n x 4 x f16
nxv8f16 = 96, // n x 8 x f16
nxv1f32 = 97, // n x 1 x f32
nxv2f32 = 98, // n x 2 x f32
nxv4f32 = 99, // n x 4 x f32
nxv8f32 = 100, // n x 8 x f32
nxv16f32 = 101, // n x 16 x f32
nxv1f64 = 102, // n x 1 x f64
nxv2f64 = 103, // n x 2 x f64
nxv4f64 = 104, // n x 4 x f64
nxv8f64 = 105, // n x 8 x f64
nxv2f16 = 96, // n x 2 x f16
nxv4f16 = 97, // n x 4 x f16
nxv8f16 = 98, // n x 8 x f16
nxv1f32 = 99, // n x 1 x f32
nxv2f32 = 100, // n x 2 x f32
nxv4f32 = 101, // n x 4 x f32
nxv8f32 = 102, // n x 8 x f32
nxv16f32 = 103, // n x 16 x f32
nxv1f64 = 104, // n x 1 x f64
nxv2f64 = 105, // n x 2 x f64
nxv4f64 = 106, // n x 4 x f64
nxv8f64 = 107, // n x 8 x f64
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = nxv8f64,
@ -174,21 +176,21 @@ class MVT {
FIRST_FP_SCALABLE_VALUETYPE = nxv2f16,
LAST_FP_SCALABLE_VALUETYPE = nxv8f64,
FIRST_VECTOR_VALUETYPE = v2i1,
FIRST_VECTOR_VALUETYPE = v1i1,
LAST_VECTOR_VALUETYPE = nxv8f64,
x86mmx = 106, // This is an X86 MMX value
x86mmx = 108, // This is an X86 MMX value
Glue = 107, // This glues nodes together during pre-RA sched
Glue = 109, // This glues nodes together during pre-RA sched
isVoid = 108, // This has no value
isVoid = 110, // This has no value
Untyped = 109, // This value takes a register, but has
Untyped = 111, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
FIRST_VALUETYPE = 1, // This is always the beginning of the list.
LAST_VALUETYPE = 110, // This always remains at the end of the list.
LAST_VALUETYPE = 112, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@ -411,6 +413,7 @@ class MVT {
switch (SimpleTy) {
default:
llvm_unreachable("Not a vector MVT!");
case v1i1:
case v2i1:
case v4i1:
case v8i1:
@ -419,6 +422,7 @@ class MVT {
case v64i1:
case v512i1:
case v1024i1:
case nxv1i1:
case nxv2i1:
case nxv4i1:
case nxv8i1:
@ -589,6 +593,7 @@ class MVT {
case nxv2f16:
case nxv2f32:
case nxv2f64: return 2;
case v1i1:
case v1i8:
case v1i16:
case v1i32:
@ -596,6 +601,7 @@ class MVT {
case v1i128:
case v1f32:
case v1f64:
case nxv1i1:
case nxv1i8:
case nxv1i16:
case nxv1i32:
@ -628,7 +634,9 @@ class MVT {
"in codegen and has no size");
case Metadata:
llvm_unreachable("Value type is metadata.");
case i1 : return 1;
case i1:
case v1i1:
case nxv1i1: return 1;
case v2i1:
case nxv2i1: return 2;
case v4i1:
@ -814,6 +822,7 @@ class MVT {
default:
break;
case MVT::i1:
if (NumElements == 1) return MVT::v1i1;
if (NumElements == 2) return MVT::v2i1;
if (NumElements == 4) return MVT::v4i1;
if (NumElements == 8) return MVT::v8i1;
@ -891,6 +900,7 @@ class MVT {
default:
break;
case MVT::i1:
if (NumElements == 1) return MVT::nxv1i1;
if (NumElements == 2) return MVT::nxv2i1;
if (NumElements == 4) return MVT::nxv4i1;
if (NumElements == 8) return MVT::nxv8i1;


@ -33,7 +33,7 @@ class raw_ostream;
/// List of target independent CodeGen pass IDs.
namespace llvm {
FunctionPass *createAtomicExpandPass(const TargetMachine *TM);
FunctionPass *createAtomicExpandPass();
/// createUnreachableBlockEliminationPass - The LLVM code generator does not
/// work well with unreachable basic blocks (what live ranges make sense for a
@ -66,7 +66,7 @@ namespace llvm {
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
FunctionPass *createCodeGenPreparePass();
/// createScalarizeMaskedMemIntrinPass - Replace masked load, store, gather
/// and scatter intrinsics with scalar code when target doesn't support them.
@ -133,10 +133,6 @@ namespace llvm {
// instruction and update the MachineFunctionInfo with that information.
extern char &ShrinkWrapID;
/// LiveRangeShrink pass. Move instruction close to its definition to shrink
/// the definition's live range.
extern char &LiveRangeShrinkID;
/// Greedy register allocator.
extern char &RAGreedyID;
@ -177,7 +173,7 @@ namespace llvm {
/// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
/// and eliminates abstract frame references.
extern char &PrologEpilogCodeInserterID;
MachineFunctionPass *createPrologEpilogInserterPass(const TargetMachine *TM);
MachineFunctionPass *createPrologEpilogInserterPass();
/// ExpandPostRAPseudos - This pass expands pseudo instructions after
/// register allocation.
@ -305,7 +301,7 @@ namespace llvm {
/// createStackProtectorPass - This pass adds stack protectors to functions.
///
FunctionPass *createStackProtectorPass(const TargetMachine *TM);
FunctionPass *createStackProtectorPass();
/// createMachineVerifierPass - This pass verifies cenerated machine code
/// instructions for correctness.
@ -314,11 +310,11 @@ namespace llvm {
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.
FunctionPass *createDwarfEHPass(const TargetMachine *TM);
FunctionPass *createDwarfEHPass();
/// createWinEHPass - Prepares personality functions used by MSVC on Windows,
/// in addition to the Itanium LSDA based personalities.
FunctionPass *createWinEHPass(const TargetMachine *TM);
FunctionPass *createWinEHPass();
/// createSjLjEHPreparePass - This pass adapts exception handling code to use
/// the GCC-style builtin setjmp/longjmp (sjlj) to handling EH control flow.
@ -362,12 +358,12 @@ namespace llvm {
/// InterleavedAccess Pass - This pass identifies and matches interleaved
/// memory accesses to target specific intrinsics.
///
FunctionPass *createInterleavedAccessPass(const TargetMachine *TM);
FunctionPass *createInterleavedAccessPass();
/// LowerEmuTLS - This pass generates __emutls_[vt].xyz variables for all
/// TLS variables for the emulated TLS model.
///
ModulePass *createLowerEmuTLSPass(const TargetMachine *TM);
ModulePass *createLowerEmuTLSPass();
/// This pass lowers the @llvm.load.relative intrinsic to instructions.
/// This is unsafe to do earlier because a pass may combine the constant
@ -384,7 +380,7 @@ namespace llvm {
/// This pass splits the stack into a safe stack and an unsafe stack to
/// protect against stack-based overflow vulnerabilities.
FunctionPass *createSafeStackPass(const TargetMachine *TM = nullptr);
FunctionPass *createSafeStackPass();
/// This pass detects subregister lanes in a virtual register that are used
/// independently of other lanes and splits them into separate virtual
@ -419,33 +415,4 @@ namespace llvm {
} // End llvm namespace
/// Target machine pass initializer for passes with dependencies. Use with
/// INITIALIZE_TM_PASS_END.
#define INITIALIZE_TM_PASS_BEGIN INITIALIZE_PASS_BEGIN
/// Target machine pass initializer for passes with dependencies. Use with
/// INITIALIZE_TM_PASS_BEGIN.
#define INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis) \
PassInfo *PI = new PassInfo( \
name, arg, &passName::ID, \
PassInfo::NormalCtor_t(callDefaultCtor<passName>), cfg, analysis, \
PassInfo::TargetMachineCtor_t(callTargetMachineCtor<passName>)); \
Registry.registerPass(*PI, true); \
return PI; \
} \
static llvm::once_flag Initialize##passName##PassFlag; \
void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
llvm::call_once(Initialize##passName##PassFlag, \
initialize##passName##PassOnce, std::ref(Registry)); \
}
/// This initializer registers TargetMachine constructor, so the pass being
/// initialized can use target dependent interfaces. Please do not move this
/// macro to be together with INITIALIZE_PASS, which is a complete target
/// independent initializer, and we don't want to make libScalarOpts depend
/// on libCodeGen.
#define INITIALIZE_TM_PASS(passName, arg, name, cfg, analysis) \
INITIALIZE_TM_PASS_BEGIN(passName, arg, name, cfg, analysis) \
INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis)
#endif


@ -19,6 +19,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
@ -55,7 +56,7 @@ private:
/// TLI - Keep a pointer of a TargetLowering to consult for determining
/// target type sizes.
const TargetLoweringBase *TLI = nullptr;
const Triple Trip;
Triple Trip;
Function *F;
Module *M;
@ -114,17 +115,12 @@ private:
public:
static char ID; // Pass identification, replacement for typeid.
StackProtector() : FunctionPass(ID) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
StackProtector(const TargetMachine *TM)
: FunctionPass(ID), TM(TM), Trip(TM->getTargetTriple()),
SSPBufferSize(8) {
StackProtector() : FunctionPass(ID), SSPBufferSize(8) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetPassConfig>();
AU.addPreserved<DominatorTreeWrapperPass>();
}
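With the TargetMachine constructor argument removed, passes in this series reach the target through the new TargetPassConfig dependency instead. A hedged sketch of how such a pass retrieves the TargetMachine; the ExamplePass name is hypothetical, and the getAnalysis/getTM accessor pattern is an assumption based on how the corresponding .cpp files in this commit (not shown in this hunk) consume the required TargetPassConfig.

#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

namespace {
// Hypothetical pass showing the new way to reach the TargetMachine: require
// TargetPassConfig (as the getAnalysisUsage() change above does) and query it.
struct ExamplePass : llvm::FunctionPass {
  static char ID;
  ExamplePass() : llvm::FunctionPass(ID) {}

  void getAnalysisUsage(llvm::AnalysisUsage &AU) const override {
    AU.addRequired<llvm::TargetPassConfig>();
  }

  bool runOnFunction(llvm::Function &F) override {
    const llvm::TargetMachine &TM =
        getAnalysis<llvm::TargetPassConfig>().getTM<llvm::TargetMachine>();
    (void)TM;  // consult TM.getTargetTriple(), options, subtargets, ...
    return false;
  }
};
char ExamplePass::ID = 0;
}  // namespace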


@ -33,115 +33,117 @@ def f80 : ValueType<80 , 11>; // 80-bit floating point value
def f128 : ValueType<128, 12>; // 128-bit floating point value
def ppcf128: ValueType<128, 13>; // PPC 128-bit floating point value
def v2i1 : ValueType<2 , 14>; // 2 x i1 vector value
def v4i1 : ValueType<4 , 15>; // 4 x i1 vector value
def v8i1 : ValueType<8 , 16>; // 8 x i1 vector value
def v16i1 : ValueType<16, 17>; // 16 x i1 vector value
def v32i1 : ValueType<32 , 18>; // 32 x i1 vector value
def v64i1 : ValueType<64 , 19>; // 64 x i1 vector value
def v512i1 : ValueType<512, 20>; // 512 x i1 vector value
def v1024i1: ValueType<1024,21>; //1024 x i1 vector value
def v1i1 : ValueType<1 , 14>; // 1 x i1 vector value
def v2i1 : ValueType<2 , 15>; // 2 x i1 vector value
def v4i1 : ValueType<4 , 16>; // 4 x i1 vector value
def v8i1 : ValueType<8 , 17>; // 8 x i1 vector value
def v16i1 : ValueType<16, 18>; // 16 x i1 vector value
def v32i1 : ValueType<32 , 19>; // 32 x i1 vector value
def v64i1 : ValueType<64 , 20>; // 64 x i1 vector value
def v512i1 : ValueType<512, 21>; // 512 x i1 vector value
def v1024i1: ValueType<1024,22>; //1024 x i1 vector value
def v1i8 : ValueType<8, 22>; // 1 x i8 vector value
def v2i8 : ValueType<16 , 23>; // 2 x i8 vector value
def v4i8 : ValueType<32 , 24>; // 4 x i8 vector value
def v8i8 : ValueType<64 , 25>; // 8 x i8 vector value
def v16i8 : ValueType<128, 26>; // 16 x i8 vector value
def v32i8 : ValueType<256, 27>; // 32 x i8 vector value
def v64i8 : ValueType<512, 28>; // 64 x i8 vector value
def v128i8 : ValueType<1024,29>; //128 x i8 vector value
def v256i8 : ValueType<2048,30>; //256 x i8 vector value
def v1i8 : ValueType<8, 23>; // 1 x i8 vector value
def v2i8 : ValueType<16 , 24>; // 2 x i8 vector value
def v4i8 : ValueType<32 , 25>; // 4 x i8 vector value
def v8i8 : ValueType<64 , 26>; // 8 x i8 vector value
def v16i8 : ValueType<128, 27>; // 16 x i8 vector value
def v32i8 : ValueType<256, 28>; // 32 x i8 vector value
def v64i8 : ValueType<512, 29>; // 64 x i8 vector value
def v128i8 : ValueType<1024,30>; //128 x i8 vector value
def v256i8 : ValueType<2048,31>; //256 x i8 vector value
def v1i16 : ValueType<16 , 31>; // 1 x i16 vector value
def v2i16 : ValueType<32 , 32>; // 2 x i16 vector value
def v4i16 : ValueType<64 , 33>; // 4 x i16 vector value
def v8i16 : ValueType<128, 34>; // 8 x i16 vector value
def v16i16 : ValueType<256, 35>; // 16 x i16 vector value
def v32i16 : ValueType<512, 36>; // 32 x i16 vector value
def v64i16 : ValueType<1024,37>; // 64 x i16 vector value
def v128i16: ValueType<2048,38>; //128 x i16 vector value
def v1i16 : ValueType<16 , 32>; // 1 x i16 vector value
def v2i16 : ValueType<32 , 33>; // 2 x i16 vector value
def v4i16 : ValueType<64 , 34>; // 4 x i16 vector value
def v8i16 : ValueType<128, 35>; // 8 x i16 vector value
def v16i16 : ValueType<256, 36>; // 16 x i16 vector value
def v32i16 : ValueType<512, 37>; // 32 x i16 vector value
def v64i16 : ValueType<1024,38>; // 64 x i16 vector value
def v128i16: ValueType<2048,39>; //128 x i16 vector value
def v1i32 : ValueType<32 , 39>; // 1 x i32 vector value
def v2i32 : ValueType<64 , 40>; // 2 x i32 vector value
def v4i32 : ValueType<128, 41>; // 4 x i32 vector value
def v8i32 : ValueType<256, 42>; // 8 x i32 vector value
def v16i32 : ValueType<512, 43>; // 16 x i32 vector value
def v32i32 : ValueType<1024,44>; // 32 x i32 vector value
def v64i32 : ValueType<2048,45>; // 32 x i32 vector value
def v1i32 : ValueType<32 , 40>; // 1 x i32 vector value
def v2i32 : ValueType<64 , 41>; // 2 x i32 vector value
def v4i32 : ValueType<128, 42>; // 4 x i32 vector value
def v8i32 : ValueType<256, 43>; // 8 x i32 vector value
def v16i32 : ValueType<512, 44>; // 16 x i32 vector value
def v32i32 : ValueType<1024,45>; // 32 x i32 vector value
def v64i32 : ValueType<2048,46>; // 32 x i32 vector value
def v1i64 : ValueType<64 , 46>; // 1 x i64 vector value
def v2i64 : ValueType<128, 47>; // 2 x i64 vector value
def v4i64 : ValueType<256, 48>; // 4 x i64 vector value
def v8i64 : ValueType<512, 49>; // 8 x i64 vector value
def v16i64 : ValueType<1024,50>; // 16 x i64 vector value
def v32i64 : ValueType<2048,51>; // 32 x i64 vector value
def v1i64 : ValueType<64 , 47>; // 1 x i64 vector value
def v2i64 : ValueType<128, 48>; // 2 x i64 vector value
def v4i64 : ValueType<256, 49>; // 4 x i64 vector value
def v8i64 : ValueType<512, 50>; // 8 x i64 vector value
def v16i64 : ValueType<1024,51>; // 16 x i64 vector value
def v32i64 : ValueType<2048,52>; // 32 x i64 vector value
def v1i128 : ValueType<128, 52>; // 1 x i128 vector value
def v1i128 : ValueType<128, 53>; // 1 x i128 vector value
def nxv2i1 : ValueType<2, 53>; // n x 2 x i1 vector value
def nxv4i1 : ValueType<4, 54>; // n x 4 x i1 vector value
def nxv8i1 : ValueType<8, 55>; // n x 8 x i1 vector value
def nxv16i1 : ValueType<16, 56>; // n x 16 x i1 vector value
def nxv32i1 : ValueType<32, 57>; // n x 32 x i1 vector value
def nxv1i1 : ValueType<1, 54>; // n x 1 x i1 vector value
def nxv2i1 : ValueType<2, 55>; // n x 2 x i1 vector value
def nxv4i1 : ValueType<4, 56>; // n x 4 x i1 vector value
def nxv8i1 : ValueType<8, 57>; // n x 8 x i1 vector value
def nxv16i1 : ValueType<16, 58>; // n x 16 x i1 vector value
def nxv32i1 : ValueType<32, 59>; // n x 32 x i1 vector value
def nxv1i8 : ValueType<8, 58>; // n x 1 x i8 vector value
def nxv2i8 : ValueType<16, 59>; // n x 2 x i8 vector value
def nxv4i8 : ValueType<32, 60>; // n x 4 x i8 vector value
def nxv8i8 : ValueType<64, 61>; // n x 8 x i8 vector value
def nxv16i8 : ValueType<128, 62>; // n x 16 x i8 vector value
def nxv32i8 : ValueType<256, 63>; // n x 32 x i8 vector value
def nxv1i8 : ValueType<8, 60>; // n x 1 x i8 vector value
def nxv2i8 : ValueType<16, 61>; // n x 2 x i8 vector value
def nxv4i8 : ValueType<32, 62>; // n x 4 x i8 vector value
def nxv8i8 : ValueType<64, 63>; // n x 8 x i8 vector value
def nxv16i8 : ValueType<128, 64>; // n x 16 x i8 vector value
def nxv32i8 : ValueType<256, 65>; // n x 32 x i8 vector value
def nxv1i16 : ValueType<16, 64>; // n x 1 x i16 vector value
def nxv2i16 : ValueType<32, 65>; // n x 2 x i16 vector value
def nxv4i16 : ValueType<64, 66>; // n x 4 x i16 vector value
def nxv8i16 : ValueType<128, 67>; // n x 8 x i16 vector value
def nxv16i16: ValueType<256, 68>; // n x 16 x i16 vector value
def nxv32i16: ValueType<512, 69>; // n x 32 x i16 vector value
def nxv1i16 : ValueType<16, 66>; // n x 1 x i16 vector value
def nxv2i16 : ValueType<32, 67>; // n x 2 x i16 vector value
def nxv4i16 : ValueType<64, 68>; // n x 4 x i16 vector value
def nxv8i16 : ValueType<128, 69>; // n x 8 x i16 vector value
def nxv16i16: ValueType<256, 70>; // n x 16 x i16 vector value
def nxv32i16: ValueType<512, 71>; // n x 32 x i16 vector value
def nxv1i32 : ValueType<32, 70>; // n x 1 x i32 vector value
def nxv2i32 : ValueType<64, 71>; // n x 2 x i32 vector value
def nxv4i32 : ValueType<128, 72>; // n x 4 x i32 vector value
def nxv8i32 : ValueType<256, 73>; // n x 8 x i32 vector value
def nxv16i32: ValueType<512, 74>; // n x 16 x i32 vector value
def nxv32i32: ValueType<1024,75>; // n x 32 x i32 vector value
def nxv1i32 : ValueType<32, 72>; // n x 1 x i32 vector value
def nxv2i32 : ValueType<64, 73>; // n x 2 x i32 vector value
def nxv4i32 : ValueType<128, 74>; // n x 4 x i32 vector value
def nxv8i32 : ValueType<256, 75>; // n x 8 x i32 vector value
def nxv16i32: ValueType<512, 76>; // n x 16 x i32 vector value
def nxv32i32: ValueType<1024,77>; // n x 32 x i32 vector value
def nxv1i64 : ValueType<64, 76>; // n x 1 x i64 vector value
def nxv2i64 : ValueType<128, 77>; // n x 2 x i64 vector value
def nxv4i64 : ValueType<256, 78>; // n x 4 x i64 vector value
def nxv8i64 : ValueType<512, 79>; // n x 8 x i64 vector value
def nxv16i64: ValueType<1024,80>; // n x 16 x i64 vector value
def nxv32i64: ValueType<2048,81>; // n x 32 x i64 vector value
def nxv1i64 : ValueType<64, 78>; // n x 1 x i64 vector value
def nxv2i64 : ValueType<128, 79>; // n x 2 x i64 vector value
def nxv4i64 : ValueType<256, 80>; // n x 4 x i64 vector value
def nxv8i64 : ValueType<512, 81>; // n x 8 x i64 vector value
def nxv16i64: ValueType<1024,82>; // n x 16 x i64 vector value
def nxv32i64: ValueType<2048,83>; // n x 32 x i64 vector value
def v2f16 : ValueType<32 , 82>; // 2 x f16 vector value
def v4f16 : ValueType<64 , 83>; // 4 x f16 vector value
def v8f16 : ValueType<128, 84>; // 8 x f16 vector value
def v1f32 : ValueType<32 , 85>; // 1 x f32 vector value
def v2f32 : ValueType<64 , 86>; // 2 x f32 vector value
def v4f32 : ValueType<128, 87>; // 4 x f32 vector value
def v8f32 : ValueType<256, 88>; // 8 x f32 vector value
def v16f32 : ValueType<512, 89>; // 16 x f32 vector value
def v1f64 : ValueType<64, 90>; // 1 x f64 vector value
def v2f64 : ValueType<128, 91>; // 2 x f64 vector value
def v4f64 : ValueType<256, 92>; // 4 x f64 vector value
def v8f64 : ValueType<512, 93>; // 8 x f64 vector value
def v2f16 : ValueType<32 , 84>; // 2 x f16 vector value
def v4f16 : ValueType<64 , 85>; // 4 x f16 vector value
def v8f16 : ValueType<128, 86>; // 8 x f16 vector value
def v1f32 : ValueType<32 , 87>; // 1 x f32 vector value
def v2f32 : ValueType<64 , 88>; // 2 x f32 vector value
def v4f32 : ValueType<128, 89>; // 4 x f32 vector value
def v8f32 : ValueType<256, 90>; // 8 x f32 vector value
def v16f32 : ValueType<512, 91>; // 16 x f32 vector value
def v1f64 : ValueType<64, 92>; // 1 x f64 vector value
def v2f64 : ValueType<128, 93>; // 2 x f64 vector value
def v4f64 : ValueType<256, 94>; // 4 x f64 vector value
def v8f64 : ValueType<512, 95>; // 8 x f64 vector value
def nxv2f16 : ValueType<32 , 94>; // n x 2 x f16 vector value
def nxv4f16 : ValueType<64 , 95>; // n x 4 x f16 vector value
def nxv8f16 : ValueType<128, 96>; // n x 8 x f16 vector value
def nxv1f32 : ValueType<32 , 97>; // n x 1 x f32 vector value
def nxv2f32 : ValueType<64 , 98>; // n x 2 x f32 vector value
def nxv4f32 : ValueType<128, 99>; // n x 4 x f32 vector value
def nxv8f32 : ValueType<256, 100>; // n x 8 x f32 vector value
def nxv16f32 : ValueType<512, 101>; // n x 16 x f32 vector value
def nxv1f64 : ValueType<64, 102>; // n x 1 x f64 vector value
def nxv2f64 : ValueType<128, 103>; // n x 2 x f64 vector value
def nxv4f64 : ValueType<256, 104>; // n x 4 x f64 vector value
def nxv8f64 : ValueType<512, 105>; // n x 8 x f64 vector value
def nxv2f16 : ValueType<32 , 96>; // n x 2 x f16 vector value
def nxv4f16 : ValueType<64 , 97>; // n x 4 x f16 vector value
def nxv8f16 : ValueType<128, 98>; // n x 8 x f16 vector value
def nxv1f32 : ValueType<32 , 99>; // n x 1 x f32 vector value
def nxv2f32 : ValueType<64 , 100>; // n x 2 x f32 vector value
def nxv4f32 : ValueType<128, 101>; // n x 4 x f32 vector value
def nxv8f32 : ValueType<256, 102>; // n x 8 x f32 vector value
def nxv16f32 : ValueType<512, 103>; // n x 16 x f32 vector value
def nxv1f64 : ValueType<64, 104>; // n x 1 x f64 vector value
def nxv2f64 : ValueType<128, 105>; // n x 2 x f64 vector value
def nxv4f64 : ValueType<256, 106>; // n x 4 x f64 vector value
def nxv8f64 : ValueType<512, 107>; // n x 8 x f64 vector value
def x86mmx : ValueType<64 , 106>; // X86 MMX value
def FlagVT : ValueType<0 , 107>; // Pre-RA sched glue
def isVoid : ValueType<0 , 108>; // Produces no value
def untyped: ValueType<8 , 109>; // Produces an untyped value
def x86mmx : ValueType<64 , 108>; // X86 MMX value
def FlagVT : ValueType<0 , 109>; // Pre-RA sched glue
def isVoid : ValueType<0 , 110>; // Produces no value
def untyped: ValueType<8 , 111>; // Produces an untyped value
def token : ValueType<0 , 248>; // TokenTy
def MetadataVT: ValueType<0, 249>; // Metadata

View File

@ -32,6 +32,10 @@ public:
uint32_t length() const { return RecordData.size(); }
Kind kind() const { return Type; }
ArrayRef<uint8_t> data() const { return RecordData; }
StringRef str_data() const {
return StringRef(reinterpret_cast<const char *>(RecordData.data()),
RecordData.size());
}
ArrayRef<uint8_t> content() const {
return RecordData.drop_front(sizeof(RecordPrefix));

View File

@ -1,61 +0,0 @@
//===-- CVTypeDumper.h - CodeView type info dumper --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEDUMPER_H
#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEDUMPER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/ScopedPrinter.h"
namespace llvm {
namespace codeview {
class TypeServerHandler;
/// Dumper for CodeView type streams found in COFF object files and PDB files.
class CVTypeDumper {
public:
explicit CVTypeDumper(TypeDatabase &TypeDB,
TypeServerHandler *Handler = nullptr)
: TypeDB(TypeDB), Handler(Handler) {}
/// Dumps one type record. Returns false if there was a type parsing error,
/// and true otherwise. This should be called in order, since the dumper
/// maintains state about previous records which are necessary for cross
/// type references.
Error dump(const CVType &Record, TypeVisitorCallbacks &Dumper);
/// Dumps the type records in Types. Returns false if there was a type stream
/// parse error, and true otherwise.
Error dump(const CVTypeArray &Types, TypeVisitorCallbacks &Dumper);
/// Dumps the type records in Data. Returns false if there was a type stream
/// parse error, and true otherwise. Use this method instead of the
/// CVTypeArray overload when type records are laid out contiguously in
/// memory.
Error dump(ArrayRef<uint8_t> Data, TypeVisitorCallbacks &Dumper);
static void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName,
TypeIndex TI, TypeDatabase &DB);
private:
TypeDatabase &TypeDB;
TypeServerHandler *Handler;
};
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPER_H

View File

@ -10,42 +10,15 @@
#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEVISITOR_H
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/Error.h"
namespace llvm {
namespace codeview {
class CVTypeVisitor {
public:
explicit CVTypeVisitor(TypeVisitorCallbacks &Callbacks);
void addTypeServerHandler(TypeServerHandler &Handler);
Error visitTypeRecord(CVType &Record, TypeIndex Index);
Error visitTypeRecord(CVType &Record);
Error visitMemberRecord(CVMemberRecord Record);
/// Visits the type records in Data. Sets the error flag on parse failures.
Error visitTypeStream(const CVTypeArray &Types);
Error visitTypeStream(CVTypeRange Types);
Error visitFieldListMemberStream(ArrayRef<uint8_t> FieldList);
Error visitFieldListMemberStream(BinaryStreamReader Reader);
private:
Expected<bool> handleTypeServer(CVType &Record);
Error finishVisitation(CVType &Record);
/// The interface to the class that gets notified of each visitation.
TypeVisitorCallbacks &Callbacks;
TinyPtrVector<TypeServerHandler *> Handlers;
};
class TypeCollection;
class TypeServerHandler;
class TypeVisitorCallbacks;
enum VisitorDataSource {
VDS_BytesPresent, // The record bytes are passed into the visitation
@ -76,6 +49,8 @@ Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
Error visitTypeStream(TypeCollection &Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
} // end namespace codeview
} // end namespace llvm

View File

@ -1,4 +1,4 @@
//===- RandomAccessTypeVisitor.h ------------------------------ *- C++ --*-===//
//===- LazyRandomTypeCollection.h ---------------------------- *- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
@ -7,10 +7,10 @@
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_RANDOMACCESSTYPEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_RANDOMACCESSTYPEVISITOR_H
#ifndef LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeDatabaseVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
@ -21,7 +21,6 @@ namespace llvm {
namespace codeview {
class TypeDatabase;
class TypeServerHandler;
class TypeVisitorCallbacks;
/// \brief Provides amortized O(1) random access to a CodeView type stream.
@ -40,32 +39,48 @@ class TypeVisitorCallbacks;
/// consumer much better access time, because the consumer can find the nearest
/// index in this array, and do a linear scan forward only from there.
///
/// RandomAccessTypeVisitor implements this algorithm, but additionally goes one
/// step further by caching offsets of every record that has been visited at
/// LazyRandomTypeCollection implements this algorithm, but additionally goes
/// one step further by caching offsets of every record that has been visited at
/// least once. This way, even repeated visits of the same record will never
/// require more than one linear scan. For a type stream of N elements divided
/// into M chunks of roughly equal size, this yields a worst case lookup time
/// of O(N/M) and an amortized time of O(1).
class RandomAccessTypeVisitor {
class LazyRandomTypeCollection : public TypeCollection {
typedef FixedStreamArray<TypeIndexOffset> PartialOffsetArray;
public:
RandomAccessTypeVisitor(const CVTypeArray &Types, uint32_t NumRecords,
PartialOffsetArray PartialOffsets);
explicit LazyRandomTypeCollection(uint32_t RecordCountHint);
LazyRandomTypeCollection(StringRef Data, uint32_t RecordCountHint);
LazyRandomTypeCollection(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint,
PartialOffsetArray PartialOffsets);
LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint);
Error visitTypeIndex(TypeIndex Index, TypeVisitorCallbacks &Callbacks);
void reset(ArrayRef<uint8_t> Data);
void reset(StringRef Data);
const TypeDatabase &database() const { return Database; }
CVType getType(TypeIndex Index) override;
StringRef getTypeName(TypeIndex Index) override;
bool contains(TypeIndex Index) override;
uint32_t size() override;
uint32_t capacity() override;
Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override;
private:
const TypeDatabase &database() const { return Database; }
Error ensureTypeExists(TypeIndex Index);
Error visitRangeForType(TypeIndex TI);
Error fullScanForType(TypeIndex TI);
Error visitRange(TypeIndex Begin, uint32_t BeginOffset, TypeIndex End);
Error visitOneRecord(TypeIndex TI, uint32_t Offset, CVType &Record);
/// Visited records get automatically added to the type database.
TypeDatabase Database;
/// The type array to allow random access visitation of.
const CVTypeArray &Types;
CVTypeArray Types;
/// The database visitor which adds new records to the database.
TypeDatabaseVisitor DatabaseVisitor;
@ -85,4 +100,4 @@ private:
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_RANDOMACCESSTYPEVISITOR_H
#endif // LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
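
The comment block above describes the chunked partial-offset scheme but not how a caller drives it. A minimal usage sketch, relying only on the constructors and TypeCollection overrides declared in this header; the buffer contents, the index list, and the record-count hint are placeholder assumptions, and the indices are assumed to be valid for the stream:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/LazyRandomTypeCollection.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::codeview;

// Resolve a few indices out of a raw .debug$T payload without deserializing
// the whole stream first.
void printSomeTypeNames(ArrayRef<uint8_t> DebugT, ArrayRef<TypeIndex> Indices) {
  // The hint only sizes the internal offset cache; it does not need to match
  // the real record count.
  LazyRandomTypeCollection Types(DebugT, /*RecordCountHint=*/1000);
  for (TypeIndex TI : Indices)
    // Each lookup scans forward from the nearest cached offset and remembers
    // every record it passes, so repeated queries stay cheap.
    outs() << Types.getTypeName(TI) << "\n";
}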

View File

@ -20,15 +20,15 @@ namespace llvm {
class ScopedPrinter;
namespace codeview {
class TypeDatabase;
class TypeCollection;
/// Dumper for CodeView symbol streams found in COFF object files and PDB files.
class CVSymbolDumper {
public:
CVSymbolDumper(ScopedPrinter &W, TypeDatabase &TypeDB,
CVSymbolDumper(ScopedPrinter &W, TypeCollection &Types,
std::unique_ptr<SymbolDumpDelegate> ObjDelegate,
bool PrintRecordBytes)
: W(W), TypeDB(TypeDB), ObjDelegate(std::move(ObjDelegate)),
: W(W), Types(Types), ObjDelegate(std::move(ObjDelegate)),
PrintRecordBytes(PrintRecordBytes) {}
/// Dumps one type record. Returns false if there was a type parsing error,
@ -43,7 +43,7 @@ public:
private:
ScopedPrinter &W;
TypeDatabase &TypeDB;
TypeCollection &Types;
std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
bool PrintRecordBytes;

View File

@ -0,0 +1,38 @@
//===- TypeCollection.h - A collection of CodeView type records -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPECOLLECTION_H
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
namespace llvm {
namespace codeview {
class TypeCollection {
public:
virtual ~TypeCollection() = default;
bool empty() { return size() == 0; }
virtual Optional<TypeIndex> getFirst() = 0;
virtual Optional<TypeIndex> getNext(TypeIndex Prev) = 0;
virtual CVType getType(TypeIndex Index) = 0;
virtual StringRef getTypeName(TypeIndex Index) = 0;
virtual bool contains(TypeIndex Index) = 0;
virtual uint32_t size() = 0;
virtual uint32_t capacity() = 0;
};
}
}
#endif

View File

@ -13,6 +13,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Allocator.h"
@ -20,7 +21,7 @@
namespace llvm {
namespace codeview {
class TypeDatabase {
class TypeDatabase : public TypeCollection {
friend class RandomAccessTypeVisitor;
public:
@ -41,19 +42,31 @@ public:
CVType &getTypeRecord(TypeIndex Index);
bool contains(TypeIndex Index) const;
uint32_t size() const;
uint32_t capacity() const;
bool empty() const;
TypeIndex getAppendIndex() const;
CVType getType(TypeIndex Index) override;
StringRef getTypeName(TypeIndex Index) override;
bool contains(TypeIndex Index) override;
uint32_t size() override;
uint32_t capacity() override;
Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override;
Optional<TypeIndex> largestTypeIndexLessThan(TypeIndex TI) const;
private:
TypeIndex getAppendIndex() const;
void grow();
void grow(TypeIndex Index);
BumpPtrAllocator Allocator;
uint32_t Count = 0;
TypeIndex LargestTypeIndex;
/// All user defined type records in .debug$T live in here. Type indices
/// greater than 0x1000 are user defined. Subtract 0x1000 from the index to

View File

@ -12,7 +12,6 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
@ -22,17 +21,20 @@ class ScopedPrinter;
namespace codeview {
class TypeCollection;
/// Dumper for CodeView type streams found in COFF object files and PDB files.
class TypeDumpVisitor : public TypeVisitorCallbacks {
public:
TypeDumpVisitor(TypeDatabase &TypeDB, ScopedPrinter *W, bool PrintRecordBytes)
: W(W), PrintRecordBytes(PrintRecordBytes), TypeDB(TypeDB) {}
TypeDumpVisitor(TypeCollection &TpiTypes, ScopedPrinter *W,
bool PrintRecordBytes)
: W(W), PrintRecordBytes(PrintRecordBytes), TpiTypes(TpiTypes) {}
/// When dumping types from an IPI stream in a PDB, a type index may refer to
/// a type or an item ID. The dumper will look up the "name" of the index in
/// the item database if appropriate. If ItemDB is null, it will use TypeDB,
/// which is correct when dumping types from an object file (/Z7).
void setItemDB(TypeDatabase &DB) { ItemDB = &DB; }
void setIpiTypes(TypeCollection &Types) { IpiTypes = &Types; }
void printTypeIndex(StringRef FieldName, TypeIndex TI) const;
@ -66,14 +68,16 @@ private:
/// Get the database of indices for the stream that we are dumping. If ItemDB
/// is set, then we must be dumping an item (IPI) stream. This will also
/// always get the appropriate DB for printing item names.
TypeDatabase &getSourceDB() const { return ItemDB ? *ItemDB : TypeDB; }
TypeCollection &getSourceTypes() const {
return IpiTypes ? *IpiTypes : TpiTypes;
}
ScopedPrinter *W;
bool PrintRecordBytes = false;
TypeDatabase &TypeDB;
TypeDatabase *ItemDB = nullptr;
TypeCollection &TpiTypes;
TypeCollection *IpiTypes = nullptr;
};
} // end namespace codeview
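
For reference, a short sketch of how the reworked dumper is meant to be driven, using only the TypeDumpVisitor constructor above and the TypeCollection overload of visitTypeStream added earlier in this commit; routing output through a ScopedPrinter over outs() is an illustrative choice:

#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::codeview;

// Dump every record of a TPI stream through the TypeCollection-based API.
Error dumpAllTypes(TypeCollection &TpiTypes) {
  ScopedPrinter W(outs());
  TypeDumpVisitor TDV(TpiTypes, &W, /*PrintRecordBytes=*/false);
  return visitTypeStream(TpiTypes, TDV);
}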

View File

@ -15,8 +15,13 @@
#include <cinttypes>
namespace llvm {
class ScopedPrinter;
namespace codeview {
class TypeCollection;
enum class SimpleTypeKind : uint32_t {
None = 0x0000, // uncharacterized type (no type)
Void = 0x0003, // void
@ -238,6 +243,11 @@ public:
return Result;
}
friend inline uint32_t operator-(const TypeIndex &A, const TypeIndex &B) {
assert(A >= B);
return A.toArrayIndex() - B.toArrayIndex();
}
private:
support::ulittle32_t Index;
};
@ -249,6 +259,9 @@ struct TypeIndexOffset {
TypeIndex Type;
support::ulittle32_t Offset;
};
void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName, TypeIndex TI,
TypeCollection &Types);
}
}

View File

@ -70,6 +70,8 @@ class TypeSerializer : public TypeVisitorCallbacks {
MutableArrayRef<uint8_t> getCurrentRecordData();
Error writeRecordPrefix(TypeLeafKind Kind);
TypeIndex insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record);
TypeIndex insertRecordBytesWithCopy(CVType &Record,
MutableArrayRef<uint8_t> Data);
Expected<MutableArrayRef<uint8_t>>
addPadding(MutableArrayRef<uint8_t> Record);

View File

@ -12,17 +12,20 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"
namespace llvm {
namespace codeview {
class TypeIndex;
class TypeServerHandler;
class TypeTableBuilder;
/// Merges one type stream into another. Returns an Error on failure.
Error mergeTypeStreams(TypeTableBuilder &DestIdStream,
TypeTableBuilder &DestTypeStream,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler, const CVTypeArray &Types);
} // end namespace codeview

View File

@ -0,0 +1,42 @@
//===- TypeTableCollection.h ---------------------------------- *- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
namespace llvm {
namespace codeview {
class TypeTableCollection : public TypeCollection {
public:
explicit TypeTableCollection(ArrayRef<MutableArrayRef<uint8_t>> Records);
Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override;
CVType getType(TypeIndex Index) override;
StringRef getTypeName(TypeIndex Index) override;
bool contains(TypeIndex Index) override;
uint32_t size() override;
uint32_t capacity() override;
private:
bool hasCapacityFor(TypeIndex Index) const;
void ensureTypeExists(TypeIndex Index);
ArrayRef<MutableArrayRef<uint8_t>> Records;
TypeDatabase Database;
};
}
}
#endif

View File

@ -17,8 +17,6 @@ namespace llvm {
namespace codeview {
class TypeVisitorCallbacks {
friend class CVTypeVisitor;
public:
virtual ~TypeVisitorCallbacks() = default;

View File

@ -19,8 +19,9 @@ class DWARFCompileUnit : public DWARFUnit {
public:
DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO, const DWARFUnitSectionBase &UnitSection,
StringRef SS, StringRef SOS, const DWARFSection *AOS,
StringRef LS, bool LE, bool IsDWO,
const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *Entry)
: DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
UnitSection, Entry) {}

View File

@ -235,7 +235,7 @@ public:
virtual StringRef getStringDWOSection() = 0;
virtual StringRef getStringOffsetDWOSection() = 0;
virtual const DWARFSection &getRangeDWOSection() = 0;
virtual StringRef getAddrSection() = 0;
virtual const DWARFSection &getAddrSection() = 0;
virtual const DWARFSection& getAppleNamesSection() = 0;
virtual const DWARFSection& getAppleTypesSection() = 0;
virtual const DWARFSection& getAppleNamespacesSection() = 0;
@ -290,7 +290,7 @@ class DWARFContextInMemory : public DWARFContext {
StringRef StringDWOSection;
StringRef StringOffsetDWOSection;
DWARFSection RangeDWOSection;
StringRef AddrSection;
DWARFSection AddrSection;
DWARFSection AppleNamesSection;
DWARFSection AppleTypesSection;
DWARFSection AppleNamespacesSection;
@ -356,9 +356,7 @@ public:
const DWARFSection &getRangeDWOSection() override { return RangeDWOSection; }
StringRef getAddrSection() override {
return AddrSection;
}
const DWARFSection &getAddrSection() override { return AddrSection; }
StringRef getCUIndexSection() override { return CUIndexSection; }
StringRef getGdbIndexSection() override { return GdbIndexSection; }

View File

@ -17,7 +17,7 @@
namespace llvm {
struct RelocAddrEntry {
int64_t Value;
uint64_t Value;
};
/// In place of applying the relocations to the data we've read from disk we use

View File

@ -31,8 +31,9 @@ private:
public:
DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO, const DWARFUnitSectionBase &UnitSection,
StringRef SS, StringRef SOS, const DWARFSection *AOS,
StringRef LS, bool LE, bool IsDWO,
const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *Entry)
: DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
UnitSection, Entry) {}

View File

@ -57,7 +57,7 @@ protected:
virtual void parseImpl(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS,
StringRef SS, StringRef SOS, const DWARFSection *AOS,
StringRef LS, bool isLittleEndian, bool isDWO) = 0;
};
@ -89,8 +89,8 @@ public:
private:
void parseImpl(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO) override {
StringRef SS, StringRef SOS, const DWARFSection *AOS,
StringRef LS, bool LE, bool IsDWO) override {
if (Parsed)
return;
const auto &Index = getDWARFUnitIndex(Context, UnitType::Section);
@ -120,7 +120,7 @@ class DWARFUnit {
StringRef LineSection;
StringRef StringSection;
StringRef StringOffsetSection;
StringRef AddrOffsetSection;
const DWARFSection *AddrOffsetSection;
uint32_t AddrOffsetSectionBase;
bool isLittleEndian;
bool isDWO;
@ -149,7 +149,7 @@ class DWARFUnit {
DWARFUnit *DWOU = nullptr;
public:
DWOHolder(StringRef DWOPath);
DWOHolder(StringRef DWOPath, uint64_t DWOId);
DWARFUnit *getUnit() const { return DWOU; }
};
@ -172,8 +172,8 @@ protected:
public:
DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, const DWARFSection *RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS, bool LE, bool IsDWO,
const DWARFUnitSectionBase &UnitSection,
StringRef SOS, const DWARFSection *AOS, StringRef LS, bool LE,
bool IsDWO, const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *IndexEntry = nullptr);
virtual ~DWARFUnit();
@ -184,7 +184,7 @@ public:
StringRef getStringSection() const { return StringSection; }
StringRef getStringOffsetSection() const { return StringOffsetSection; }
void setAddrOffsetSection(StringRef AOS, uint32_t Base) {
void setAddrOffsetSection(const DWARFSection *AOS, uint32_t Base) {
AddrOffsetSection = AOS;
AddrOffsetSectionBase = Base;
}

View File

@ -13,7 +13,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"

View File

@ -72,7 +72,7 @@ private:
size_t TypeRecordBytes = 0;
Optional<PdbRaw_TpiVer> VerHeader;
PdbRaw_TpiVer VerHeader = PdbRaw_TpiVer::PdbTpiV80;
std::vector<ArrayRef<uint8_t>> TypeRecords;
std::vector<uint32_t> TypeHashes;
std::vector<codeview::TypeIndexOffset> TypeIndexOffsets;

View File

@ -27,8 +27,7 @@ namespace llvm {
/// for a specific function. When used in the body of said function, the
/// argument of course represents the value of the actual argument that the
/// function was called with.
class Argument : public Value {
virtual void anchor();
class Argument final : public Value {
Function *Parent;
unsigned ArgNo;

View File

@ -51,8 +51,8 @@ class ValueSymbolTable;
/// occur because it may be useful in the intermediate stage of constructing or
/// modifying a program. However, the verifier will ensure that basic blocks
/// are "well formed".
class BasicBlock : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
class BasicBlock final : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
using InstListType = SymbolTableList<Instruction>;
@ -77,7 +77,7 @@ private:
public:
BasicBlock(const BasicBlock &) = delete;
BasicBlock &operator=(const BasicBlock &) = delete;
~BasicBlock() override;
~BasicBlock();
/// \brief Get the context in which this basic block lives.
LLVMContext &getContext() const;

View File

@ -40,8 +40,6 @@ class APInt;
/// don't have to worry about the lifetime of the objects.
/// @brief LLVM Constant Representation
class Constant : public User {
void anchor() override;
protected:
Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
: User(ty, vty, Ops, NumOps) {}

View File

@ -58,8 +58,6 @@ template <class ConstantClass> struct ConstantAggrKeyType;
class ConstantData : public Constant {
friend class Constant;
void anchor() override;
Value *handleOperandChangeImpl(Value *From, Value *To) {
llvm_unreachable("Constant data does not have operands!");
}
@ -93,7 +91,6 @@ class ConstantInt final : public ConstantData {
ConstantInt(IntegerType *Ty, const APInt& V);
void anchor() override;
void destroyConstantImpl();
public:
@ -274,7 +271,6 @@ class ConstantFP final : public ConstantData {
ConstantFP(Type *Ty, const APFloat& V);
void anchor() override;
void destroyConstantImpl();
public:
@ -588,7 +584,7 @@ class ConstantDataSequential : public ConstantData {
protected:
explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
: ConstantData(ty, VT), DataElements(Data), Next(nullptr) {}
~ConstantDataSequential() override { delete Next; }
~ConstantDataSequential() { delete Next; }
static Constant *getImpl(StringRef Bytes, Type *Ty);
@ -638,8 +634,8 @@ public:
/// The size of the elements is known to be a multiple of one byte.
uint64_t getElementByteSize() const;
/// This method returns true if this is an array of i8.
bool isString() const;
/// This method returns true if this is an array of \p CharSize integers.
bool isString(unsigned CharSize = 8) const;
/// This method returns true if the array "isString", ends with a null byte,
/// and does not contain any other null bytes.
@ -692,8 +688,6 @@ class ConstantDataArray final : public ConstantDataSequential {
return User::operator new(s, 0);
}
void anchor() override;
public:
ConstantDataArray(const ConstantDataArray &) = delete;
@ -755,8 +749,6 @@ class ConstantDataVector final : public ConstantDataSequential {
return User::operator new(s, 0);
}
void anchor() override;
public:
ConstantDataVector(const ConstantDataVector &) = delete;

View File

@ -0,0 +1,41 @@
//===-- DerivedUser.h - Base for non-IR Users -------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DERIVEDUSER_H
#define LLVM_IR_DERIVEDUSER_H
#include "llvm/IR/User.h"
namespace llvm {
/// Extension point for the Value hierarchy. All classes outside of lib/IR
/// that wish to inherit from User should inherit from DerivedUser
/// instead. Inheriting from this class is discouraged.
///
/// Generally speaking, Value is the base of a closed class hierarchy
/// that can't be extended by code outside of lib/IR. This class creates a
/// loophole that allows classes outside of lib/IR to extend User to leverage
/// its use/def list machinery.
class DerivedUser : public User {
protected:
typedef void (*DeleteValueTy)(DerivedUser *);
private:
friend Value;
DeleteValueTy DeleteValue;
public:
DerivedUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps,
DeleteValueTy DeleteValue)
: User(Ty, VK, U, NumOps), DeleteValue(DeleteValue) {}
};
} // namespace llvm
#endif // LLVM_IR_DERIVEDUSER_H
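
A stripped-down sketch of the pattern this header enables, modeled on the MemorySSA classes that motivate it. The class name is invented, and the value ID passed to the constructor is borrowed from the MemoryUse entry added to Value.def elsewhere in this commit purely for illustration; a real subclass would register its own ID:

#include "llvm/IR/DerivedUser.h"

using namespace llvm;

// An out-of-lib/IR node that wants Value's use/def list machinery.
class MyAnalysisNode : public DerivedUser {
  // Called through Value::deleteValue(); the base classes have protected,
  // non-virtual destructors, so deletion is routed through this hook.
  static void deleteMe(DerivedUser *Self) {
    delete static_cast<MyAnalysisNode *>(Self);
  }

public:
  explicit MyAnalysisNode(Type *Ty)
      : DerivedUser(Ty, Value::MemoryUseVal, /*Ops=*/nullptr, /*NumOps=*/0,
                    deleteMe) {}
};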

View File

@ -123,7 +123,7 @@ private:
public:
Function(const Function&) = delete;
void operator=(const Function&) = delete;
~Function() override;
~Function();
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = nullptr) {

View File

@ -161,6 +161,10 @@ protected:
Parent = parent;
}
~GlobalValue() {
removeDeadConstantUsers(); // remove any dead constants using this.
}
public:
enum ThreadLocalMode {
NotThreadLocal = 0,
@ -172,10 +176,6 @@ public:
GlobalValue(const GlobalValue &) = delete;
~GlobalValue() override {
removeDeadConstantUsers(); // remove any dead constants using this.
}
unsigned getAlignment() const;
enum class UnnamedAddr {

View File

@ -66,7 +66,7 @@ public:
GlobalVariable(const GlobalVariable &) = delete;
GlobalVariable &operator=(const GlobalVariable &) = delete;
~GlobalVariable() override {
~GlobalVariable() {
dropAllReferences();
// FIXME: needed by operator delete

View File

@ -28,7 +28,7 @@ class FunctionType;
class PointerType;
template <class ConstantClass> class ConstantUniqueMap;
class InlineAsm : public Value {
class InlineAsm final : public Value {
public:
enum AsmDialect {
AD_ATT,
@ -48,7 +48,6 @@ private:
InlineAsm(FunctionType *Ty, const std::string &AsmString,
const std::string &Constraints, bool hasSideEffects,
bool isAlignStack, AsmDialect asmDialect);
~InlineAsm() override;
/// When the ConstantUniqueMap merges two types and makes two InlineAsms
/// identical, it destroys one of them with this method.

View File

@ -62,9 +62,6 @@ protected:
Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
: Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}
// Out of line virtual method, so the vtable, etc has a home.
~TerminatorInst() override;
public:
/// Return the number of successors that this terminator has.
unsigned getNumSuccessors() const;
@ -299,9 +296,6 @@ public:
void *operator new(size_t, unsigned) = delete;
// Out of line virtual method, so the vtable, etc has a home.
~UnaryInstruction() override;
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@ -568,8 +562,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
/// if (isa<CastInst>(Instr)) { ... }
/// @brief Base class of casting instructions.
class CastInst : public UnaryInstruction {
void anchor() override;
protected:
/// @brief Constructor with insert-before-instruction semantics for subclasses
CastInst(Type *Ty, unsigned iType, Value *S,
@ -914,8 +906,6 @@ protected:
Value *LHS, Value *RHS, const Twine &Name,
BasicBlock *InsertAtEnd);
void anchor() override; // Out of line virtual method.
public:
CmpInst() = delete;

View File

@ -102,6 +102,10 @@
#define LAST_OTHER_INST(num)
#endif
#ifndef HANDLE_USER_INST
#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
#endif
// Terminator Instructions - These instructions are used to terminate a basic
// block of the program. Every basic block must end with one of these
// instructions for it to be a well formed basic block.
@ -185,8 +189,8 @@ HANDLE_OTHER_INST(52, FCmp , FCmpInst ) // Floating point comparison instr.
HANDLE_OTHER_INST(53, PHI , PHINode ) // PHI node instruction
HANDLE_OTHER_INST(54, Call , CallInst ) // Call a function
HANDLE_OTHER_INST(55, Select , SelectInst ) // select instruction
HANDLE_OTHER_INST(56, UserOp1, Instruction) // May be used internally in a pass
HANDLE_OTHER_INST(57, UserOp2, Instruction) // Internal to passes only
HANDLE_USER_INST (56, UserOp1, Instruction) // May be used internally in a pass
HANDLE_USER_INST (57, UserOp2, Instruction) // Internal to passes only
HANDLE_OTHER_INST(58, VAArg , VAArgInst ) // vaarg instruction
HANDLE_OTHER_INST(59, ExtractElement, ExtractElementInst)// extract from vector
HANDLE_OTHER_INST(60, InsertElement, InsertElementInst) // insert into vector
@ -220,6 +224,8 @@ HANDLE_OTHER_INST(64, LandingPad, LandingPadInst) // Landing pad instruction.
#undef HANDLE_OTHER_INST
#undef LAST_OTHER_INST
#undef HANDLE_USER_INST
#ifdef HANDLE_INST
#undef HANDLE_INST
#endif

View File

@ -36,6 +36,10 @@ class FastMathFlags;
class MDNode;
struct AAMDNodes;
template <> struct ilist_alloc_traits<Instruction> {
static inline void deleteNode(Instruction *V);
};
class Instruction : public User,
public ilist_node_with_parent<Instruction, BasicBlock> {
BasicBlock *Parent;
@ -47,13 +51,13 @@ class Instruction : public User,
HasMetadataBit = 1 << 15
};
protected:
~Instruction(); // Use deleteValue() to delete a generic Instruction.
public:
Instruction(const Instruction &) = delete;
Instruction &operator=(const Instruction &) = delete;
// Out of line virtual method, so the vtable, etc has a home.
~Instruction() override;
/// Specialize the methods defined in Value, as we know that an instruction
/// can only be used by other instructions.
Instruction *user_back() { return cast<Instruction>(*user_begin());}
@ -640,6 +644,10 @@ private:
Instruction *cloneImpl() const;
};
inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
V->deleteValue();
}
} // end namespace llvm
#endif // LLVM_IR_INSTRUCTION_H

View File

@ -89,9 +89,6 @@ public:
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, unsigned Align,
const Twine &Name, BasicBlock *InsertAtEnd);
// Out of line virtual method, so the vtable, etc. has a home.
~AllocaInst() override;
/// Return true if there is an allocation size parameter to the allocation
/// instruction that is not 1.
bool isArrayAllocation() const;
@ -856,7 +853,6 @@ class GetElementPtrInst : public Instruction {
ArrayRef<Value *> IdxList, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
void anchor() override;
void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
protected:
@ -1112,8 +1108,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
void anchor() override;
void AssertOK() {
assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
getPredicate() <= CmpInst::LAST_ICMP_PREDICATE &&
@ -1426,8 +1420,6 @@ protected:
CallInst *cloneImpl() const;
public:
~CallInst() override;
static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> Bundles = None,
const Twine &NameStr = "",
@ -2592,8 +2584,6 @@ class PHINode : public Instruction {
return User::operator new(s);
}
void anchor() override;
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@ -2927,8 +2917,6 @@ protected:
ReturnInst *cloneImpl() const;
public:
~ReturnInst() override;
static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
Instruction *InsertBefore = nullptr) {
return new(!!retVal) ReturnInst(C, retVal, InsertBefore);

View File

@ -174,12 +174,13 @@ class MetadataAsValue : public Value {
Metadata *MD;
MetadataAsValue(Type *Ty, Metadata *MD);
~MetadataAsValue() override;
/// \brief Drop use of metadata (during teardown).
void dropUse() { MD = nullptr; }
public:
~MetadataAsValue();
static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);
Metadata *getMetadata() const { return MD; }

View File

@ -30,6 +30,9 @@ namespace llvm {
template <typename SubClass, unsigned ARITY>
struct FixedNumOperandTraits {
static Use *op_begin(SubClass* U) {
static_assert(
!std::is_polymorphic<SubClass>::value,
"adding virtual methods to subclasses of User breaks use lists");
return reinterpret_cast<Use*>(U) - ARITY;
}
static Use *op_end(SubClass* U) {
@ -65,6 +68,9 @@ struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> {
template <typename SubClass, unsigned MINARITY = 0>
struct VariadicOperandTraits {
static Use *op_begin(SubClass* U) {
static_assert(
!std::is_polymorphic<SubClass>::value,
"adding virtual methods to subclasses of User breaks use lists");
return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands();
}
static Use *op_end(SubClass* U) {

View File

@ -29,16 +29,11 @@ namespace llvm {
/// This is a utility class that provides an abstraction for the common
/// functionality between Instructions and ConstantExprs.
class Operator : public User {
protected:
// NOTE: Cannot use = delete because it's not legal to delete
// an overridden method that's not deleted in the base class. Cannot leave
// this unimplemented because that leads to an ODR-violation.
~Operator() override;
public:
// The Operator class is intended to be used as a utility, and is never itself
// instantiated.
Operator() = delete;
~Operator() = delete;
void *operator new(size_t, unsigned) = delete;
void *operator new(size_t s) = delete;

View File

@ -886,17 +886,21 @@ template <typename LHS_t> struct not_match {
template <typename OpTy> bool match(OpTy *V) {
if (auto *O = dyn_cast<Operator>(V))
if (O->getOpcode() == Instruction::Xor)
return matchIfNot(O->getOperand(0), O->getOperand(1));
if (O->getOpcode() == Instruction::Xor) {
if (isAllOnes(O->getOperand(1)))
return L.match(O->getOperand(0));
if (isAllOnes(O->getOperand(0)))
return L.match(O->getOperand(1));
}
return false;
}
private:
bool matchIfNot(Value *LHS, Value *RHS) {
return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
bool isAllOnes(Value *V) {
return (isa<ConstantInt>(V) || isa<ConstantDataVector>(V) ||
// FIXME: Remove CV.
isa<ConstantVector>(RHS)) &&
cast<Constant>(RHS)->isAllOnesValue() && L.match(LHS);
isa<ConstantVector>(V)) &&
cast<Constant>(V)->isAllOnesValue();
}
};

View File

@ -46,8 +46,6 @@ class User : public Value {
template <unsigned>
friend struct HungoffOperandTraits;
virtual void anchor();
LLVM_ATTRIBUTE_ALWAYS_INLINE inline static void *
allocateFixedOperandUser(size_t, unsigned, unsigned);
@ -93,9 +91,11 @@ protected:
/// should be called if there are no uses.
void growHungoffUses(unsigned N, bool IsPhi = false);
protected:
~User() = default; // Use deleteValue() to delete a generic User.
public:
User(const User &) = delete;
~User() override = default;
/// \brief Free memory allocated for User and Use objects.
void operator delete(void *Usr);

View File

@ -20,10 +20,14 @@
#if !(defined HANDLE_GLOBAL_VALUE || defined HANDLE_CONSTANT || \
defined HANDLE_INSTRUCTION || defined HANDLE_INLINE_ASM_VALUE || \
defined HANDLE_METADATA_VALUE || defined HANDLE_VALUE || \
defined HANDLE_CONSTANT_MARKER)
defined HANDLE_CONSTANT_MARKER || defined HANDLE_MEMORY_VALUE)
#error "Missing macro definition of HANDLE_VALUE*"
#endif
#ifndef HANDLE_MEMORY_VALUE
#define HANDLE_MEMORY_VALUE(ValueName) HANDLE_VALUE(ValueName)
#endif
#ifndef HANDLE_GLOBAL_VALUE
#define HANDLE_GLOBAL_VALUE(ValueName) HANDLE_CONSTANT(ValueName)
#endif
@ -54,9 +58,13 @@
HANDLE_VALUE(Argument)
HANDLE_VALUE(BasicBlock)
HANDLE_VALUE(MemoryUse)
HANDLE_VALUE(MemoryDef)
HANDLE_VALUE(MemoryPhi)
// FIXME: It's awkward that Value.def knows about classes in Analysis. While
// this doesn't introduce a strict link or include dependency, we should remove
// the circular dependency eventually.
HANDLE_MEMORY_VALUE(MemoryUse)
HANDLE_MEMORY_VALUE(MemoryDef)
HANDLE_MEMORY_VALUE(MemoryPhi)
HANDLE_GLOBAL_VALUE(Function)
HANDLE_GLOBAL_VALUE(GlobalAlias)
@ -94,6 +102,7 @@ HANDLE_CONSTANT_MARKER(ConstantDataLastVal, ConstantTokenNone)
HANDLE_CONSTANT_MARKER(ConstantAggregateFirstVal, ConstantArray)
HANDLE_CONSTANT_MARKER(ConstantAggregateLastVal, ConstantVector)
#undef HANDLE_MEMORY_VALUE
#undef HANDLE_GLOBAL_VALUE
#undef HANDLE_CONSTANT
#undef HANDLE_INSTRUCTION

View File

@ -21,6 +21,7 @@
#include "llvm-c/Types.h"
#include <cassert>
#include <iterator>
#include <memory>
namespace llvm {
@ -69,6 +70,8 @@ using ValueName = StringMapEntry<Value*>;
/// objects that watch it and listen to RAUW and Destroy events. See
/// llvm/IR/ValueHandle.h for details.
class Value {
// The least-significant bit of the first word of Value *must* be zero:
// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
Type *VTy;
Use *UseList;
@ -200,10 +203,19 @@ private:
protected:
Value(Type *Ty, unsigned scid);
/// Value's destructor should be virtual by design, but that would require
/// that Value and all of its subclasses have a vtable that effectively
/// duplicates the information in the value ID. As a size optimization, the
/// destructor has been protected, and the caller should manually call
/// deleteValue.
~Value(); // Use deleteValue() to delete a generic Value.
public:
Value(const Value &) = delete;
void operator=(const Value &) = delete;
virtual ~Value();
/// Delete a pointer to a generic Value.
void deleteValue();
/// \brief Support for debugging, callable in GDB: V->dump()
void dump() const;
@ -643,6 +655,13 @@ protected:
void setValueSubclassData(unsigned short D) { SubclassData = D; }
};
struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
/// Those don't work because Value and Instruction's destructors are protected,
/// aren't virtual, and won't destroy the complete object.
typedef std::unique_ptr<Value, ValueDeleter> unique_value;
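// Illustrative sketch (not part of this header): owning an unlinked
// instruction with unique_value.  A plain std::unique_ptr<Instruction> would
// try to call the protected, non-virtual destructor and fail to compile;
// unique_value's deleter goes through deleteValue() instead.  The AllocaInst
// overload taking a type, an address space and a name is assumed from
// Instructions.h, and the names used here are placeholders.
inline unique_value makeScratchAlloca(LLVMContext &Ctx) {
  return unique_value(
      new AllocaInst(Type::getInt32Ty(Ctx), /*AddrSpace=*/0, "scratch"));
}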
inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
V.print(OS);
return OS;

View File

@ -187,7 +187,6 @@ void initializeLintPass(PassRegistry&);
void initializeLiveDebugValuesPass(PassRegistry&);
void initializeLiveDebugVariablesPass(PassRegistry&);
void initializeLiveIntervalsPass(PassRegistry&);
void initializeLiveRangeShrinkPass(PassRegistry&);
void initializeLiveRegMatrixPass(PassRegistry&);
void initializeLiveStacksPass(PassRegistry&);
void initializeLiveVariablesPass(PassRegistry&);

View File

@ -57,6 +57,8 @@ protected:
ID_MachO64L, // MachO 64-bit, little endian
ID_MachO64B, // MachO 64-bit, big endian
ID_WinRes, // Windows resource (.res) file.
ID_Wasm,
ID_EndObjects
@ -132,6 +134,8 @@ public:
TypeID == ID_MachO32B || TypeID == ID_MachO64B);
}
bool isWinRes() const { return TypeID == ID_WinRes; }
Triple::ObjectFormatType getTripleObjectFormat() const {
if (isCOFF())
return Triple::COFF;

View File

@ -9,13 +9,15 @@
//
// COFF short import file is a special kind of file which contains
// only symbol names for DLL-exported symbols. This class implements
// SymbolicFile interface for the file.
// exporting of symbols to create import libraries, and a SymbolicFile
// interface for the file type.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_COFF_IMPORT_FILE_H
#define LLVM_OBJECT_COFF_IMPORT_FILE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/IRObjectFile.h"
#include "llvm/Object/ObjectFile.h"
@ -68,6 +70,36 @@ private:
}
};
struct COFFShortExport {
std::string Name;
std::string ExtName;
uint16_t Ordinal = 0;
bool Noname = false;
bool Data = false;
bool Private = false;
bool Constant = false;
bool isWeak() {
return ExtName.size() && ExtName != Name;
}
friend bool operator==(const COFFShortExport &L, const COFFShortExport &R) {
return L.Name == R.Name && L.ExtName == R.ExtName &&
L.Ordinal == R.Ordinal && L.Noname == R.Noname &&
L.Data == R.Data && L.Private == R.Private;
}
friend bool operator!=(const COFFShortExport &L, const COFFShortExport &R) {
return !(L == R);
}
};
std::error_code writeImportLibrary(StringRef DLLName,
StringRef Path,
ArrayRef<COFFShortExport> Exports,
COFF::MachineTypes Machine);
} // namespace object
} // namespace llvm
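
A minimal sketch of driving the new export path, using only the COFFShortExport fields and the writeImportLibrary signature declared above; the DLL name, output path, symbol name, and machine type are made-up example values:

#include "llvm/Object/COFFImportFile.h"
#include <vector>

using namespace llvm;
using namespace llvm::object;

// Emit an import library describing a DLL that exports one function.
std::error_code writeExampleImportLib() {
  COFFShortExport Exp;
  Exp.Name = "my_function"; // name as exported from the DLL
  std::vector<COFFShortExport> Exports = {Exp};
  return writeImportLibrary("example.dll", "example.lib", Exports,
                            COFF::IMAGE_FILE_MACHINE_AMD64);
}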

View File

@ -0,0 +1,49 @@
//===--- COFFModuleDefinition.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Windows-specific.
// A parser for the module-definition file (.def file).
// Parsed results are returned as a COFFModuleDefinition struct.
//
// The format of module-definition files is described in this document:
// https://msdn.microsoft.com/en-us/library/28d6s79h.aspx
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_COFF_MODULE_DEFINITION_H
#define LLVM_OBJECT_COFF_MODULE_DEFINITION_H
#include "llvm/Object/COFFImportFile.h"
#include "llvm/Object/COFF.h"
namespace llvm {
namespace object {
struct COFFModuleDefinition {
std::vector<COFFShortExport> Exports;
std::string OutputFile;
uint64_t ImageBase = 0;
uint64_t StackReserve = 0;
uint64_t StackCommit = 0;
uint64_t HeapReserve = 0;
uint64_t HeapCommit = 0;
uint32_t MajorImageVersion = 0;
uint32_t MinorImageVersion = 0;
uint32_t MajorOSVersion = 0;
uint32_t MinorOSVersion = 0;
};
Expected<COFFModuleDefinition>
parseCOFFModuleDefinition(MemoryBufferRef MB, COFF::MachineTypes Machine);
} // End namespace object.
} // End namespace llvm.
#endif
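
A sketch of how the parser is expected to be called, based only on the declarations above; the .def text, buffer name, and machine type are illustrative assumptions:

#include "llvm/Object/COFFModuleDefinition.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::object;

// Parse an in-memory .def script and print the exports it declares.
Error listDefExports() {
  StringRef Def = "LIBRARY example.dll\n"
                  "EXPORTS\n"
                  "  my_function\n";
  Expected<COFFModuleDefinition> M = parseCOFFModuleDefinition(
      MemoryBufferRef(Def, "example.def"), COFF::IMAGE_FILE_MACHINE_AMD64);
  if (!M)
    return M.takeError();
  for (const COFFShortExport &E : M->Exports)
    outs() << E.Name << "\n";
  return Error::success();
}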

View File

@ -30,7 +30,10 @@ public:
/// @brief Resize the buffer and uncompress section data into it.
/// @param Out Destination buffer.
Error decompress(SmallString<32> &Out);
template <class T> Error resizeAndDecompress(T &Out) {
Out.resize(DecompressedSize);
return decompress({Out.data(), (size_t)DecompressedSize});
}
/// @brief Uncompress section data to raw buffer provided.
/// @param Buffer Destination buffer.
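
A brief sketch of the resizeAndDecompress template in use. Decompressor::create is not visible in this hunk, so its exact signature here is an assumption, and the section inputs are placeholders:

#include "llvm/ADT/SmallString.h"
#include "llvm/Object/Decompressor.h"

using namespace llvm;
using namespace llvm::object;

// Inflate a compressed debug section into a growable buffer.
Expected<SmallString<0>> inflateSection(StringRef Name, StringRef Contents,
                                        bool IsLE, bool Is64Bit) {
  Expected<Decompressor> D =
      Decompressor::create(Name, Contents, IsLE, Is64Bit);
  if (!D)
    return D.takeError();
  SmallString<0> Out;
  // resizeAndDecompress() grows Out to the uncompressed size first.
  if (Error E = D->resizeAndDecompress(Out))
    return std::move(E);
  return std::move(Out);
}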

View File

@ -235,10 +235,7 @@ ELFFile<ELFT>::getSection(const Elf_Sym *Sym, Elf_Sym_Range Symbols,
uint32_t Index = *IndexOrErr;
if (Index == 0)
return nullptr;
auto SectionsOrErr = sections();
if (!SectionsOrErr)
return SectionsOrErr.takeError();
return object::getSection<ELFT>(*SectionsOrErr, Index);
return getSection(Index);
}
template <class ELFT>

View File

@ -32,18 +32,6 @@
namespace llvm {
namespace object {
struct RelocToApply {
// The computed value after applying the relevant relocations.
int64_t Value = 0;
// The width of the value; how many bytes to touch when applying the
// relocation.
char Width = 0;
RelocToApply() = default;
RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {}
};
/// @brief Base class for object file relocation visitors.
class RelocVisitor {
public:
@ -52,7 +40,7 @@ public:
// TODO: Should handle multiple applied relocations via either passing in the
// previously computed value or just count paired relocations as a single
// visit.
RelocToApply visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
uint64_t visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
if (isa<ELFObjectFileBase>(ObjToVisit))
return visitELF(RelocType, R, Value);
if (isa<COFFObjectFile>(ObjToVisit))
@ -61,7 +49,7 @@ public:
return visitMachO(RelocType, R, Value);
HasError = true;
return RelocToApply();
return 0;
}
bool error() { return HasError; }
@ -70,7 +58,7 @@ private:
const ObjectFile &ObjToVisit;
bool HasError = false;
RelocToApply visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
uint64_t visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
switch (ObjToVisit.getArch()) {
case Triple::x86_64:
@ -87,7 +75,7 @@ private:
return visitELF_X86_64_32S(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::aarch64:
case Triple::aarch64_be:
@ -98,7 +86,7 @@ private:
return visitELF_AARCH64_ABS64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::bpfel:
case Triple::bpfeb:
@ -109,7 +97,7 @@ private:
return visitELF_BPF_64_32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::mips64el:
case Triple::mips64:
@ -120,7 +108,7 @@ private:
return visitELF_MIPS64_64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::ppc64le:
case Triple::ppc64:
@ -131,7 +119,7 @@ private:
return visitELF_PPC64_ADDR64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::systemz:
switch (RelocType) {
@ -141,7 +129,7 @@ private:
return visitELF_390_64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::sparcv9:
switch (RelocType) {
@ -153,7 +141,7 @@ private:
return visitELF_SPARCV9_64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::amdgcn:
switch (RelocType) {
@ -163,11 +151,11 @@ private:
return visitELF_AMDGPU_ABS64(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
default:
HasError = true;
return RelocToApply();
return 0;
}
} else if (ObjToVisit.getBytesInAddress() == 4) { // 32-bit object file
switch (ObjToVisit.getArch()) {
@ -181,7 +169,7 @@ private:
return visitELF_386_PC32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::ppc:
switch (RelocType) {
@ -189,14 +177,14 @@ private:
return visitELF_PPC_ADDR32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::arm:
case Triple::armeb:
switch (RelocType) {
default:
HasError = true;
return RelocToApply();
return 0;
case ELF::R_ARM_ABS32:
return visitELF_ARM_ABS32(R, Value);
}
@ -206,7 +194,7 @@ private:
return visitELF_Lanai_32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::mipsel:
case Triple::mips:
@ -215,7 +203,7 @@ private:
return visitELF_MIPS_32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::sparc:
switch (RelocType) {
@ -224,7 +212,7 @@ private:
return visitELF_SPARC_32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
case Triple::hexagon:
switch (RelocType) {
@ -232,18 +220,18 @@ private:
return visitELF_HEX_32(R, Value);
default:
HasError = true;
return RelocToApply();
return 0;
}
default:
HasError = true;
return RelocToApply();
return 0;
}
} else {
report_fatal_error("Invalid word size in object file");
}
}
RelocToApply visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
uint64_t visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
switch (ObjToVisit.getArch()) {
case Triple::x86:
switch (RelocType) {
@ -263,10 +251,10 @@ private:
break;
}
HasError = true;
return RelocToApply();
return 0;
}
RelocToApply visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
uint64_t visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
switch (ObjToVisit.getArch()) {
default: break;
case Triple::x86_64:
@ -277,7 +265,7 @@ private:
}
}
HasError = true;
return RelocToApply();
return 0;
}
int64_t getELFAddend(RelocationRef R) {
@ -287,108 +275,88 @@ private:
return *AddendOrErr;
}
uint8_t getLengthMachO64(RelocationRef R) {
const MachOObjectFile *Obj = cast<MachOObjectFile>(R.getObject());
return Obj->getRelocationLength(R.getRawDataRefImpl());
}
/// Operations
/// 386-ELF
RelocToApply visitELF_386_NONE(RelocationRef R) {
return RelocToApply(0, 0);
uint64_t visitELF_386_NONE(RelocationRef R) {
return 0;
}
// Ideally the Addend here will be the addend in the data for
// the relocation. It's not actually the case for Rel relocations.
RelocToApply visitELF_386_32(RelocationRef R, uint64_t Value) {
return RelocToApply(Value, 4);
uint64_t visitELF_386_32(RelocationRef R, uint64_t Value) {
return Value;
}
RelocToApply visitELF_386_PC32(RelocationRef R, uint64_t Value) {
uint64_t Address = R.getOffset();
return RelocToApply(Value - Address, 4);
uint64_t visitELF_386_PC32(RelocationRef R, uint64_t Value) {
return Value - R.getOffset();
}
/// X86-64 ELF
RelocToApply visitELF_X86_64_NONE(RelocationRef R) {
return RelocToApply(0, 0);
uint64_t visitELF_X86_64_NONE(RelocationRef R) {
return 0;
}
RelocToApply visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
uint64_t visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
RelocToApply visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint64_t Address = R.getOffset();
return RelocToApply(Value + Addend - Address, 4);
uint64_t visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R) - R.getOffset();
}
RelocToApply visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
RelocToApply visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
/// BPF ELF
RelocToApply visitELF_BPF_64_32(RelocationRef R, uint64_t Value) {
uint32_t Res = Value & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_BPF_64_32(RelocationRef R, uint64_t Value) {
return Value & 0xFFFFFFFF;
}
RelocToApply visitELF_BPF_64_64(RelocationRef R, uint64_t Value) {
return RelocToApply(Value, 8);
uint64_t visitELF_BPF_64_64(RelocationRef R, uint64_t Value) {
return Value;
}
/// PPC64 ELF
RelocToApply visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
RelocToApply visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
uint64_t visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
/// PPC32 ELF
RelocToApply visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
/// Lanai ELF
RelocToApply visitELF_Lanai_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_Lanai_32(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
/// MIPS ELF
RelocToApply visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
uint32_t Res = Value & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
return Value & 0xFFFFFFFF;
}
/// MIPS64 ELF
RelocToApply visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
return RelocToApply(Res, 4);
uint64_t visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
}
RelocToApply visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
uint64_t Res = (Value + Addend);
return RelocToApply(Res, 8);
uint64_t visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
// AArch64 ELF
RelocToApply visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
uint64_t visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int64_t Res = Value + Addend;
@ -396,16 +364,15 @@ private:
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
return static_cast<uint32_t>(Res);
}
RelocToApply visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
uint64_t visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
// SystemZ ELF
RelocToApply visitELF_390_32(RelocationRef R, uint64_t Value) {
uint64_t visitELF_390_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
int64_t Res = Value + Addend;
@ -413,77 +380,71 @@ private:
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
return static_cast<uint32_t>(Res);
}
RelocToApply visitELF_390_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
uint64_t visitELF_390_64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
RelocToApply visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
int32_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
uint64_t visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
return Value + getELFAddend(R);
}
RelocToApply visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
int32_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
uint64_t visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
RelocToApply visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
uint64_t visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
return Value + getELFAddend(R);
}
RelocToApply visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
uint64_t visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
int64_t Res = Value;
// Overflow check allows for both signed and unsigned interpretation.
if (Res < INT32_MIN || Res > UINT32_MAX)
HasError = true;
return RelocToApply(static_cast<uint32_t>(Res), 4);
return static_cast<uint32_t>(Res);
}
RelocToApply visitELF_HEX_32(RelocationRef R, uint64_t Value) {
uint64_t visitELF_HEX_32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
return Value + Addend;
}
RelocToApply visitELF_AMDGPU_ABS32(RelocationRef R, uint64_t Value) {
uint64_t visitELF_AMDGPU_ABS32(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 4);
return Value + Addend;
}
RelocToApply visitELF_AMDGPU_ABS64(RelocationRef R, uint64_t Value) {
uint64_t visitELF_AMDGPU_ABS64(RelocationRef R, uint64_t Value) {
int64_t Addend = getELFAddend(R);
return RelocToApply(Value + Addend, 8);
return Value + Addend;
}
/// I386 COFF
RelocToApply visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
uint64_t visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
return static_cast<uint32_t>(Value);
}
RelocToApply visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
uint64_t visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
return static_cast<uint32_t>(Value);
}
/// AMD64 COFF
RelocToApply visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
uint64_t visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
return static_cast<uint32_t>(Value);
}
RelocToApply visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
return RelocToApply(Value, /*Width=*/8);
uint64_t visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
return Value;
}
// X86_64 MachO
RelocToApply visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
uint8_t Length = getLengthMachO64(R);
Length = 1<<Length;
return RelocToApply(Value, Length);
uint64_t visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
return Value;
}
};

View File

@ -0,0 +1,82 @@
//===-- WindowsResource.h ---------------------------------------*- C++-*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file declares the .res file class. .res files are intermediate
// products of the typical resource-compilation process on Windows. This
// process is as follows:
//
// .rc file(s) ---(rc.exe)---> .res file(s) ---(cvtres.exe)---> COFF file
//
// .rc files are human-readable scripts that list all resources a program uses.
//
// They are compiled into .res files, which are a list of the resources in
// binary form.
//
// Finally the data stored in the .res is compiled into a COFF file, where it
// is organized in a directory tree structure for optimized access by the
// program during runtime.
//
// Ref: msdn.microsoft.com/en-us/library/windows/desktop/ms648007(v=vs.85).aspx
//
//===---------------------------------------------------------------------===//
#ifndef LLVM_INCLUDE_LLVM_OBJECT_RESFILE_H
#define LLVM_INCLUDE_LLVM_OBJECT_RESFILE_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
namespace llvm {
namespace object {
class WindowsResource;
class ResourceEntryRef {
public:
Error moveNext(bool &End);
private:
friend class WindowsResource;
ResourceEntryRef(BinaryStreamRef Ref, const WindowsResource *Owner,
Error &Err);
Error loadNext();
BinaryStreamReader Reader;
BinaryStreamRef HeaderBytes;
BinaryStreamRef DataBytes;
const WindowsResource *OwningRes = nullptr;
};
class WindowsResource : public Binary {
public:
~WindowsResource() override;
Expected<ResourceEntryRef> getHeadEntry();
static bool classof(const Binary *V) { return V->isWinRes(); }
static Expected<std::unique_ptr<WindowsResource>>
createWindowsResource(MemoryBufferRef Source);
private:
friend class ResourceEntryRef;
WindowsResource(MemoryBufferRef Source);
BinaryByteStream BBS;
};
} // namespace object
} // namespace llvm
#endif
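
An illustrative sketch of how the API declared above could be consumed (not part of the diff; error handling is abbreviated and the header path is assumed to match the new file):

// Sketch: enumerate the entries of a .res file with the new WindowsResource API.
#include "llvm/Object/WindowsResource.h"
#include "llvm/Support/MemoryBuffer.h"

static llvm::Error walkResFile(llvm::MemoryBufferRef Buf) {
  using namespace llvm::object;
  auto ResOrErr = WindowsResource::createWindowsResource(Buf);
  if (!ResOrErr)
    return ResOrErr.takeError();
  auto EntryOrErr = (*ResOrErr)->getHeadEntry();
  if (!EntryOrErr)
    return EntryOrErr.takeError();
  ResourceEntryRef Entry = *EntryOrErr;
  bool End = false;
  while (!End) {                      // visit each resource entry in turn
    if (llvm::Error E = Entry.moveNext(End))
      return E;
  }
  return llvm::Error::success();
}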

View File

@ -32,7 +32,6 @@ class TargetMachine;
class PassInfo {
public:
typedef Pass* (*NormalCtor_t)();
typedef Pass *(*TargetMachineCtor_t)(TargetMachine *);
private:
StringRef PassName; // Nice name for Pass
@ -44,24 +43,20 @@ private:
std::vector<const PassInfo *> ItfImpl; // Interfaces implemented by this pass
NormalCtor_t NormalCtor;
TargetMachineCtor_t TargetMachineCtor;
public:
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass.
PassInfo(StringRef name, StringRef arg, const void *pi, NormalCtor_t normal,
bool isCFGOnly, bool is_analysis,
TargetMachineCtor_t machine = nullptr)
bool isCFGOnly, bool is_analysis)
: PassName(name), PassArgument(arg), PassID(pi), IsCFGOnlyPass(isCFGOnly),
IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal),
TargetMachineCtor(machine) {}
IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal) {}
/// PassInfo ctor - Do not call this directly, this should only be invoked
/// through RegisterPass. This version is for use by analysis groups; it
/// does not auto-register the pass.
PassInfo(StringRef name, const void *pi)
: PassName(name), PassArgument(""), PassID(pi), IsCFGOnlyPass(false),
IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(nullptr),
TargetMachineCtor(nullptr) {}
IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(nullptr) {}
/// getPassName - Return the friendly name for the pass, never returns null
///
@ -101,16 +96,6 @@ public:
NormalCtor = Ctor;
}
/// getTargetMachineCtor - Return a pointer to a function, that when called
/// with a TargetMachine, creates an instance of the pass and returns it.
/// This pointer may be null if there is no constructor with a TargetMachine
/// for the pass.
///
TargetMachineCtor_t getTargetMachineCtor() const { return TargetMachineCtor; }
void setTargetMachineCtor(TargetMachineCtor_t Ctor) {
TargetMachineCtor = Ctor;
}
/// createPass() - Use this method to create an instance of this pass.
Pass *createPass() const {
assert((!isAnalysisGroup() || NormalCtor) &&

View File

@ -31,8 +31,6 @@
namespace llvm {
class TargetMachine;
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis) \
static void *initialize##passName##PassOnce(PassRegistry &Registry) { \
PassInfo *PI = new PassInfo( \
@ -78,10 +76,6 @@ class TargetMachine;
template <typename PassName> Pass *callDefaultCtor() { return new PassName(); }
template <typename PassName> Pass *callTargetMachineCtor(TargetMachine *TM) {
return new PassName(TM);
}
//===---------------------------------------------------------------------------
/// RegisterPass<t> template - This template class is used to notify the system
/// that a Pass is available for use, and registers it into the internal

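With the TargetMachine constructor path removed, a pass is registered through its default constructor alone. A minimal hedged sketch (MyExamplePass is invented for illustration) using the RegisterPass<> template described above, which builds the PassInfo through callDefaultCtor:

#include "llvm/Pass.h"

namespace {
struct MyExamplePass : public llvm::FunctionPass {
  static char ID;
  MyExamplePass() : llvm::FunctionPass(ID) {}
  bool runOnFunction(llvm::Function &) override { return false; }
};
} // end anonymous namespace

char MyExamplePass::ID = 0;

// No TargetMachine-taking constructor needs to be (or can be) supplied here.
static llvm::RegisterPass<MyExamplePass>
    X("my-example", "My Example Pass", /*CFGOnly=*/false, /*is_analysis=*/false);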
View File

@ -16,7 +16,6 @@
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <string>
@ -32,7 +31,21 @@ namespace llvm {
class BinaryStreamReader {
public:
BinaryStreamReader() = default;
explicit BinaryStreamReader(BinaryStreamRef Stream);
explicit BinaryStreamReader(BinaryStreamRef Ref);
explicit BinaryStreamReader(BinaryStream &Stream);
explicit BinaryStreamReader(ArrayRef<uint8_t> Data,
llvm::support::endianness Endian);
explicit BinaryStreamReader(StringRef Data, llvm::support::endianness Endian);
BinaryStreamReader(const BinaryStreamReader &Other)
: Stream(Other.Stream), Offset(Other.Offset) {}
BinaryStreamReader &operator=(const BinaryStreamReader &Other) {
Stream = Other.Stream;
Offset = Other.Offset;
return *this;
}
virtual ~BinaryStreamReader() {}
/// Read as much as possible from the underlying string at the current offset
@ -244,12 +257,14 @@ public:
/// \returns the next byte in the stream.
uint8_t peek() const;
Error padToAlignment(uint32_t Align);
std::pair<BinaryStreamReader, BinaryStreamReader>
split(uint32_t Offset) const;
private:
BinaryStreamRef Stream;
uint32_t Offset;
uint32_t Offset = 0;
};
} // namespace llvm
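
A brief sketch of the new convenience constructors and split() (not part of the diff): a reader can now be built directly from a byte buffer plus an endianness instead of wrapping the data in a stream object by hand first.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/BinaryStreamReader.h"

static llvm::Error readHeader(llvm::ArrayRef<uint8_t> Bytes) {
  llvm::BinaryStreamReader Reader(Bytes, llvm::support::little);
  uint32_t Magic = 0;
  if (llvm::Error E = Reader.readInteger(Magic)) // pre-existing reader API
    return E;
  // split() divides what remains of the stream at the given offset
  // into two independent readers.
  auto Halves = Reader.split(Reader.bytesRemaining() / 2);
  (void)Halves;
  return llvm::Error::success();
}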

View File

@ -16,36 +16,74 @@
#include "llvm/Support/Error.h"
#include <algorithm>
#include <cstdint>
#include <memory>
namespace llvm {
/// Common stuff for mutable and immutable StreamRefs.
template <class StreamType, class RefType> class BinaryStreamRefBase {
public:
BinaryStreamRefBase() : Stream(nullptr), ViewOffset(0), Length(0) {}
BinaryStreamRefBase(StreamType &Stream, uint32_t Offset, uint32_t Length)
: Stream(&Stream), ViewOffset(Offset), Length(Length) {}
template <class RefType, class StreamType> class BinaryStreamRefBase {
protected:
BinaryStreamRefBase() = default;
BinaryStreamRefBase(std::shared_ptr<StreamType> SharedImpl, uint32_t Offset,
uint32_t Length)
: SharedImpl(SharedImpl), BorrowedImpl(SharedImpl.get()),
ViewOffset(Offset), Length(Length) {}
BinaryStreamRefBase(StreamType &BorrowedImpl, uint32_t Offset,
uint32_t Length)
: BorrowedImpl(&BorrowedImpl), ViewOffset(Offset), Length(Length) {}
BinaryStreamRefBase(const BinaryStreamRefBase &Other) {
SharedImpl = Other.SharedImpl;
BorrowedImpl = Other.BorrowedImpl;
ViewOffset = Other.ViewOffset;
Length = Other.Length;
}
llvm::support::endianness getEndian() const { return Stream->getEndian(); }
public:
llvm::support::endianness getEndian() const {
return BorrowedImpl->getEndian();
}
uint32_t getLength() const { return Length; }
const StreamType *getStream() const { return Stream; }
/// Return a new BinaryStreamRef with the first \p N elements removed.
RefType drop_front(uint32_t N) const {
if (!Stream)
if (!BorrowedImpl)
return RefType();
N = std::min(N, Length);
return RefType(*Stream, ViewOffset + N, Length - N);
RefType Result(static_cast<const RefType &>(*this));
Result.ViewOffset += N;
Result.Length -= N;
return Result;
}
/// Return a new BinaryStreamRef with the last \p N elements removed.
RefType drop_back(uint32_t N) const {
if (!BorrowedImpl)
return RefType();
N = std::min(N, Length);
RefType Result(static_cast<const RefType &>(*this));
Result.Length -= N;
return Result;
}
/// Return a new BinaryStreamRef with only the first \p N elements remaining.
RefType keep_front(uint32_t N) const {
if (!Stream)
return RefType();
N = std::min(N, Length);
return RefType(*Stream, ViewOffset, N);
assert(N <= getLength());
return drop_back(getLength() - N);
}
/// Return a new BinaryStreamRef with only the last \p N elements remaining.
RefType keep_back(uint32_t N) const {
assert(N <= getLength());
return drop_front(getLength() - N);
}
/// Return a new BinaryStreamRef with the first and last \p N elements
/// removed.
RefType drop_symmetric(uint32_t N) const {
return drop_front(N).drop_back(N);
}
/// Return a new BinaryStreamRef with the first \p Offset elements removed,
@ -54,8 +92,10 @@ public:
return drop_front(Offset).keep_front(Len);
}
bool valid() const { return BorrowedImpl != nullptr; }
bool operator==(const RefType &Other) const {
if (Stream != Other.Stream)
if (BorrowedImpl != Other.BorrowedImpl)
return false;
if (ViewOffset != Other.ViewOffset)
return false;
@ -73,9 +113,10 @@ protected:
return Error::success();
}
StreamType *Stream;
uint32_t ViewOffset;
uint32_t Length;
std::shared_ptr<StreamType> SharedImpl;
StreamType *BorrowedImpl = nullptr;
uint32_t ViewOffset = 0;
uint32_t Length = 0;
};
/// \brief BinaryStreamRef is to BinaryStream what ArrayRef is to an Array. It
@ -86,21 +127,27 @@ protected:
/// and use inheritance to achieve polymorphism. Instead, you should pass
/// around BinaryStreamRefs by value and achieve polymorphism that way.
class BinaryStreamRef
: public BinaryStreamRefBase<BinaryStream, BinaryStreamRef> {
: public BinaryStreamRefBase<BinaryStreamRef, BinaryStream> {
friend BinaryStreamRefBase<BinaryStreamRef, BinaryStream>;
friend class WritableBinaryStreamRef;
BinaryStreamRef(std::shared_ptr<BinaryStream> Impl, uint32_t ViewOffset,
uint32_t Length)
: BinaryStreamRefBase(Impl, ViewOffset, Length) {}
public:
BinaryStreamRef() = default;
BinaryStreamRef(BinaryStream &Stream)
: BinaryStreamRefBase(Stream, 0, Stream.getLength()) {}
BinaryStreamRef(BinaryStream &Stream, uint32_t Offset, uint32_t Length)
: BinaryStreamRefBase(Stream, Offset, Length) {}
BinaryStreamRef(BinaryStream &Stream);
BinaryStreamRef(BinaryStream &Stream, uint32_t Offset, uint32_t Length);
explicit BinaryStreamRef(ArrayRef<uint8_t> Data,
llvm::support::endianness Endian);
explicit BinaryStreamRef(StringRef Data, llvm::support::endianness Endian);
BinaryStreamRef(const BinaryStreamRef &Other);
// Use BinaryStreamRef.slice() instead.
BinaryStreamRef(BinaryStreamRef &S, uint32_t Offset,
uint32_t Length) = delete;
/// Check if a Stream is valid.
bool valid() const { return Stream != nullptr; }
/// Given an Offset into this StreamRef and a Size, return a reference to a
/// buffer owned by the stream.
///
@ -108,12 +155,7 @@ public:
/// bounds of this BinaryStreamRef's view and the implementation could read
/// the data, and an appropriate error code otherwise.
Error readBytes(uint32_t Offset, uint32_t Size,
ArrayRef<uint8_t> &Buffer) const {
if (auto EC = checkOffset(Offset, Size))
return EC;
return Stream->readBytes(ViewOffset + Offset, Size, Buffer);
}
ArrayRef<uint8_t> &Buffer) const;
/// Given an Offset into this BinaryStreamRef, return a reference to the
/// largest buffer the stream could support without necessitating a copy.
@ -121,33 +163,25 @@ public:
/// \returns a success error code if implementation could read the data,
/// and an appropriate error code otherwise.
Error readLongestContiguousChunk(uint32_t Offset,
ArrayRef<uint8_t> &Buffer) const {
if (auto EC = checkOffset(Offset, 1))
return EC;
if (auto EC =
Stream->readLongestContiguousChunk(ViewOffset + Offset, Buffer))
return EC;
// This StreamRef might refer to a smaller window over a larger stream. In
// that case we will have read out more bytes than we should return, because
// we should not read past the end of the current view.
uint32_t MaxLength = Length - Offset;
if (Buffer.size() > MaxLength)
Buffer = Buffer.slice(0, MaxLength);
return Error::success();
}
ArrayRef<uint8_t> &Buffer) const;
};
class WritableBinaryStreamRef
: public BinaryStreamRefBase<WritableBinaryStream,
WritableBinaryStreamRef> {
: public BinaryStreamRefBase<WritableBinaryStreamRef,
WritableBinaryStream> {
friend BinaryStreamRefBase<WritableBinaryStreamRef, WritableBinaryStream>;
WritableBinaryStreamRef(std::shared_ptr<WritableBinaryStream> Impl,
uint32_t ViewOffset, uint32_t Length)
: BinaryStreamRefBase(Impl, ViewOffset, Length) {}
public:
WritableBinaryStreamRef() = default;
WritableBinaryStreamRef(WritableBinaryStream &Stream)
: BinaryStreamRefBase(Stream, 0, Stream.getLength()) {}
WritableBinaryStreamRef(WritableBinaryStream &Stream);
WritableBinaryStreamRef(WritableBinaryStream &Stream, uint32_t Offset,
uint32_t Length)
: BinaryStreamRefBase(Stream, Offset, Length) {}
uint32_t Length);
explicit WritableBinaryStreamRef(MutableArrayRef<uint8_t> Data,
llvm::support::endianness Endian);
WritableBinaryStreamRef(const WritableBinaryStreamRef &Other);
// Use WritableBinaryStreamRef.slice() instead.
WritableBinaryStreamRef(WritableBinaryStreamRef &S, uint32_t Offset,
@ -159,17 +193,13 @@ public:
/// \returns a success error code if the data could fit within the underlying
/// stream at the specified location and the implementation could write the
/// data, and an appropriate error code otherwise.
Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const {
if (auto EC = checkOffset(Offset, Data.size()))
return EC;
Error writeBytes(uint32_t Offset, ArrayRef<uint8_t> Data) const;
return Stream->writeBytes(ViewOffset + Offset, Data);
}
operator BinaryStreamRef() { return BinaryStreamRef(*Stream); }
/// Convert this WritableBinaryStreamRef to a read-only BinaryStreamRef.
operator BinaryStreamRef() const;
/// \brief For buffered streams, commits changes to the backing store.
Error commit() { return Stream->commit(); }
Error commit();
};
} // end namespace llvm
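
A short sketch of the value semantics described above (not part of the diff): a BinaryStreamRef is a cheap (offset, length) view, so the slicing helpers only adjust the window over the same underlying stream. The example assumes Bytes is large enough for the slices taken.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/BinaryStreamRef.h"

static void sliceExample(llvm::ArrayRef<uint8_t> Bytes) {
  llvm::BinaryStreamRef Whole(Bytes, llvm::support::little); // new ArrayRef ctor
  llvm::BinaryStreamRef Body = Whole.drop_front(4);          // skip a 4-byte header
  llvm::BinaryStreamRef Tail = Body.keep_back(8);            // last 8 bytes of the body
  llvm::BinaryStreamRef Mid  = Whole.drop_symmetric(4);      // strip 4 bytes at each end
  (void)Tail;
  (void)Mid;
}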

View File

@ -32,7 +32,20 @@ namespace llvm {
class BinaryStreamWriter {
public:
BinaryStreamWriter() = default;
explicit BinaryStreamWriter(WritableBinaryStreamRef Stream);
explicit BinaryStreamWriter(WritableBinaryStreamRef Ref);
explicit BinaryStreamWriter(WritableBinaryStream &Stream);
explicit BinaryStreamWriter(MutableArrayRef<uint8_t> Data,
llvm::support::endianness Endian);
BinaryStreamWriter(const BinaryStreamWriter &Other)
: Stream(Other.Stream), Offset(Other.Offset) {}
BinaryStreamWriter &operator=(const BinaryStreamWriter &Other) {
Stream = Other.Stream;
Offset = Other.Offset;
return *this;
}
virtual ~BinaryStreamWriter() {}
/// Write the bytes specified in \p Buffer to the underlying stream.

View File

@ -261,7 +261,7 @@ struct file_magic {
coff_object, ///< COFF object file
coff_import_library, ///< COFF import library
pecoff_executable, ///< PECOFF executable file
windows_resource, ///< Windows compiled resource file (.rc)
windows_resource, ///< Windows compiled resource file (.res)
wasm_object ///< WebAssembly Object file
};

View File

@ -62,7 +62,6 @@ def : GINodeEquiv<G_FMUL, fmul>;
def : GINodeEquiv<G_FDIV, fdiv>;
def : GINodeEquiv<G_FREM, frem>;
def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_INTRINSIC, intrinsic_wo_chain>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.

View File

@ -53,8 +53,7 @@ public:
: Index(Index), ModuleLoader(std::move(ModuleLoader)) {}
/// Import functions in Module \p M based on the supplied import list.
Expected<bool>
importFunctions(Module &M, const ImportMapTy &ImportList);
Expected<bool> importFunctions(Module &M, const ImportMapTy &ImportList);
private:
/// The summaries index used to trigger importing.

View File

@ -40,6 +40,7 @@ enum ExpressionType {
ET_Base,
ET_Constant,
ET_Variable,
ET_Dead,
ET_Unknown,
ET_BasicStart,
ET_Basic,
@ -380,7 +381,9 @@ public:
OS << "ExpressionTypeStore, ";
this->BasicExpression::printInternal(OS, false);
OS << " represents Store " << *Store;
OS << " with MemoryLeader " << *getMemoryLeader();
OS << " with StoredValue ";
StoredValue->printAsOperand(OS);
OS << " and MemoryLeader " << *getMemoryLeader();
}
};
@ -513,6 +516,17 @@ public:
}
};
class DeadExpression final : public Expression {
public:
DeadExpression() : Expression(ET_Dead) {}
DeadExpression(const DeadExpression &) = delete;
DeadExpression &operator=(const DeadExpression &) = delete;
static bool classof(const Expression *E) {
return E->getExpressionType() == ET_Dead;
}
};
class VariableExpression final : public Expression {
private:
Value *VariableValue;

View File

@ -121,6 +121,7 @@ private:
Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
Value *optimizeWcslen(CallInst *CI, IRBuilder<> &B);
// Wrapper for all String/Memory Library Call Optimizations
Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
@ -165,6 +166,9 @@ private:
/// hasFloatVersion - Checks if there is a float version of the specified
/// function by checking for an existing function with name FuncName + f
bool hasFloatVersion(StringRef FuncName);
/// Shared code to optimize strlen+wcslen.
Value *optimizeStringLength(CallInst *CI, IRBuilder<> &B, unsigned CharSize);
};
} // End llvm namespace

View File

@ -683,8 +683,11 @@ static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
#ifndef NDEBUG
static const Function *getParent(const Value *V) {
if (const Instruction *inst = dyn_cast<Instruction>(V))
if (const Instruction *inst = dyn_cast<Instruction>(V)) {
if (!inst->getParent())
return nullptr;
return inst->getParent()->getParent();
}
if (const Argument *arg = dyn_cast<Argument>(V))
return arg->getParent();

View File

@ -58,45 +58,12 @@ char BranchProbabilityInfoWrapperPass::ID = 0;
static const uint32_t LBH_TAKEN_WEIGHT = 124;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
/// \brief Unreachable-terminating branch taken weight.
/// \brief Unreachable-terminating branch taken probability.
///
/// This is the weight for a branch being taken to a block that terminates
/// This is the probability for a branch being taken to a block that terminates
/// (eventually) in unreachable. These are predicted as unlikely as possible.
static const uint32_t UR_TAKEN_WEIGHT = 1;
/// \brief Unreachable-terminating branch not-taken weight.
///
/// This is the weight for a branch not being taken toward a block that
/// terminates (eventually) in unreachable. Such a branch is essentially never
/// taken. Set the weight to an absurdly high value so that nested loops don't
/// easily subsume it.
static const uint32_t UR_NONTAKEN_WEIGHT = 1024*1024 - 1;
/// \brief Returns the branch probability for unreachable edge according to
/// heuristic.
///
/// This is the branch probability being taken to a block that terminates
/// (eventually) in unreachable. These are predicted as unlikely as possible.
static BranchProbability getUnreachableProbability(uint64_t UnreachableCount) {
assert(UnreachableCount > 0 && "UnreachableCount must be > 0");
return BranchProbability::getBranchProbability(
UR_TAKEN_WEIGHT,
(UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * UnreachableCount);
}
/// \brief Returns the branch probability for reachable edge according to
/// heuristic.
///
/// This is the branch probability not being taken toward a block that
/// terminates (eventually) in unreachable. Such a branch is essentially never
/// taken. Set the weight to an absurdly high value so that nested loops don't
/// easily subsume it.
static BranchProbability getReachableProbability(uint64_t ReachableCount) {
assert(ReachableCount > 0 && "ReachableCount must be > 0");
return BranchProbability::getBranchProbability(
UR_NONTAKEN_WEIGHT,
(UR_TAKEN_WEIGHT + UR_NONTAKEN_WEIGHT) * ReachableCount);
}
/// All reachable edges will equally share the remaining probability.
static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);
/// \brief Weight for a branch taken going into a cold block.
///
@ -232,8 +199,10 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(const BasicBlock *BB) {
return true;
}
auto UnreachableProb = getUnreachableProbability(UnreachableEdges.size());
auto ReachableProb = getReachableProbability(ReachableEdges.size());
auto UnreachableProb = UR_TAKEN_PROB;
auto ReachableProb =
(BranchProbability::getOne() - UR_TAKEN_PROB * UnreachableEdges.size()) /
ReachableEdges.size();
for (unsigned SuccIdx : UnreachableEdges)
setEdgeProbability(BB, SuccIdx, UnreachableProb);
@ -319,7 +288,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
// If the unreachable heuristic is stronger, we use it for this edge.
if (UnreachableIdxs.size() > 0 && ReachableIdxs.size() > 0) {
auto ToDistribute = BranchProbability::getZero();
auto UnreachableProb = getUnreachableProbability(UnreachableIdxs.size());
auto UnreachableProb = UR_TAKEN_PROB;
for (auto i : UnreachableIdxs)
if (UnreachableProb < BP[i]) {
ToDistribute += BP[i] - UnreachableProb;

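A worked example of the new scheme (a sketch that assumes BranchProbability's fixed 1 << 31 denominator): with one unreachable and three reachable successors, the unreachable edge gets 1/2^31 and each reachable edge gets (1 - 1/2^31) / 3, i.e. essentially one third.

#include "llvm/Support/BranchProbability.h"

static void unreachableSplitExample() {
  using llvm::BranchProbability;
  const BranchProbability UrTaken = BranchProbability::getRaw(1); // ~= 2^-31
  unsigned NumUnreachable = 1, NumReachable = 3;
  BranchProbability Reachable =
      (BranchProbability::getOne() - UrTaken * NumUnreachable) / NumReachable;
  (void)Reachable; // each reachable successor receives this probability
}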
View File

@ -477,10 +477,8 @@ bool CGPassManager::runOnModule(Module &M) {
if (DevirtualizedCall)
DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after " << Iteration
<< " times, due to -max-cg-scc-iterations\n");
if (Iteration > MaxSCCIterations)
MaxSCCIterations = Iteration;
MaxSCCIterations.updateMax(Iteration);
}
Changed |= doFinalization(CG);
return Changed;

View File

@ -126,8 +126,8 @@ static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q,
unsigned MaxRecurse) {
Instruction::BinaryOps OpcodeToExpand,
const SimplifyQuery &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
return nullptr;
@ -184,7 +184,8 @@ static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
Value *LHS, Value *RHS, const SimplifyQuery &Q,
Value *LHS, Value *RHS,
const SimplifyQuery &Q,
unsigned MaxRecurse) {
assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
@ -2260,28 +2261,49 @@ static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
if (!OpTy->getScalarType()->isIntegerTy(1))
return nullptr;
// A boolean compared to true/false can be simplified in 14 out of the 20
// (10 predicates * 2 constants) possible combinations. Cases not handled here
// require a 'not' of the LHS, so those must be transformed in InstCombine.
if (match(RHS, m_Zero())) {
switch (Pred) {
case CmpInst::ICMP_NE: // X != 0 -> X
case CmpInst::ICMP_UGT: // X >u 0 -> X
case CmpInst::ICMP_SLT: // X <s 0 -> X
return LHS;
case CmpInst::ICMP_ULT: // X <u 0 -> false
case CmpInst::ICMP_SGT: // X >s 0 -> false
return getFalse(ITy);
case CmpInst::ICMP_UGE: // X >=u 0 -> true
case CmpInst::ICMP_SLE: // X <=s 0 -> true
return getTrue(ITy);
default: break;
}
} else if (match(RHS, m_One())) {
switch (Pred) {
case CmpInst::ICMP_EQ: // X == 1 -> X
case CmpInst::ICMP_UGE: // X >=u 1 -> X
case CmpInst::ICMP_SLE: // X <=s -1 -> X
return LHS;
case CmpInst::ICMP_UGT: // X >u 1 -> false
case CmpInst::ICMP_SLT: // X <s -1 -> false
return getFalse(ITy);
case CmpInst::ICMP_ULE: // X <=u 1 -> true
case CmpInst::ICMP_SGE: // X >=s -1 -> true
return getTrue(ITy);
default: break;
}
}
switch (Pred) {
default:
break;
case ICmpInst::ICMP_EQ:
// X == 1 -> X
if (match(RHS, m_One()))
return LHS;
break;
case ICmpInst::ICMP_NE:
// X != 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_UGT:
// X >u 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_UGE:
// X >=u 1 -> X
if (match(RHS, m_One()))
return LHS;
if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
return getTrue(ITy);
break;
@ -2296,16 +2318,6 @@ static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
// X <s 0 -> X
if (match(RHS, m_Zero()))
return LHS;
break;
case ICmpInst::ICMP_SLE:
// X <=s -1 -> X
if (match(RHS, m_One()))
return LHS;
break;
case ICmpInst::ICMP_ULE:
if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
return getTrue(ITy);

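A standalone illustration of a few of the fourteen folds enumerated above (plain C++, not LLVM code): for an i1 value the unsigned range is {0, 1} and the signed range is {0, -1}, which is why "X != 0", "X >u 0" and "X <s 0" all fold to X itself, while "X <u 0" is always false and "X >=u 0" is always true.

#include <cassert>

int main() {
  for (int I = 0; I < 2; ++I) {
    bool X = (I != 0);
    unsigned U = X;         // unsigned interpretation of the i1
    int S = X ? -1 : 0;     // signed interpretation of the i1
    assert((U != 0) == X);  // icmp ne  X, 0  ->  X
    assert((U > 0) == X);   // icmp ugt X, 0  ->  X
    assert((S < 0) == X);   // icmp slt X, 0  ->  X
  }
  return 0;
}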
View File

@ -1799,6 +1799,15 @@ bool MemorySSA::dominates(const MemoryAccess *Dominator,
const static char LiveOnEntryStr[] = "liveOnEntry";
void MemoryAccess::print(raw_ostream &OS) const {
switch (getValueID()) {
case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
}
llvm_unreachable("invalid value id");
}
void MemoryDef::print(raw_ostream &OS) const {
MemoryAccess *UO = getDefiningAccess();
@ -1836,8 +1845,6 @@ void MemoryPhi::print(raw_ostream &OS) const {
OS << ')';
}
MemoryAccess::~MemoryAccess() {}
void MemoryUse::print(raw_ostream &OS) const {
MemoryAccess *UO = getDefiningAccess();
OS << "MemoryUse(";
@ -2054,3 +2061,15 @@ MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
return StartingAccess;
}
} // namespace llvm
void MemoryPhi::deleteMe(DerivedUser *Self) {
delete static_cast<MemoryPhi *>(Self);
}
void MemoryDef::deleteMe(DerivedUser *Self) {
delete static_cast<MemoryDef *>(Self);
}
void MemoryUse::deleteMe(DerivedUser *Self) {
delete static_cast<MemoryUse *>(Self);
}

View File

@ -3885,7 +3885,7 @@ public:
: SCEVRewriteVisitor(SE), L(L), Valid(true) {}
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
if (!SE.isLoopInvariant(Expr, L))
Valid = false;
return Expr;
}
@ -3919,7 +3919,7 @@ public:
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
// Only allow AddRecExprs for this loop.
if (!(SE.getLoopDisposition(Expr, L) == ScalarEvolution::LoopInvariant))
if (!SE.isLoopInvariant(Expr, L))
Valid = false;
return Expr;
}
@ -5947,6 +5947,8 @@ ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
return SE->getCouldNotCompute();
assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
"No point in having a non-constant max backedge taken count!");
return getMax();
}
@ -5972,7 +5974,11 @@ bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
: ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) {}
: ExactNotTaken(E), MaxNotTaken(E), MaxOrZero(false) {
assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
isa<SCEVConstant>(MaxNotTaken)) &&
"No point in having a non-constant max backedge taken count!");
}
ScalarEvolution::ExitLimit::ExitLimit(
const SCEV *E, const SCEV *M, bool MaxOrZero,
@ -5981,6 +5987,9 @@ ScalarEvolution::ExitLimit::ExitLimit(
assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
!isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
"Exact is not allowed to be less precise than Max");
assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
isa<SCEVConstant>(MaxNotTaken)) &&
"No point in having a non-constant max backedge taken count!");
for (auto *PredSet : PredSetList)
for (auto *P : *PredSet)
addPredicate(P);
@ -5989,11 +5998,19 @@ ScalarEvolution::ExitLimit::ExitLimit(
ScalarEvolution::ExitLimit::ExitLimit(
const SCEV *E, const SCEV *M, bool MaxOrZero,
const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
: ExitLimit(E, M, MaxOrZero, {&PredSet}) {}
: ExitLimit(E, M, MaxOrZero, {&PredSet}) {
assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
isa<SCEVConstant>(MaxNotTaken)) &&
"No point in having a non-constant max backedge taken count!");
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
bool MaxOrZero)
: ExitLimit(E, M, MaxOrZero, None) {}
: ExitLimit(E, M, MaxOrZero, None) {
assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
isa<SCEVConstant>(MaxNotTaken)) &&
"No point in having a non-constant max backedge taken count!");
}
/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
@ -6018,6 +6035,8 @@ ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate));
});
assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
"No point in having a non-constant max backedge taken count!");
}
/// Invalidate this result and free the ExitNotTakenInfo array.
@ -6279,7 +6298,7 @@ ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
// to not.
if (isa<SCEVCouldNotCompute>(MaxBECount) &&
!isa<SCEVCouldNotCompute>(BECount))
MaxBECount = BECount;
MaxBECount = getConstant(getUnsignedRange(BECount).getUnsignedMax());
return ExitLimit(BECount, MaxBECount, false,
{&EL0.Predicates, &EL1.Predicates});
@ -7583,13 +7602,20 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
loopHasNoAbnormalExits(AddRec->getLoop())) {
const SCEV *Exact =
getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
return ExitLimit(Exact, Exact, false, Predicates);
const SCEV *Max =
Exact == getCouldNotCompute()
? Exact
: getConstant(getUnsignedRange(Exact).getUnsignedMax());
return ExitLimit(Exact, Max, false, Predicates);
}
// Solve the general equation.
const SCEV *E = SolveLinEquationWithOverflow(
StepC->getAPInt(), getNegativeSCEV(Start), *this);
return ExitLimit(E, E, false, Predicates);
const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
getNegativeSCEV(Start), *this);
const SCEV *M = E == getCouldNotCompute()
? E
: getConstant(getUnsignedRange(E).getUnsignedMax());
return ExitLimit(E, M, false, Predicates);
}
ScalarEvolution::ExitLimit
@ -9218,8 +9244,9 @@ ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
getConstant(StrideForMaxBECount), false);
}
if (isa<SCEVCouldNotCompute>(MaxBECount))
MaxBECount = BECount;
if (isa<SCEVCouldNotCompute>(MaxBECount) &&
!isa<SCEVCouldNotCompute>(BECount))
MaxBECount = getConstant(getUnsignedRange(BECount).getUnsignedMax());
return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}

View File

@ -13,6 +13,7 @@
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
@ -1518,6 +1519,21 @@ TargetLibraryInfoImpl &TargetLibraryAnalysis::lookupInfoImpl(const Triple &T) {
return *Impl;
}
unsigned TargetLibraryInfoImpl::getTargetWCharSize(const Triple &T) {
// See also clang/lib/Basic/Targets.cpp.
if (T.isPS4() || T.isOSWindows() || T.isArch16Bit())
return 2;
if (T.getArch() == Triple::xcore)
return 1;
return 4;
}
unsigned TargetLibraryInfoImpl::getWCharSize(const Module &M) const {
if (auto *ShortWChar = cast_or_null<ConstantAsMetadata>(
M.getModuleFlag("wchar_size")))
return cast<ConstantInt>(ShortWChar->getValue())->getZExtValue();
return getTargetWCharSize(Triple(M.getTargetTriple()));
}
TargetLibraryInfoWrapperPass::TargetLibraryInfoWrapperPass()
: ImmutablePass(ID), TLIImpl(), TLI(TLIImpl) {

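A sketch of how a caller might turn the new hook into the bit width that the string-length helpers take (not part of the diff; TLIImpl and M stand for a TargetLibraryInfoImpl and a Module already available at the call site):

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Module.h"

static unsigned wcharBitWidth(const llvm::TargetLibraryInfoImpl &TLIImpl,
                              const llvm::Module &M) {
  // 2 bytes on Windows/PS4, 1 on XCore, otherwise 4 (per getTargetWCharSize).
  unsigned Bytes = TLIImpl.getWCharSize(M);
  return 8 * Bytes; // e.g. 16 or 32, suitable as a CharSize argument
}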
View File

@ -26,6 +26,7 @@
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
@ -2953,14 +2954,16 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
return Ptr;
}
bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
unsigned CharSize) {
// Make sure the GEP has exactly three arguments.
if (GEP->getNumOperands() != 3)
return false;
// Make sure the index-ee is a pointer to array of i8.
// Make sure the index-ee is a pointer to array of \p CharSize integers.
ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
if (!AT || !AT->getElementType()->isIntegerTy(8))
if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
return false;
// Check to make sure that the first operand of the GEP is an integer and
@ -2972,11 +2975,9 @@ bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
return true;
}
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str.
/// If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
uint64_t Offset, bool TrimAtNul) {
bool llvm::getConstantDataArrayInfo(const Value *V,
ConstantDataArraySlice &Slice,
unsigned ElementSize, uint64_t Offset) {
assert(V);
// Look through bitcast instructions and geps.
@ -2987,7 +2988,7 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// The GEP operator should be based on a pointer to string constant, and is
// indexing into the string constant.
if (!isGEPBasedOnPointerToString(GEP))
if (!isGEPBasedOnPointerToString(GEP, ElementSize))
return false;
// If the second index isn't a ConstantInt, then this is a variable index
@ -2998,8 +2999,8 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
StartIdx = CI->getZExtValue();
else
return false;
return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
TrimAtNul);
return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
StartIdx + Offset);
}
// The GEP instruction, constant or instruction, must reference a global
@ -3009,30 +3010,72 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
return false;
// Handle the all-zeros case.
const ConstantDataArray *Array;
ArrayType *ArrayTy;
if (GV->getInitializer()->isNullValue()) {
// This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
Str = "";
return true;
}
Type *GVTy = GV->getValueType();
if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
// A zeroinitializer for the array; There is no ConstantDataArray.
Array = nullptr;
} else {
const DataLayout &DL = GV->getParent()->getDataLayout();
uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
uint64_t Length = SizeInBytes / (ElementSize / 8);
if (Length <= Offset)
return false;
// This must be a ConstantDataArray.
const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
if (!Array || !Array->isString())
Slice.Array = nullptr;
Slice.Offset = 0;
Slice.Length = Length - Offset;
return true;
}
} else {
// This must be a ConstantDataArray.
Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
if (!Array)
return false;
ArrayTy = Array->getType();
}
if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
return false;
// Get the number of elements in the array.
uint64_t NumElts = Array->getType()->getArrayNumElements();
// Start out with the entire array in the StringRef.
Str = Array->getAsString();
uint64_t NumElts = ArrayTy->getArrayNumElements();
if (Offset > NumElts)
return false;
Slice.Array = Array;
Slice.Offset = Offset;
Slice.Length = NumElts - Offset;
return true;
}
/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str.
/// If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
uint64_t Offset, bool TrimAtNul) {
ConstantDataArraySlice Slice;
if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
return false;
if (Slice.Array == nullptr) {
if (TrimAtNul) {
Str = StringRef();
return true;
}
if (Slice.Length == 1) {
Str = StringRef("", 1);
return true;
}
// We cannot instantiate a StringRef as we do not have an appropriate string
// of 0s at hand.
return false;
}
// Start out with the entire array in the StringRef.
Str = Slice.Array->getAsString();
// Skip over 'offset' bytes.
Str = Str.substr(Offset);
Str = Str.substr(Slice.Offset);
if (TrimAtNul) {
// Trim off the \0 and anything after it. If the array is not nul
@ -3050,7 +3093,8 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
SmallPtrSetImpl<const PHINode*> &PHIs) {
SmallPtrSetImpl<const PHINode*> &PHIs,
unsigned CharSize) {
// Look through noop bitcast instructions.
V = V->stripPointerCasts();
@ -3063,7 +3107,7 @@ static uint64_t GetStringLengthH(const Value *V,
// If it was new, see if all the input strings are the same length.
uint64_t LenSoFar = ~0ULL;
for (Value *IncValue : PN->incoming_values()) {
uint64_t Len = GetStringLengthH(IncValue, PHIs);
uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
if (Len == 0) return 0; // Unknown length -> unknown.
if (Len == ~0ULL) continue;
@ -3079,9 +3123,9 @@ static uint64_t GetStringLengthH(const Value *V,
// strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
if (Len1 == 0) return 0;
uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
if (Len2 == 0) return 0;
if (Len1 == ~0ULL) return Len2;
if (Len2 == ~0ULL) return Len1;
@ -3090,20 +3134,30 @@ static uint64_t GetStringLengthH(const Value *V,
}
// Otherwise, see if we can read the string.
StringRef StrData;
if (!getConstantStringInfo(V, StrData))
ConstantDataArraySlice Slice;
if (!getConstantDataArrayInfo(V, Slice, CharSize))
return 0;
return StrData.size()+1;
if (Slice.Array == nullptr)
return 1;
// Search for nul characters
unsigned NullIndex = 0;
for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
break;
}
return NullIndex + 1;
}
/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V) {
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
if (!V->getType()->isPointerTy()) return 0;
SmallPtrSet<const PHINode*, 32> PHIs;
uint64_t Len = GetStringLengthH(V, PHIs);
uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
// If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
// an empty string as a length.
return Len == ~0ULL ? 1 : Len;

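A sketch of the extended interface (V is a hypothetical pointer-typed Value; this is not code from the diff). CharSize is given in bits: 8 for ordinary C strings, 16 or 32 for wide strings, matching the module's wchar_t size.

#include "llvm/Analysis/ValueTracking.h"

static void stringLengthExample(const llvm::Value *V, unsigned WCharBits) {
  // Both calls return the length including the terminator, or 0 if unknown.
  uint64_t NarrowLen = llvm::GetStringLength(V, /*CharSize=*/8);
  uint64_t WideLen = llvm::GetStringLength(V, WCharBits); // e.g. 16 or 32
  (void)NarrowLen;
  (void)WideLen;
}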
View File

@ -2502,7 +2502,7 @@ LLParser::PerFunctionState::~PerFunctionState() {
continue;
P.second.first->replaceAllUsesWith(
UndefValue::get(P.second.first->getType()));
delete P.second.first;
P.second.first->deleteValue();
}
for (const auto &P : ForwardRefValIDs) {
@ -2510,7 +2510,7 @@ LLParser::PerFunctionState::~PerFunctionState() {
continue;
P.second.first->replaceAllUsesWith(
UndefValue::get(P.second.first->getType()));
delete P.second.first;
P.second.first->deleteValue();
}
}
@ -2642,7 +2642,7 @@ bool LLParser::PerFunctionState::SetInstName(int NameID,
getTypeString(FI->second.first->getType()) + "'");
Sentinel->replaceAllUsesWith(Inst);
delete Sentinel;
Sentinel->deleteValue();
ForwardRefValIDs.erase(FI);
}
@ -2659,7 +2659,7 @@ bool LLParser::PerFunctionState::SetInstName(int NameID,
getTypeString(FI->second.first->getType()) + "'");
Sentinel->replaceAllUsesWith(Inst);
delete Sentinel;
Sentinel->deleteValue();
ForwardRefVals.erase(FI);
}

View File

@ -4489,11 +4489,11 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
// Add instruction to end of current BB. If there is no current BB, reject
// this file.
if (!CurBB) {
delete I;
I->deleteValue();
return error("Invalid instruction with no BB");
}
if (!OperandBundles.empty()) {
delete I;
I->deleteValue();
return error("Operand bundles found with no consumer");
}
CurBB->getInstList().push_back(I);

Some files were not shown because too many files have changed in this diff.