Merge llvm, clang, lld, lldb, compiler-rt and libc++ r301441, and update
build glue.

commit 62479c810b

@@ -114,6 +114,21 @@ void __tsan_mutex_post_signal(void *addr, unsigned flags);
void __tsan_mutex_pre_divert(void *addr, unsigned flags);
void __tsan_mutex_post_divert(void *addr, unsigned flags);

// External race detection API.
// Can be used by non-instrumented libraries to detect when their objects are
// being used in an unsafe manner.
// - __tsan_external_read/__tsan_external_write annotates the logical reads
//   and writes of the object at the specified address. 'caller_pc' should
//   be the PC of the library user, which the library can obtain with e.g.
//   `__builtin_return_address(0)`.
// - __tsan_external_register_tag registers a 'tag' with the specified name,
//   which is later used in read/write annotations to denote the object type
// - __tsan_external_assign_tag can optionally mark a heap object with a tag
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);

#ifdef __cplusplus
}  // extern "C"
#endif

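A minimal sketch (not part of the commit) of how a non-instrumented library could drive the external API above; the library name and functions are hypothetical:

static void *tag;  // registered once at library startup

void mylib_init(void) {
  tag = __tsan_external_register_tag("MyLibrary::Container");
}

void mylib_read(void *obj) {
  // Per the comment above, caller_pc is the PC of the library user.
  __tsan_external_read(obj, __builtin_return_address(0), tag);
  // ... perform the actual read of obj ...
}

void mylib_write(void *obj) {
  __tsan_external_write(obj, __builtin_return_address(0), tag);
  // ... perform the actual write of obj ...
}
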
@@ -7,7 +7,6 @@
 *
 * ===----------------------------------------------------------------------===
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
@@ -15,6 +14,23 @@
#include "int_lib.h"
#include "int_util.h"

typedef struct emutls_address_array {
    uintptr_t size;  /* number of elements in the 'data' array */
    void* data[];
} emutls_address_array;

static void emutls_shutdown(emutls_address_array *array);

#ifndef _WIN32

#include <pthread.h>

static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t emutls_pthread_key;

typedef unsigned int gcc_word __attribute__((mode(word)));
typedef unsigned int gcc_pointer __attribute__((mode(pointer)));

/* Default is not to use posix_memalign, so systems like Android
 * can use thread local data without heavier POSIX memory allocators.
 */
@@ -22,26 +38,6 @@
#define EMUTLS_USE_POSIX_MEMALIGN 0
#endif

/* For every TLS variable xyz,
 * there is one __emutls_control variable named __emutls_v.xyz.
 * If xyz has non-zero initial value, __emutls_v.xyz's "value"
 * will point to __emutls_t.xyz, which has the initial value.
 */
typedef unsigned int gcc_word __attribute__((mode(word)));
typedef struct __emutls_control {
    /* Must use gcc_word here, instead of size_t, to match GCC.  When
       gcc_word is larger than size_t, the upper extra bits are all
       zeros.  We can use variables of size_t to operate on size and
       align. */
    gcc_word size;   /* size of the object in bytes */
    gcc_word align;  /* alignment of the object in bytes */
    union {
        uintptr_t index;  /* data[index-1] is the object address */
        void* address;    /* object address, when in single thread env */
    } object;
    void* value;  /* null or non-zero initial value for the object */
} __emutls_control;

static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *base;
#if EMUTLS_USE_POSIX_MEMALIGN
@@ -50,7 +46,7 @@ static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
#else
#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void*))
    char* object;
    if ((object = malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
    if ((object = (char*)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
        abort();
    base = (void*)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES))
                   & ~(uintptr_t)(align - 1));
@@ -69,10 +65,207 @@ static __inline void emutls_memalign_free(void *base) {
#endif
}

static void emutls_key_destructor(void* ptr) {
    emutls_shutdown((emutls_address_array*)ptr);
    free(ptr);
}

static __inline void emutls_init(void) {
    if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
        abort();
}

static __inline void emutls_init_once(void) {
    static pthread_once_t once = PTHREAD_ONCE_INIT;
    pthread_once(&once, emutls_init);
}

static __inline void emutls_lock() {
    pthread_mutex_lock(&emutls_mutex);
}

static __inline void emutls_unlock() {
    pthread_mutex_unlock(&emutls_mutex);
}

static __inline void emutls_setspecific(emutls_address_array *value) {
    pthread_setspecific(emutls_pthread_key, (void*) value);
}

static __inline emutls_address_array* emutls_getspecific() {
    return (emutls_address_array*) pthread_getspecific(emutls_pthread_key);
}

#else

#include <Windows.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
#include <immintrin.h>

static LPCRITICAL_SECTION emutls_mutex;
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;

typedef uintptr_t gcc_word;
typedef void * gcc_pointer;

static void win_error(DWORD last_err, const char *hint) {
    char *buffer = NULL;
    if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                       FORMAT_MESSAGE_FROM_SYSTEM |
                       FORMAT_MESSAGE_MAX_WIDTH_MASK,
                       NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
        fprintf(stderr, "Windows error: %s\n", buffer);
    } else {
        fprintf(stderr, "Unknown Windows error: %s\n", hint);
    }
    LocalFree(buffer);
}

static __inline void win_abort(DWORD last_err, const char *hint) {
    win_error(last_err, hint);
    abort();
}

static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
    void *base = _aligned_malloc(size, align);
    if (!base)
        win_abort(GetLastError(), "_aligned_malloc");
    return base;
}

static __inline void emutls_memalign_free(void *base) {
    _aligned_free(base);
}

static void emutls_exit(void) {
    if (emutls_mutex) {
        DeleteCriticalSection(emutls_mutex);
        _aligned_free(emutls_mutex);
        emutls_mutex = NULL;
    }
    if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
        emutls_shutdown((emutls_address_array*)TlsGetValue(emutls_tls_index));
        TlsFree(emutls_tls_index);
        emutls_tls_index = TLS_OUT_OF_INDEXES;
    }
}

#pragma warning (push)
#pragma warning (disable : 4100)
static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
    emutls_mutex = (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
    if (!emutls_mutex) {
        win_error(GetLastError(), "_aligned_malloc");
        return FALSE;
    }
    InitializeCriticalSection(emutls_mutex);

    emutls_tls_index = TlsAlloc();
    if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
        emutls_exit();
        win_error(GetLastError(), "TlsAlloc");
        return FALSE;
    }
    atexit(&emutls_exit);
    return TRUE;
}

static __inline void emutls_init_once(void) {
    static INIT_ONCE once;
    InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
}

static __inline void emutls_lock() {
    EnterCriticalSection(emutls_mutex);
}

static __inline void emutls_unlock() {
    LeaveCriticalSection(emutls_mutex);
}

static __inline void emutls_setspecific(emutls_address_array *value) {
    if (TlsSetValue(emutls_tls_index, (LPVOID) value) == 0)
        win_abort(GetLastError(), "TlsSetValue");
}

static __inline emutls_address_array* emutls_getspecific() {
    LPVOID value = TlsGetValue(emutls_tls_index);
    if (value == NULL) {
        const DWORD err = GetLastError();
        if (err != ERROR_SUCCESS)
            win_abort(err, "TlsGetValue");
    }
    return (emutls_address_array*) value;
}

/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
 */
#if !defined(__ATOMIC_RELEASE)

enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };

static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
    assert(type == __ATOMIC_ACQUIRE);
#ifdef _WIN64
    return (uintptr_t) _load_be_u64(ptr);
#else
    return (uintptr_t) _load_be_u32(ptr);
#endif
}

static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
    assert(type == __ATOMIC_RELEASE);
#ifdef _WIN64
    _store_be_u64(ptr, val);
#else
    _store_be_u32(ptr, val);
#endif
}

#endif

#pragma warning (pop)

#endif

static size_t emutls_num_object = 0;  /* number of allocated TLS objects */

/* Free the allocated TLS data
 */
static void emutls_shutdown(emutls_address_array *array) {
    if (array) {
        uintptr_t i;
        for (i = 0; i < array->size; ++i) {
            if (array->data[i])
                emutls_memalign_free(array->data[i]);
        }
    }
}

/* For every TLS variable xyz,
 * there is one __emutls_control variable named __emutls_v.xyz.
 * If xyz has non-zero initial value, __emutls_v.xyz's "value"
 * will point to __emutls_t.xyz, which has the initial value.
 */
typedef struct __emutls_control {
    /* Must use gcc_word here, instead of size_t, to match GCC.  When
       gcc_word is larger than size_t, the upper extra bits are all
       zeros.  We can use variables of size_t to operate on size and
       align. */
    gcc_word size;   /* size of the object in bytes */
    gcc_word align;  /* alignment of the object in bytes */
    union {
        uintptr_t index;  /* data[index-1] is the object address */
        void* address;    /* object address, when in single thread env */
    } object;
    void* value;  /* null or non-zero initial value for the object */
} __emutls_control;
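
To make the scheme concrete, here is a sketch (mine, not from the commit) of what a compiler using emulated TLS conceptually emits for `__thread int xyz = 42;`. The dotted symbol names cannot be spelled in C, so asm labels stand in for them:

int __emutls_t_xyz __asm__("__emutls_t.xyz") = 42;        /* initial value */
__emutls_control __emutls_v_xyz __asm__("__emutls_v.xyz") = {
    sizeof(int), __alignof__(int),
    { 0 },              /* object.index == 0: no slot allocated yet */
    &__emutls_t_xyz     /* "value" points at the non-zero initializer */
};
/* every access "xyz = 1;" is rewritten to:
   *(int *)__emutls_get_address(&__emutls_v_xyz) = 1;  */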

/* Emulated TLS objects are always allocated at run-time. */
static __inline void *emutls_allocate_object(__emutls_control *control) {
    /* Use standard C types, check with gcc's emutls.o. */
    typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
    COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
    COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void*));

@@ -93,45 +286,19 @@ static __inline void *emutls_allocate_object(__emutls_control *control) {
    return base;
}

static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;

static size_t emutls_num_object = 0;  /* number of allocated TLS objects */

typedef struct emutls_address_array {
    uintptr_t size;  /* number of elements in the 'data' array */
    void* data[];
} emutls_address_array;

static pthread_key_t emutls_pthread_key;

static void emutls_key_destructor(void* ptr) {
    emutls_address_array* array = (emutls_address_array*)ptr;
    uintptr_t i;
    for (i = 0; i < array->size; ++i) {
        if (array->data[i])
            emutls_memalign_free(array->data[i]);
    }
    free(ptr);
}

static void emutls_init(void) {
    if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
        abort();
}

/* Returns control->object.index; set index if not allocated yet. */
static __inline uintptr_t emutls_get_index(__emutls_control *control) {
    uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
    if (!index) {
        static pthread_once_t once = PTHREAD_ONCE_INIT;
        pthread_once(&once, emutls_init);
        pthread_mutex_lock(&emutls_mutex);
        emutls_init_once();
        emutls_lock();
        index = control->object.index;
        if (!index) {
            index = ++emutls_num_object;
            __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
        }
        pthread_mutex_unlock(&emutls_mutex);
        emutls_unlock();
    }
    return index;
}
@@ -142,7 +309,7 @@ static __inline void emutls_check_array_set_size(emutls_address_array *array,
    if (array == NULL)
        abort();
    array->size = size;
    pthread_setspecific(emutls_pthread_key, (void*)array);
    emutls_setspecific(array);
}

/* Returns the new 'data' array size, number of elements,
@@ -156,22 +323,29 @@ static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) {
    return ((index + 1 + 15) & ~((uintptr_t)15)) - 1;
}

/* Returns the size in bytes required for an emutls_address_array with
 * N number of elements for data field.
 */
static __inline uintptr_t emutls_asize(uintptr_t N) {
    return N * sizeof(void *) + sizeof(emutls_address_array);
}
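
A quick check of the arithmetic above, assuming a 64-bit target where sizeof(void *) == 8 and sizeof(emutls_address_array) == 8:

/* emutls_new_data_array_size(1)  == ((1 + 16) & ~15) - 1 == 15
 * emutls_new_data_array_size(15) == 15
 * emutls_new_data_array_size(16) == 31
 * emutls_asize(15) == 15 * 8 + 8 == 128 bytes, i.e. the data slots plus the
 * size header always fill a multiple of 16 pointer-sized words. */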

/* Returns the thread local emutls_address_array.
 * Extends its size if necessary to hold address at index.
 */
static __inline emutls_address_array *
emutls_get_address_array(uintptr_t index) {
    emutls_address_array* array = pthread_getspecific(emutls_pthread_key);
    emutls_address_array* array = emutls_getspecific();
    if (array == NULL) {
        uintptr_t new_size = emutls_new_data_array_size(index);
        array = malloc(new_size * sizeof(void *) + sizeof(emutls_address_array));
        array = (emutls_address_array*) malloc(emutls_asize(new_size));
        if (array)
            memset(array->data, 0, new_size * sizeof(void*));
        emutls_check_array_set_size(array, new_size);
    } else if (index > array->size) {
        uintptr_t orig_size = array->size;
        uintptr_t new_size = emutls_new_data_array_size(index);
        array = realloc(array, new_size * sizeof(void *) + sizeof(emutls_address_array));
        array = (emutls_address_array*) realloc(array, emutls_asize(new_size));
        if (array)
            memset(array->data + orig_size, 0,
                   (new_size - orig_size) * sizeof(void*));
@@ -182,8 +356,8 @@ emutls_get_address_array(uintptr_t index) {

void* __emutls_get_address(__emutls_control* control) {
    uintptr_t index = emutls_get_index(control);
    emutls_address_array* array = emutls_get_address_array(index);
    if (array->data[index - 1] == NULL)
        array->data[index - 1] = emutls_allocate_object(control);
    return array->data[index - 1];
    emutls_address_array* array = emutls_get_address_array(index--);
    if (array->data[index] == NULL)
        array->data[index] = emutls_allocate_object(control);
    return array->data[index];
}

@@ -59,7 +59,7 @@ typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#elif defined(__x86_64__)
#elif defined(__x86_64__) || defined(__powerpc64__)
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x600000000000ULL;
  static const uptr kSpaceSize = 0x40000000000ULL;  // 4T.

@@ -70,12 +70,13 @@ static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // The actual string allocation happens here (for more details refer to the
  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT definition).
  "leak:*_dl_map_object_deps*";
#else
  "";
  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // definition.
  "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // TLS leak in some glibc versions, described in
  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
  "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);

@@ -32,7 +32,8 @@
// new architecture inside sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
     defined(__powerpc64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)

@@ -180,6 +180,7 @@ struct SizeClassAllocator32LocalCache {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr class_id_for_transfer_batch;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
@@ -188,32 +189,31 @@ struct SizeClassAllocator32LocalCache {
  void InitCache() {
    if (per_class_[1].max_count)
      return;
    // TransferBatch class is declared in SizeClassAllocator.
    uptr class_id_for_transfer_batch =
        SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * TransferBatch::MaxCached(i);
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // We transfer chunks between central and thread-local free lists in
      // batches. For small size classes we allocate batches separately. For
      // large size classes we may use one of the chunks to store the batch.
      // sizeof(TransferBatch) must be a power of 2 for more efficient
      // allocation.
      c->class_id_for_transfer_batch = (c->class_size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
          class_id_for_transfer_batch : 0;
    }
  }

  // TransferBatch class is declared in SizeClassAllocator.
  // We transfer chunks between central and thread-local free lists in batches.
  // For small size classes we allocate batches separately.
  // For large size classes we may use one of the chunks to store the batch.
  // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
  static uptr SizeClassForTransferBatch(uptr class_id) {
    if (Allocator::ClassIdToSize(class_id) <
        TransferBatch::AllocationSizeRequiredForNElements(
            TransferBatch::MaxCached(class_id)))
      return SizeClassMap::ClassID(sizeof(TransferBatch));
    return 0;
  }

  // Returns a TransferBatch suitable for class_id.
  // For small size classes allocates the batch from the allocator.
  // For large size classes simply returns b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
    if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }
@@ -223,7 +223,7 @@ struct SizeClassAllocator32LocalCache {
  // Does nothing for large size classes.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
    if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
      Deallocate(allocator, batch_class_id, b);
  }

@@ -304,7 +304,7 @@ INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
INTERCEPTOR(char*, textdomain, const char *domainname) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
  COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
  if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
  char *domain = REAL(textdomain)(domainname);
  if (domain) {
    COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
@@ -3330,7 +3330,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
// * GNU version returns message pointer, which points to either buf or some
//   static storage.
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
    SANITIZER_MAC
    SANITIZER_MAC || SANITIZER_ANDROID
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
// At least on OSX, buf contents are valid even when the call fails.
INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
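
For reference, the two strerror_r flavors distinguished by the comment above (an illustrative snippet of mine, not part of the diff):

#include <string.h>
#include <stdio.h>

void print_errno_message(int errnum) {
  char buf[256];
#if defined(_GNU_SOURCE)
  // GNU version: returns a char * that may point to buf or static storage.
  const char *msg = strerror_r(errnum, buf, sizeof(buf));
  fprintf(stderr, "%s\n", msg);
#else
  // POSIX version: returns 0 on success and fills buf.
  if (strerror_r(errnum, buf, sizeof(buf)) == 0)
    fprintf(stderr, "%s\n", buf);
#endif
}
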
@@ -87,7 +87,7 @@ namespace __sanitizer {
#elif defined(__mips__)
  const unsigned struct_kernel_stat_sz =
      SANITIZER_ANDROID ? FIRST_32_SECOND_64(104, 128) :
                          FIRST_32_SECOND_64(144, 216);
                          FIRST_32_SECOND_64(160, 216);
  const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
  const unsigned struct_kernel_stat_sz = 64;

@@ -460,6 +460,38 @@ struct ScudoAllocator {
    return UserPtr;
  }

  // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
  // we directly deallocate the chunk, otherwise the flow would lead to the
  // chunk being checksummed twice, once before Put and once in Recycle, with
  // no additional security value.
  void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                   uptr Size) {
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
    if (BypassQuarantine) {
      Chunk->eraseHeader();
      void *Ptr = Chunk->getAllocBeg(Header);
      if (LIKELY(!ThreadTornDown)) {
        getBackendAllocator().Deallocate(&Cache, Ptr);
      } else {
        SpinMutexLock Lock(&FallbackMutex);
        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
      }
    } else {
      UnpackedHeader NewHeader = *Header;
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, Header);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, Size);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, Size);
      }
    }
  }

  // Deallocates a Chunk, which means adding it to the delayed free list (or
  // Quarantine).
  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
@@ -499,24 +531,12 @@ struct ScudoAllocator {
      }
    }

    UnpackedHeader NewHeader = OldHeader;
    NewHeader.State = ChunkQuarantine;
    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);

    // If a small memory amount was allocated with a larger alignment, we want
    // to take that into account. Otherwise the Quarantine would be filled with
    // tiny chunks, taking a lot of VA memory. This an approximation of the
    // tiny chunks, taking a lot of VA memory. This is an approximation of the
    // usable size, that allows us to not call GetActuallyAllocatedSize.
    uptr LiableSize = Size + (OldHeader.Offset << MinAlignment);
    if (LIKELY(!ThreadTornDown)) {
      AllocatorQuarantine.Put(&ThreadQuarantineCache,
                              QuarantineCallback(&Cache), Chunk, LiableSize);
    } else {
      SpinMutexLock l(&FallbackMutex);
      AllocatorQuarantine.Put(&FallbackQuarantineCache,
                              QuarantineCallback(&FallbackAllocatorCache),
                              Chunk, LiableSize);
    }
    quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
  }

  // Reallocates a chunk. We can save on a new allocation if the new requested
@@ -541,11 +561,11 @@ struct ScudoAllocator {
                     OldPtr);
    }
    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
    UnpackedHeader NewHeader = OldHeader;
    // The new size still fits in the current chunk, and the size difference
    // is reasonable.
    if (NewSize <= UsableSize &&
        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
      UnpackedHeader NewHeader = OldHeader;
      NewHeader.SizeOrUnusedBytes =
          OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
@@ -558,17 +578,7 @@ struct ScudoAllocator {
      uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
          UsableSize - OldHeader.SizeOrUnusedBytes;
      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
      NewHeader.State = ChunkQuarantine;
      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
      if (LIKELY(!ThreadTornDown)) {
        AllocatorQuarantine.Put(&ThreadQuarantineCache,
                                QuarantineCallback(&Cache), Chunk, UsableSize);
      } else {
        SpinMutexLock l(&FallbackMutex);
        AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                QuarantineCallback(&FallbackAllocatorCache),
                                Chunk, UsableSize);
      }
      quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
    }
    return NewPtr;
  }

@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
#include "tsan_interceptors.h"

namespace __tsan {

@@ -29,6 +30,20 @@ const char *GetObjectTypeFromTag(uptr tag) {
  return registered_tags[tag];
}

typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
  ThreadState *thr = cur_thread();
  thr->external_tag = (uptr)tag;
  if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
  bool in_ignored_lib;
  if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
  }
  if (caller_pc) FuncExit(thr);
  thr->external_tag = 0;
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type) {
@@ -54,24 +69,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
  ThreadState *thr = cur_thread();
  thr->external_tag = (uptr)tag;
  FuncEntry(thr, (uptr)caller_pc);
  MemoryRead(thr, CALLERPC, (uptr)addr, kSizeLog8);
  FuncExit(thr);
  thr->external_tag = 0;
  ExternalAccess(addr, caller_pc, tag, MemoryRead);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
  CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
  ThreadState *thr = cur_thread();
  thr->external_tag = (uptr)tag;
  FuncEntry(thr, (uptr)caller_pc);
  MemoryWrite(thr, CALLERPC, (uptr)addr, kSizeLog8);
  FuncExit(thr);
  thr->external_tag = 0;
  ExternalAccess(addr, caller_pc, tag, MemoryWrite);
}
}  // extern "C"

@@ -210,7 +210,7 @@ struct ThreadSignalContext {
// The object is 64-byte aligned, because we want hot data to be located in
// a single cache line if possible (it's accessed in every interceptor).
static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
static LibIgnore *libignore() {
LibIgnore *libignore() {
  return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
}

@@ -269,6 +269,7 @@ ScopedInterceptor::~ScopedInterceptor() {
void ScopedInterceptor::EnableIgnores() {
  if (ignoring_) {
    ThreadIgnoreBegin(thr_, pc_, false);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
    if (in_ignored_lib_) {
      DCHECK(!thr_->in_ignored_lib);
      thr_->in_ignored_lib = true;
@@ -279,6 +280,7 @@ void ScopedInterceptor::EnableIgnores() {
void ScopedInterceptor::DisableIgnores() {
  if (ignoring_) {
    ThreadIgnoreEnd(thr_, pc_);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
    if (in_ignored_lib_) {
      DCHECK(thr_->in_ignored_lib);
      thr_->in_ignored_lib = false;

@@ -19,6 +19,8 @@ class ScopedInterceptor {
  bool ignoring_;
};

LibIgnore *libignore();

}  // namespace __tsan

#define SCOPED_INTERCEPTOR_RAW(func, ...) \
@@ -169,7 +169,7 @@ static void PrintMop(const ReportMop *mop, bool first) {
        MopDesc(first, mop->write, mop->atomic), mop->size,
        (void *)mop->addr, thread_name(thrbuf, mop->tid));
  } else {
    Printf("  %s access of object %s at %p by %s",
    Printf("  %s access of %s at %p by %s",
        ExternalMopDesc(first, mop->write), object_type,
        (void *)mop->addr, thread_name(thrbuf, mop->tid));
  }
@@ -202,7 +202,7 @@ static void PrintLocation(const ReportLocation *loc) {
        loc->heap_chunk_size, loc->heap_chunk_start,
        thread_name(thrbuf, loc->tid));
  } else {
    Printf("  Location is %s object of size %zu at %p allocated by %s:\n",
    Printf("  Location is %s of size %zu at %p allocated by %s:\n",
        object_type, loc->heap_chunk_size, loc->heap_chunk_start,
        thread_name(thrbuf, loc->tid));
  }

@@ -381,6 +381,7 @@ struct ThreadState {
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;

@@ -500,7 +500,7 @@ static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs)
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();

@@ -861,16 +861,29 @@ kill_dependency(_Tp __y) _NOEXCEPT
    return __y;
}

#define ATOMIC_BOOL_LOCK_FREE      __GCC_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE      __GCC_ATOMIC_CHAR_LOCK_FREE
#define ATOMIC_CHAR16_T_LOCK_FREE  __GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE  __GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE   __GCC_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE     __GCC_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE       __GCC_ATOMIC_INT_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE      __GCC_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE     __GCC_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE   __GCC_ATOMIC_POINTER_LOCK_FREE
#if defined(__CLANG_ATOMIC_BOOL_LOCK_FREE)
# define ATOMIC_BOOL_LOCK_FREE      __CLANG_ATOMIC_BOOL_LOCK_FREE
# define ATOMIC_CHAR_LOCK_FREE      __CLANG_ATOMIC_CHAR_LOCK_FREE
# define ATOMIC_CHAR16_T_LOCK_FREE  __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
# define ATOMIC_CHAR32_T_LOCK_FREE  __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
# define ATOMIC_WCHAR_T_LOCK_FREE   __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
# define ATOMIC_SHORT_LOCK_FREE     __CLANG_ATOMIC_SHORT_LOCK_FREE
# define ATOMIC_INT_LOCK_FREE       __CLANG_ATOMIC_INT_LOCK_FREE
# define ATOMIC_LONG_LOCK_FREE      __CLANG_ATOMIC_LONG_LOCK_FREE
# define ATOMIC_LLONG_LOCK_FREE     __CLANG_ATOMIC_LLONG_LOCK_FREE
# define ATOMIC_POINTER_LOCK_FREE   __CLANG_ATOMIC_POINTER_LOCK_FREE
#else
# define ATOMIC_BOOL_LOCK_FREE      __GCC_ATOMIC_BOOL_LOCK_FREE
# define ATOMIC_CHAR_LOCK_FREE      __GCC_ATOMIC_CHAR_LOCK_FREE
# define ATOMIC_CHAR16_T_LOCK_FREE  __GCC_ATOMIC_CHAR16_T_LOCK_FREE
# define ATOMIC_CHAR32_T_LOCK_FREE  __GCC_ATOMIC_CHAR32_T_LOCK_FREE
# define ATOMIC_WCHAR_T_LOCK_FREE   __GCC_ATOMIC_WCHAR_T_LOCK_FREE
# define ATOMIC_SHORT_LOCK_FREE     __GCC_ATOMIC_SHORT_LOCK_FREE
# define ATOMIC_INT_LOCK_FREE       __GCC_ATOMIC_INT_LOCK_FREE
# define ATOMIC_LONG_LOCK_FREE      __GCC_ATOMIC_LONG_LOCK_FREE
# define ATOMIC_LLONG_LOCK_FREE     __GCC_ATOMIC_LLONG_LOCK_FREE
# define ATOMIC_POINTER_LOCK_FREE   __GCC_ATOMIC_POINTER_LOCK_FREE
#endif

// general atomic<T>

@@ -307,6 +307,7 @@ long double truncl(long double x);
extern "C++" {

#include <type_traits>
#include <limits>

// signbit

@@ -324,22 +325,50 @@ __libcpp_signbit(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<std::is_floating_point<_A1>::value, bool>::type
signbit(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_signbit((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_integral<_A1>::value && std::is_signed<_A1>::value, bool>::type
signbit(_A1 __lcpp_x) _NOEXCEPT
{ return __lcpp_x < 0; }

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_integral<_A1>::value && !std::is_signed<_A1>::value, bool>::type
signbit(_A1) _NOEXCEPT
{ return false; }

#elif defined(_LIBCPP_MSVCRT)

template <typename _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<std::is_floating_point<_A1>::value, bool>::type
signbit(_A1 __lcpp_x) _NOEXCEPT
{
    return ::signbit(static_cast<typename std::__promote<_A1>::type>(__lcpp_x));
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_integral<_A1>::value && std::is_signed<_A1>::value, bool>::type
signbit(_A1 __lcpp_x) _NOEXCEPT
{ return __lcpp_x < 0; }

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_integral<_A1>::value && !std::is_signed<_A1>::value, bool>::type
signbit(_A1) _NOEXCEPT
{ return false; }

#endif  // signbit

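The observable effect of splitting the single is_arithmetic overload into the three overloads above (my example, not from the diff): integral arguments no longer take the floating-point promotion path at all.

#include <cmath>
#include <cassert>

int main() {
  assert(std::signbit(-2));    // signed-integral overload: __lcpp_x < 0
  assert(!std::signbit(7u));   // unsigned overload: constant false
  assert(std::signbit(-0.0));  // floating-point overload sees the sign bit
}
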
// fpclassify

@@ -358,22 +387,34 @@ __libcpp_fpclassify(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, int>::type
typename std::enable_if<std::is_floating_point<_A1>::value, int>::type
fpclassify(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_fpclassify((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_integral<_A1>::value, int>::type
fpclassify(_A1 __lcpp_x) _NOEXCEPT
{ return __lcpp_x == 0 ? FP_ZERO : FP_NORMAL; }

#elif defined(_LIBCPP_MSVCRT)

template <typename _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, int>::type
typename std::enable_if<std::is_floating_point<_A1>::value, bool>::type
fpclassify(_A1 __lcpp_x) _NOEXCEPT
{
    return ::fpclassify(static_cast<typename std::__promote<_A1>::type>(__lcpp_x));
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_integral<_A1>::value, int>::type
fpclassify(_A1 __lcpp_x) _NOEXCEPT
{ return __lcpp_x == 0 ? FP_ZERO : FP_NORMAL; }

#endif  // fpclassify

// isfinite
@@ -392,12 +433,22 @@ __libcpp_isfinite(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<
    std::is_arithmetic<_A1>::value && std::numeric_limits<_A1>::has_infinity,
    bool>::type
isfinite(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_isfinite((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_arithmetic<_A1>::value && !std::numeric_limits<_A1>::has_infinity,
    bool>::type
isfinite(_A1) _NOEXCEPT
{ return true; }

#endif  // isfinite

// isinf
@@ -416,12 +467,22 @@ __libcpp_isinf(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<
    std::is_arithmetic<_A1>::value && std::numeric_limits<_A1>::has_infinity,
    bool>::type
isinf(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_isinf((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<
    std::is_arithmetic<_A1>::value && !std::numeric_limits<_A1>::has_infinity,
    bool>::type
isinf(_A1) _NOEXCEPT
{ return false; }

#endif  // isinf

// isnan
@@ -440,12 +501,18 @@ __libcpp_isnan(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<std::is_floating_point<_A1>::value, bool>::type
isnan(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_isnan((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_integral<_A1>::value, bool>::type
isnan(_A1) _NOEXCEPT
{ return false; }

#endif  // isnan

// isnormal
@@ -464,12 +531,18 @@ __libcpp_isnormal(_A1 __lcpp_x) _NOEXCEPT

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_arithmetic<_A1>::value, bool>::type
typename std::enable_if<std::is_floating_point<_A1>::value, bool>::type
isnormal(_A1 __lcpp_x) _NOEXCEPT
{
    return __libcpp_isnormal((typename std::__promote<_A1>::type)__lcpp_x);
}

template <class _A1>
inline _LIBCPP_INLINE_VISIBILITY
typename std::enable_if<std::is_integral<_A1>::value, bool>::type
isnormal(_A1 __lcpp_x) _NOEXCEPT
{ return __lcpp_x != 0; }

#endif  // isnormal

// isgreater

@@ -685,7 +685,7 @@ inline _LIBCPP_INLINE_VISIBILITY
void
call_once(once_flag& __flag, const _Callable& __func)
{
    if (__flag.__state_ != ~0ul)
    if (__libcpp_acquire_load(&__flag.__state_) != ~0ul)
    {
        __call_once_param<const _Callable> __p(__func);
        __call_once(__flag.__state_, &__p, &__call_once_proxy<const _Callable>);
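
The point of the one-line change above is that the fast-path check must synchronize with the thread that completed initialization; a plain load could let a caller observe the "done" state while the initialized data is still stale. The same double-checked pattern written with std::atomic (a sketch of mine, not libc++'s implementation):

#include <atomic>
#include <mutex>

std::atomic<bool> done{false};
std::mutex m;
int value;  // written by the initializer, read by everyone

int get() {
  // The acquire load pairs with the release store below: a thread that
  // sees done == true also sees the write to value.
  if (!done.load(std::memory_order_acquire)) {
    std::lock_guard<std::mutex> lock(m);
    if (!done.load(std::memory_order_relaxed)) {
      value = 42;
      done.store(true, std::memory_order_release);
    }
  }
  return value;
}
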
@@ -397,6 +397,12 @@ class IEEEFloat final : public APFloatBase {
  /// consider inserting before falling back to scientific
  /// notation.  0 means to always use scientific notation.
  ///
  /// \param TruncateZero Indicates whether to remove trailing zeros in the
  /// fraction part. Setting it to false also forces output closer to default
  /// printf behavior: a lower-case 'e' is used as the exponent delimiter and
  /// the exponent always contains at least two digits.
  ///
  /// Number       Precision    MaxPadding      Result
  /// ------       ---------    ----------      ------
  /// 1.01E+4              5             2       10100
@@ -406,7 +412,7 @@ class IEEEFloat final : public APFloatBase {
  /// 1.01E-2              4             2       0.0101
  /// 1.01E-2              4             1       1.01E-2
  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
                unsigned FormatMaxPadding = 3) const;
                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const;

  /// If this value has an exact multiplicative inverse, store it in inv and
  /// return true.
@@ -649,7 +655,7 @@ class DoubleAPFloat final : public APFloatBase {
  bool isInteger() const;

  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
                unsigned FormatMaxPadding) const;
                unsigned FormatMaxPadding, bool TruncateZero = true) const;

  bool getExactInverse(APFloat *inv) const;

@@ -1144,9 +1150,9 @@ class APFloat : public APFloatBase {
  APFloat &operator=(APFloat &&RHS) = default;

  void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
                unsigned FormatMaxPadding = 3) const {
                unsigned FormatMaxPadding = 3, bool TruncateZero = true) const {
    APFLOAT_DISPATCH_ON_SEMANTICS(
        toString(Str, FormatPrecision, FormatMaxPadding));
        toString(Str, FormatPrecision, FormatMaxPadding, TruncateZero));
  }

  void print(raw_ostream &) const;

@@ -78,6 +78,8 @@ class LLVM_NODISCARD APInt {
    APINT_BITS_PER_WORD = APINT_WORD_SIZE * CHAR_BIT
  };

  static const WordType WORD_MAX = ~WordType(0);

private:
  /// This union is used to store the integer value. When the
  /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
@@ -90,6 +92,8 @@ class LLVM_NODISCARD APInt {

  friend struct DenseMapAPIntKeyInfo;

  friend class APSInt;

  /// \brief Fast internal constructor
  ///
  /// This constructor is used only internally for speed of construction of
@@ -134,15 +138,10 @@ class LLVM_NODISCARD APInt {
  /// zero'd out.
  APInt &clearUnusedBits() {
    // Compute how many bits are used in the final word
    unsigned wordBits = BitWidth % APINT_BITS_PER_WORD;
    if (wordBits == 0)
      // If all bits are used, we want to leave the value alone. This also
      // avoids the undefined behavior of >> when the shift is the same size as
      // the word size (64).
      return *this;
    unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;

    // Mask out the high bits.
    uint64_t mask = UINT64_MAX >> (APINT_BITS_PER_WORD - wordBits);
    uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - WordBits);
    if (isSingleWord())
      VAL &= mask;
    else
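
The new WordBits expression folds the old zero-shift special case into the general path. Checking the arithmetic, assuming 64-bit words:

// BitWidth == 65: WordBits = ((65 - 1) % 64) + 1 == 1,  mask = WORD_MAX >> 63
// BitWidth == 64: WordBits = ((64 - 1) % 64) + 1 == 64, mask = WORD_MAX >> 0
// BitWidth == 1:  WordBits = 1,                         mask = 0x1

With the old formula, BitWidth == 64 gave wordBits == 0, which would have required shifting by the full word size (undefined behavior) and hence needed the early return.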
@@ -194,6 +193,9 @@ class LLVM_NODISCARD APInt {
  /// out-of-line slow case for lshr.
  void lshrSlowCase(unsigned ShiftAmt);

  /// out-of-line slow case for ashr.
  void ashrSlowCase(unsigned ShiftAmt);

  /// out-of-line slow case for operator=
  void AssignSlowCase(const APInt &RHS);

@@ -230,6 +232,14 @@ class LLVM_NODISCARD APInt {
  /// out-of-line slow case for operator^=.
  void XorAssignSlowCase(const APInt& RHS);

  /// Unsigned comparison. Returns -1, 0, or 1 if this APInt is less than, equal
  /// to, or greater than RHS.
  int compare(const APInt &RHS) const LLVM_READONLY;

  /// Signed comparison. Returns -1, 0, or 1 if this APInt is less than, equal
  /// to, or greater than RHS.
  int compareSigned(const APInt &RHS) const LLVM_READONLY;

public:
  /// \name Constructors
  /// @{
@@ -363,7 +373,7 @@ class LLVM_NODISCARD APInt {
  /// This checks to see if the value has all bits of the APInt are set or not.
  bool isAllOnesValue() const {
    if (isSingleWord())
      return VAL == UINT64_MAX >> (APINT_BITS_PER_WORD - BitWidth);
      return VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
    return countPopulationSlowCase() == BitWidth;
  }

@@ -445,7 +455,7 @@ class LLVM_NODISCARD APInt {
    assert(numBits != 0 && "numBits must be non-zero");
    assert(numBits <= BitWidth && "numBits out of range");
    if (isSingleWord())
      return VAL == (UINT64_MAX >> (APINT_BITS_PER_WORD - numBits));
      return VAL == (WORD_MAX >> (APINT_BITS_PER_WORD - numBits));
    unsigned Ones = countTrailingOnesSlowCase();
    return (numBits == Ones) &&
           ((Ones + countLeadingZerosSlowCase()) == BitWidth);
@@ -509,7 +519,7 @@ class LLVM_NODISCARD APInt {
  ///
  /// \returns the all-ones value for an APInt of the specified bit-width.
  static APInt getAllOnesValue(unsigned numBits) {
    return APInt(numBits, UINT64_MAX, true);
    return APInt(numBits, WORD_MAX, true);
  }

  /// \brief Get the '0' value.
@@ -886,7 +896,26 @@ class LLVM_NODISCARD APInt {
  /// \brief Arithmetic right-shift function.
  ///
  /// Arithmetic right-shift this APInt by shiftAmt.
  APInt ashr(unsigned shiftAmt) const;
  APInt ashr(unsigned ShiftAmt) const {
    APInt R(*this);
    R.ashrInPlace(ShiftAmt);
    return R;
  }

  /// Arithmetic right-shift this APInt by ShiftAmt in place.
  void ashrInPlace(unsigned ShiftAmt) {
    assert(ShiftAmt <= BitWidth && "Invalid shift amount");
    if (isSingleWord()) {
      int64_t SExtVAL = SignExtend64(VAL, BitWidth);
      if (ShiftAmt == BitWidth)
        VAL = SExtVAL >> (APINT_BITS_PER_WORD - 1);  // Fill with sign bit.
      else
        VAL = SExtVAL >> ShiftAmt;
      clearUnusedBits();
      return;
    }
    ashrSlowCase(ShiftAmt);
  }

  /// \brief Logical right-shift function.
  ///
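
A quick illustration of the in-place arithmetic shift above (values are mine, not from the commit):

// APInt(8, 0x80) is -128 as a signed 8-bit value.
// SignExtend64(0x80, 8) == 0xFFFFFFFFFFFFFF80; shifting right by 2 gives
// 0xFFFFFFFFFFFFFFE0, and clearUnusedBits() masks back to 8 bits:
APInt V(8, 0x80);
V.ashrInPlace(2);   // V == 0xE0, i.e. -32: the sign bit is replicated.
// Shifting by the full bit-width fills the value with the sign bit:
APInt W(8, 0x80);
W.ashrInPlace(8);   // W == 0xFF, i.e. -1.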
@@ -928,7 +957,14 @@ class LLVM_NODISCARD APInt {
  /// \brief Arithmetic right-shift function.
  ///
  /// Arithmetic right-shift this APInt by shiftAmt.
  APInt ashr(const APInt &shiftAmt) const;
  APInt ashr(const APInt &ShiftAmt) const {
    APInt R(*this);
    R.ashrInPlace(ShiftAmt);
    return R;
  }

  /// Arithmetic right-shift this APInt by shiftAmt in place.
  void ashrInPlace(const APInt &shiftAmt);

  /// \brief Logical right-shift function.
  ///
@@ -1079,7 +1115,7 @@ class LLVM_NODISCARD APInt {
  /// the validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when both are considered unsigned.
  bool ult(const APInt &RHS) const LLVM_READONLY;
  bool ult(const APInt &RHS) const { return compare(RHS) < 0; }

  /// \brief Unsigned less than comparison
  ///
@@ -1098,7 +1134,7 @@ class LLVM_NODISCARD APInt {
  /// validity of the less-than relationship.
  ///
  /// \returns true if *this < RHS when both are considered signed.
  bool slt(const APInt &RHS) const LLVM_READONLY;
  bool slt(const APInt &RHS) const { return compareSigned(RHS) < 0; }

  /// \brief Signed less than comparison
  ///
@@ -1117,7 +1153,7 @@ class LLVM_NODISCARD APInt {
  /// validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when both are considered unsigned.
  bool ule(const APInt &RHS) const { return ult(RHS) || eq(RHS); }
  bool ule(const APInt &RHS) const { return compare(RHS) <= 0; }

  /// \brief Unsigned less or equal comparison
  ///
@@ -1133,7 +1169,7 @@ class LLVM_NODISCARD APInt {
  /// validity of the less-or-equal relationship.
  ///
  /// \returns true if *this <= RHS when both are considered signed.
  bool sle(const APInt &RHS) const { return slt(RHS) || eq(RHS); }
  bool sle(const APInt &RHS) const { return compareSigned(RHS) <= 0; }

  /// \brief Signed less or equal comparison
  ///
@@ -1149,7 +1185,7 @@ class LLVM_NODISCARD APInt {
  /// the validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when both are considered unsigned.
  bool ugt(const APInt &RHS) const { return !ult(RHS) && !eq(RHS); }
  bool ugt(const APInt &RHS) const { return !ule(RHS); }

  /// \brief Unsigned greater than comparison
  ///
@@ -1168,7 +1204,7 @@ class LLVM_NODISCARD APInt {
  /// validity of the greater-than relationship.
  ///
  /// \returns true if *this > RHS when both are considered signed.
  bool sgt(const APInt &RHS) const { return !slt(RHS) && !eq(RHS); }
  bool sgt(const APInt &RHS) const { return !sle(RHS); }

  /// \brief Signed greater than comparison
  ///
@@ -1286,7 +1322,7 @@ class LLVM_NODISCARD APInt {
  /// \brief Set every bit to 1.
  void setAllBits() {
    if (isSingleWord())
      VAL = UINT64_MAX;
      VAL = WORD_MAX;
    else
      // Set all the bits in all the words.
      memset(pVal, -1, getNumWords() * APINT_WORD_SIZE);
@@ -1316,7 +1352,7 @@ class LLVM_NODISCARD APInt {
      return;
    }
    if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
      uint64_t mask = UINT64_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
      uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
      mask <<= loBit;
      if (isSingleWord())
        VAL |= mask;
@@ -1358,7 +1394,7 @@ class LLVM_NODISCARD APInt {
  /// \brief Toggle every bit to its opposite value.
  void flipAllBits() {
    if (isSingleWord()) {
      VAL ^= UINT64_MAX;
      VAL ^= WORD_MAX;
      clearUnusedBits();
    } else {
      flipAllBitsSlowCase();
@@ -1653,7 +1689,7 @@ class LLVM_NODISCARD APInt {
  /// referencing 2 in a space where 2 does not exist.
  unsigned nearestLogBase2() const {
    // Special case when we have a bitwidth of 1. If VAL is 1, then we
    // get 0. If VAL is 0, we get UINT64_MAX which gets truncated to
    // get 0. If VAL is 0, we get WORD_MAX which gets truncated to
    // UINT32_MAX.
    if (BitWidth == 1)
      return VAL - 1;

@@ -125,7 +125,10 @@ class LLVM_NODISCARD APSInt : public APInt {
    return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
  }
  APSInt& operator>>=(unsigned Amt) {
    *this = *this >> Amt;
    if (IsUnsigned)
      lshrInPlace(Amt);
    else
      ashrInPlace(Amt);
    return *this;
  }

@@ -179,7 +182,7 @@ class LLVM_NODISCARD APSInt : public APInt {
    return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
  }
  APSInt& operator<<=(unsigned Amt) {
    *this = *this << Amt;
    static_cast<APInt&>(*this) <<= Amt;
    return *this;
  }

@@ -285,12 +288,12 @@ class LLVM_NODISCARD APSInt : public APInt {
  /// \brief Compare underlying values of two numbers.
  static int compareValues(const APSInt &I1, const APSInt &I2) {
    if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
      return I1 == I2 ? 0 : I1 > I2 ? 1 : -1;
      return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);

    // Check for a bit-width mismatch.
    if (I1.getBitWidth() > I2.getBitWidth())
      return compareValues(I1, I2.extend(I1.getBitWidth()));
    else if (I2.getBitWidth() > I1.getBitWidth())
    if (I2.getBitWidth() > I1.getBitWidth())
      return compareValues(I1.extend(I2.getBitWidth()), I2);

    // We have a signedness mismatch. Check for negative values and do an
@@ -305,7 +308,7 @@ class LLVM_NODISCARD APSInt : public APInt {
      return 1;
    }

    return I1.eq(I2) ? 0 : I1.ugt(I2) ? 1 : -1;
    return I1.compare(I2);
  }

  static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }

@ -15,7 +15,6 @@
|
||||
#define LLVM_ADT_BITVECTOR_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/Support/MathExtras.h"
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
@ -35,9 +34,8 @@ class BitVector {
|
||||
static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
|
||||
"Unsupported word size");
|
||||
|
||||
BitWord *Bits; // Actual bits.
|
||||
unsigned Size; // Size of bitvector in bits.
|
||||
unsigned Capacity; // Number of BitWords allocated in the Bits array.
|
||||
MutableArrayRef<BitWord> Bits; // Actual bits.
|
||||
unsigned Size; // Size of bitvector in bits.
|
||||
|
||||
public:
|
||||
typedef unsigned size_type;
|
||||
@ -77,16 +75,14 @@ class BitVector {
|
||||
|
||||
|
||||
/// BitVector default ctor - Creates an empty bitvector.
|
||||
BitVector() : Size(0), Capacity(0) {
|
||||
Bits = nullptr;
|
||||
}
|
||||
BitVector() : Size(0) {}
|
||||
|
||||
/// BitVector ctor - Creates a bitvector of specified number of bits. All
|
||||
/// bits are initialized to the specified value.
|
||||
explicit BitVector(unsigned s, bool t = false) : Size(s) {
|
||||
Capacity = NumBitWords(s);
|
||||
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
|
||||
init_words(Bits, Capacity, t);
|
||||
size_t Capacity = NumBitWords(s);
|
||||
Bits = allocate(Capacity);
|
||||
init_words(Bits, t);
|
||||
if (t)
|
||||
clear_unused_bits();
|
||||
}
|
||||
@ -94,25 +90,21 @@ class BitVector {
|
||||
/// BitVector copy ctor.
|
||||
BitVector(const BitVector &RHS) : Size(RHS.size()) {
|
||||
if (Size == 0) {
|
||||
Bits = nullptr;
|
||||
Capacity = 0;
|
||||
Bits = MutableArrayRef<BitWord>();
|
||||
return;
|
||||
}
|
||||
|
||||
Capacity = NumBitWords(RHS.size());
|
||||
Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
|
||||
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
|
||||
size_t Capacity = NumBitWords(RHS.size());
|
||||
Bits = allocate(Capacity);
|
||||
std::memcpy(Bits.data(), RHS.Bits.data(), Capacity * sizeof(BitWord));
|
||||
}
|
||||
|
||||
BitVector(BitVector &&RHS)
|
||||
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
|
||||
RHS.Bits = nullptr;
|
||||
RHS.Size = RHS.Capacity = 0;
|
||||
BitVector(BitVector &&RHS) : Bits(RHS.Bits), Size(RHS.Size) {
|
||||
RHS.Bits = MutableArrayRef<BitWord>();
|
||||
RHS.Size = 0;
|
||||
}
|
||||
|
||||
~BitVector() {
|
||||
std::free(Bits);
|
||||
}
|
||||
~BitVector() { std::free(Bits.data()); }
|
||||
|
||||
/// empty - Tests whether there are no bits in this bitvector.
|
||||
bool empty() const { return Size == 0; }
|
||||
@ -163,6 +155,22 @@ class BitVector {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/// find_last - Returns the index of the last set bit, -1 if none of the bits
|
||||
/// are set.
|
||||
int find_last() const {
|
||||
if (Size == 0)
|
||||
return -1;
|
||||
|
||||
unsigned N = NumBitWords(size());
|
||||
assert(N > 0);
|
||||
|
||||
unsigned i = N - 1;
|
||||
while (i > 0 && Bits[i] == BitWord(0))
|
||||
--i;
|
||||
|
||||
return int((i + 1) * BITWORD_SIZE - countLeadingZeros(Bits[i])) - 1;
|
||||
}
|
||||
|
||||
/// find_first_unset - Returns the index of the first unset bit, -1 if all
|
||||
/// of the bits are set.
|
||||
int find_first_unset() const {
|
||||
@ -174,6 +182,30 @@ class BitVector {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/// find_last_unset - Returns the index of the last unset bit, -1 if all of
|
||||
/// the bits are set.
|
||||
int find_last_unset() const {
|
||||
if (Size == 0)
|
||||
return -1;
|
||||
|
||||
const unsigned N = NumBitWords(size());
|
||||
assert(N > 0);
|
||||
|
||||
unsigned i = N - 1;
|
||||
BitWord W = Bits[i];
|
||||
|
||||
// The last word in the BitVector has some unused bits. Set them all to 1
// so they don't get treated as valid unset bits.
unsigned UnusedCount = BITWORD_SIZE - size() % BITWORD_SIZE;
W |= maskLeadingOnes<BitWord>(UnusedCount);

while (W == ~BitWord(0) && --i > 0)
W = Bits[i];

return int((i + 1) * BITWORD_SIZE - countLeadingOnes(W)) - 1;
}
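For orientation, a minimal usage sketch of the two new queries above (illustrative, not part of the patch):

  llvm::BitVector BV(10); // ten bits, all initially unset
  BV.set(3);
  BV.set(7);
  // find_last() scans words from the top and applies countLeadingZeros to
  // the first non-zero word, so this yields 7.
  int LastSet = BV.find_last();
  // find_last_unset() first ORs ones over the unused high bits of the last
  // word so they are not misreported; here it yields 9.
  int LastUnset = BV.find_last_unset();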

/// find_next - Returns the index of the next set bit following the
/// "Prev" bit. Returns -1 if the next set bit is not found.
int find_next(unsigned Prev) const {
@ -228,10 +260,10 @@ class BitVector {

/// resize - Grow or shrink the bitvector.
void resize(unsigned N, bool t = false) {
if (N > Capacity * BITWORD_SIZE) {
unsigned OldCapacity = Capacity;
if (N > getBitCapacity()) {
unsigned OldCapacity = Bits.size();
grow(N);
init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t);
init_words(Bits.drop_front(OldCapacity), t);
}

// Set any old unused bits that are now included in the BitVector. This
@ -248,19 +280,19 @@ class BitVector {
}

void reserve(unsigned N) {
if (N > Capacity * BITWORD_SIZE)
if (N > getBitCapacity())
grow(N);
}

// Set, reset, flip
BitVector &set() {
init_words(Bits, Capacity, true);
init_words(Bits, true);
clear_unused_bits();
return *this;
}

BitVector &set(unsigned Idx) {
assert(Bits && "Bits never allocated");
assert(Bits.data() && "Bits never allocated");
Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
return *this;
}
@ -295,7 +327,7 @@ class BitVector {
}

BitVector &reset() {
init_words(Bits, Capacity, false);
init_words(Bits, false);
return *this;
}

@ -562,21 +594,21 @@ class BitVector {

Size = RHS.size();
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
if (Size <= getBitCapacity()) {
if (Size)
std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
std::memcpy(Bits.data(), RHS.Bits.data(), RHSWords * sizeof(BitWord));
clear_unused_bits();
return *this;
}

// Grow the bitvector to have enough elements.
Capacity = RHSWords;
assert(Capacity > 0 && "negative capacity?");
BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
unsigned NewCapacity = RHSWords;
assert(NewCapacity > 0 && "negative capacity?");
auto NewBits = allocate(NewCapacity);
std::memcpy(NewBits.data(), RHS.Bits.data(), NewCapacity * sizeof(BitWord));

// Destroy the old bits.
std::free(Bits);
std::free(Bits.data());
Bits = NewBits;

return *this;
@ -585,13 +617,12 @@ class BitVector {
const BitVector &operator=(BitVector &&RHS) {
if (this == &RHS) return *this;

std::free(Bits);
std::free(Bits.data());
Bits = RHS.Bits;
Size = RHS.Size;
Capacity = RHS.Capacity;

RHS.Bits = nullptr;
RHS.Size = RHS.Capacity = 0;
RHS.Bits = MutableArrayRef<BitWord>();
RHS.Size = 0;

return *this;
}
@ -599,7 +630,6 @@ class BitVector {
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
std::swap(Capacity, RHS.Capacity);
}

//===--------------------------------------------------------------------===//
@ -659,14 +689,14 @@ class BitVector {

uint32_t NumWords = NumBitWords(Size);

auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_back(Count);
auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_front(Count);
auto Src = Bits.take_front(NumWords).drop_back(Count);
auto Dest = Bits.take_front(NumWords).drop_front(Count);

// Since we always move Word-sized chunks of data with src and dest both
// aligned to a word-boundary, we don't need to worry about endianness
// here.
std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
std::memset(Bits, 0, Count * sizeof(BitWord));
std::memset(Bits.data(), 0, Count * sizeof(BitWord));
clear_unused_bits();
}

@ -679,14 +709,19 @@ class BitVector {

uint32_t NumWords = NumBitWords(Size);

auto Src = ArrayRef<BitWord>(Bits, NumWords).drop_front(Count);
auto Dest = MutableArrayRef<BitWord>(Bits, NumWords).drop_back(Count);
auto Src = Bits.take_front(NumWords).drop_front(Count);
auto Dest = Bits.take_front(NumWords).drop_back(Count);
assert(Dest.size() == Src.size());

std::memmove(Dest.begin(), Src.begin(), Dest.size() * sizeof(BitWord));
std::memset(Dest.end(), 0, Count * sizeof(BitWord));
}

MutableArrayRef<BitWord> allocate(size_t NumWords) {
BitWord *RawBits = (BitWord *)std::malloc(NumWords * sizeof(BitWord));
return MutableArrayRef<BitWord>(RawBits, NumWords);
}

int next_unset_in_word(int WordIndex, BitWord Word) const {
unsigned Result = WordIndex * BITWORD_SIZE + countTrailingOnes(Word);
return Result < size() ? Result : -1;
@ -700,8 +735,8 @@ class BitVector {
void set_unused_bits(bool t = true) {
// Set high words first.
unsigned UsedWords = NumBitWords(Size);
if (Capacity > UsedWords)
init_words(&Bits[UsedWords], (Capacity-UsedWords), t);
if (Bits.size() > UsedWords)
init_words(Bits.drop_front(UsedWords), t);

// Then set any stray high bits of the last used word.
unsigned ExtraBits = Size % BITWORD_SIZE;
@ -720,16 +755,17 @@ class BitVector {
}

void grow(unsigned NewSize) {
Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
assert(Capacity > 0 && "realloc-ing zero space");
Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));

size_t NewCapacity = std::max<size_t>(NumBitWords(NewSize), Bits.size() * 2);
assert(NewCapacity > 0 && "realloc-ing zero space");
BitWord *NewBits =
(BitWord *)std::realloc(Bits.data(), NewCapacity * sizeof(BitWord));
Bits = MutableArrayRef<BitWord>(NewBits, NewCapacity);
clear_unused_bits();
}

void init_words(BitWord *B, unsigned NumWords, bool t) {
if (NumWords > 0)
memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
void init_words(MutableArrayRef<BitWord> B, bool t) {
if (B.size() > 0)
memset(B.data(), 0 - (int)t, B.size() * sizeof(BitWord));
}

template<bool AddBits, bool InvertMask>
@ -761,7 +797,8 @@ class BitVector {

public:
/// Return the size (in bytes) of the bit vector.
size_t getMemorySize() const { return Capacity * sizeof(BitWord); }
size_t getMemorySize() const { return Bits.size() * sizeof(BitWord); }
size_t getBitCapacity() const { return Bits.size() * BITWORD_SIZE; }
};

static inline size_t capacity_in_bytes(const BitVector &X) {

@ -117,9 +117,7 @@ class SmallBitVector {
}

// Return the size.
size_t getSmallSize() const {
return getSmallRawBits() >> SmallNumDataBits;
}
size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; }

void setSmallSize(size_t Size) {
setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
@ -216,6 +214,16 @@ class SmallBitVector {
return getPointer()->find_first();
}

int find_last() const {
if (isSmall()) {
uintptr_t Bits = getSmallBits();
if (Bits == 0)
return -1;
return NumBaseBits - countLeadingZeros(Bits);
}
return getPointer()->find_last();
}

/// Returns the index of the first unset bit, -1 if all of the bits are set.
int find_first_unset() const {
if (isSmall()) {
@ -228,6 +236,17 @@ class SmallBitVector {
return getPointer()->find_first_unset();
}

int find_last_unset() const {
if (isSmall()) {
if (count() == getSmallSize())
return -1;

uintptr_t Bits = getSmallBits();
return NumBaseBits - countLeadingOnes(Bits);
}
return getPointer()->find_last_unset();
}

/// Returns the index of the next set bit following the "Prev" bit.
/// Returns -1 if the next set bit is not found.
int find_next(unsigned Prev) const {

@ -76,6 +76,36 @@ static inline std::string toHex(StringRef Input) {
return Output;
}

static inline uint8_t hexFromNibbles(char MSB, char LSB) {
unsigned U1 = hexDigitValue(MSB);
unsigned U2 = hexDigitValue(LSB);
assert(U1 != -1U && U2 != -1U);

return static_cast<uint8_t>((U1 << 4) | U2);
}

/// Convert hexadecimal string \p Input to its binary representation.
/// The return string is half the size of \p Input.
static inline std::string fromHex(StringRef Input) {
if (Input.empty())
return std::string();

std::string Output;
Output.reserve((Input.size() + 1) / 2);
if (Input.size() % 2 == 1) {
Output.push_back(hexFromNibbles('0', Input.front()));
Input = Input.drop_front();
}

assert(Input.size() % 2 == 0);
while (!Input.empty()) {
uint8_t Hex = hexFromNibbles(Input[0], Input[1]);
Output.push_back(Hex);
Input = Input.drop_front(2);
}
return Output;
}
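A quick worked example of the helpers above (illustrative): an odd-length input is handled by treating the first character as the low nibble of a zero-padded leading byte.

  std::string A = llvm::fromHex("2A1B"); // yields the bytes {0x2A, 0x1B}
  std::string B = llvm::fromHex("ABC");  // odd length: {0x0A, 0xBC}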

static inline std::string utostr(uint64_t X, bool isNeg = false) {
char Buffer[21];
char *BufPtr = std::end(Buffer);

@ -140,7 +140,8 @@ class Triple {
Myriad,
AMD,
Mesa,
LastVendorType = Mesa
SUSE,
LastVendorType = SUSE
};
enum OSType {
UnknownOS,

@ -35,6 +35,7 @@ class Function;
class Instruction;
class DominatorTree;
class AssumptionCache;
struct KnownBits;

class DemandedBits {
public:
@ -58,8 +59,7 @@ class DemandedBits {
void determineLiveOperandBits(const Instruction *UserI,
const Instruction *I, unsigned OperandNo,
const APInt &AOut, APInt &AB,
APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2);
KnownBits &Known, KnownBits &Known2);

bool Analyzed;

@ -47,7 +47,32 @@ namespace llvm {
class Type;
class Value;

struct SimplifyQuery {
const DataLayout &DL;
const TargetLibraryInfo *TLI = nullptr;
const DominatorTree *DT = nullptr;
AssumptionCache *AC = nullptr;
const Instruction *CxtI = nullptr;
SimplifyQuery(const DataLayout &DL) : DL(DL) {}

SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC = nullptr,
const Instruction *CXTI = nullptr)
: DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
SimplifyQuery getWithInstruction(Instruction *I) const {
SimplifyQuery Copy(*this);
Copy.CxtI = I;
return Copy;
}
};

// NOTE: the explicit multiple argument versions of these functions are
// deprecated.
// Please use the SimplifyQuery versions in new code.
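A hypothetical call-site sketch of the migration this NOTE asks for (the surrounding values LHS, RHS, I, TLI, DT and AC are assumed, not part of this header):

  const SimplifyQuery Q(I.getModule()->getDataLayout(), TLI, DT, AC);
  if (Value *V = SimplifyAddInst(LHS, RHS, /*isNSW=*/false, /*isNUW=*/false,
                                 Q.getWithInstruction(&I)))
    I.replaceAllUsesWith(V);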

/// Given operands for an Add, fold the result or return null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -56,6 +81,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a Sub, fold the result or return null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -64,6 +91,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FAdd, fold the result or return null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -72,6 +101,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FSub, fold the result or return null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -80,6 +111,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FMul, fold the result or return null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -88,6 +121,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a Mul, fold the result or return null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -95,6 +129,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an SDiv, fold the result or return null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -102,6 +137,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a UDiv, fold the result or return null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -109,6 +145,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FDiv, fold the result or return null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -117,6 +155,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an SRem, fold the result or return null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -124,6 +163,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a URem, fold the result or return null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -131,6 +171,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FRem, fold the result or return null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -139,6 +181,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a Shl, fold the result or return null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -147,6 +191,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a LShr, fold the result or return null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -155,6 +201,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an AShr, fold the result or return null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -163,6 +211,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an And, fold the result or return null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -170,6 +219,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an Or, fold the result or return null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -177,6 +227,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an Xor, fold the result or return null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -184,6 +235,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an ICmpInst, fold the result or return null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -192,6 +245,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an FCmpInst, fold the result or return null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -200,6 +255,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a SelectInst, fold the result or return null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q);
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -208,6 +265,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a GetElementPtrInst, fold the result or return null.
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const SimplifyQuery &Q);
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -216,6 +275,9 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -224,6 +286,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an ExtractValueInst, fold the result or return null.
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -232,6 +296,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for an ExtractElementInst, fold the result or return null.
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
const SimplifyQuery &Q);
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -240,6 +306,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a CastInst, fold the result or return null.
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const SimplifyQuery &Q);
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -248,6 +316,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a ShuffleVectorInst, fold the result or return null.
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const SimplifyQuery &Q);
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -259,6 +329,8 @@ namespace llvm {

/// Given operands for a CmpInst, fold the result or return null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -267,6 +339,8 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given operands for a BinaryOperator, fold the result or return null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -278,7 +352,9 @@ namespace llvm {

/// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
/// result. In case we don't need FastMathFlags, simply fall back to
/// SimplifyBinOp.
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const FastMathFlags &FMF, const DataLayout &DL,
FastMathFlags FMF, const SimplifyQuery &Q);
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
FastMathFlags FMF, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
@ -286,6 +362,8 @@ namespace llvm {

/// Given a function and iterators over arguments, fold the result or return
/// null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const SimplifyQuery &Q);
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
@ -294,6 +372,7 @@ namespace llvm {
const Instruction *CxtI = nullptr);

/// Given a function and set of arguments, fold the result or return null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
@ -302,6 +381,8 @@ namespace llvm {

/// See if we can compute a simplified version of this instruction. If not,
/// return null.
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);
Value *SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,

@ -158,7 +158,7 @@ class LoopBase {
/// True if terminator in the block can branch to another block that is
/// outside of the current loop.
bool isLoopExiting(const BlockT *BB) const {
for (const auto Succ : children<const BlockT*>(BB)) {
for (const auto &Succ : children<const BlockT*>(BB)) {
if (!contains(Succ))
return true;
}

@ -35,7 +35,7 @@ template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
for (const auto BB : blocks())
for (const auto Succ : children<BlockT*>(BB))
for (const auto &Succ : children<BlockT*>(BB))
if (!contains(Succ)) {
// Not in current loop? It must be an exit block.
ExitingBlocks.push_back(BB);
@ -61,7 +61,7 @@ template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
for (const auto BB : blocks())
for (const auto Succ : children<BlockT*>(BB))
for (const auto &Succ : children<BlockT*>(BB))
if (!contains(Succ))
// Not in current loop? It must be an exit block.
ExitBlocks.push_back(Succ);
@ -83,7 +83,7 @@ template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
for (const auto BB : blocks())
for (const auto Succ : children<BlockT*>(BB))
for (const auto &Succ : children<BlockT*>(BB))
if (!contains(Succ))
// Not in current loop? It must be an exit block.
ExitEdges.emplace_back(BB, Succ);

@ -708,10 +708,24 @@ class RegionInfoBase {
/// The top level region.
RegionT *TopLevelRegion;

private:
/// Map every BB to the smallest region that contains BB.
BBtoRegionMap BBtoRegion;

protected:
/// \brief Update references to a RegionInfoT held by the RegionT managed here
///
/// This is a post-move helper. Regions hold references to the owning
/// RegionInfo object. After a move these need to be fixed.
template<typename TheRegionT>
void updateRegionTree(RegionInfoT &RI, TheRegionT *R) {
if (!R)
return;
R->RI = &RI;
for (auto &SubR : *R)
updateRegionTree(RI, SubR.get());
}

private:
/// \brief Wipe this region tree's state without releasing any resources.
///
/// This is essentially a post-move helper only. It leaves the object in an
@ -879,10 +893,12 @@ class RegionInfo : public RegionInfoBase<RegionTraits<Function>> {

~RegionInfo() override;

RegionInfo(RegionInfo &&Arg)
: Base(std::move(static_cast<Base &>(Arg))) {}
RegionInfo(RegionInfo &&Arg) : Base(std::move(static_cast<Base &>(Arg))) {
updateRegionTree(*this, TopLevelRegion);
}
RegionInfo &operator=(RegionInfo &&RHS) {
Base::operator=(std::move(static_cast<Base &>(RHS)));
updateRegionTree(*this, TopLevelRegion);
return *this;
}
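The hazard the new updateRegionTree calls guard against, reduced to a standalone sketch (hypothetical types mirroring the pattern, not the RegionInfo classes themselves):

  struct Tree;
  struct Node { Tree *Owner; };
  struct Tree {
    Node *Root = nullptr;
    Tree() = default;
    Tree(Tree &&Other) : Root(Other.Root) {
      Other.Root = nullptr;
      if (Root)
        Root->Owner = this; // like updateRegionTree: re-point owned nodes
    }
  };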

@ -877,6 +877,47 @@ class ScalarEvolution {
bool ControlsExit,
bool AllowPredicates = false);

// Helper functions for computeExitLimitFromCond to avoid exponential time
// complexity.

class ExitLimitCache {
// It may look like we need to key on the whole (L, TBB, FBB, ControlsExit,
// AllowPredicates) tuple, but recursive calls to
// computeExitLimitFromCondCached from computeExitLimitFromCondImpl only
// vary the \c ExitCond and \c ControlsExit parameters. We remember the
// initial values of the other parameters to assert our assumption.
SmallDenseMap<PointerIntPair<Value *, 1>, ExitLimit> TripCountMap;

const Loop *L;
BasicBlock *TBB;
BasicBlock *FBB;
bool AllowPredicates;

public:
ExitLimitCache(const Loop *L, BasicBlock *TBB, BasicBlock *FBB,
bool AllowPredicates)
: L(L), TBB(TBB), FBB(FBB), AllowPredicates(AllowPredicates) {}

Optional<ExitLimit> find(const Loop *L, Value *ExitCond, BasicBlock *TBB,
BasicBlock *FBB, bool ControlsExit,
bool AllowPredicates);

void insert(const Loop *L, Value *ExitCond, BasicBlock *TBB,
BasicBlock *FBB, bool ControlsExit, bool AllowPredicates,
const ExitLimit &EL);
};

typedef ExitLimitCache ExitLimitCacheTy;
ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
const Loop *L, Value *ExitCond,
BasicBlock *TBB, BasicBlock *FBB,
bool ControlsExit,
bool AllowPredicates);
ExitLimit computeExitLimitFromCondImpl(ExitLimitCacheTy &Cache, const Loop *L,
Value *ExitCond, BasicBlock *TBB,
BasicBlock *FBB, bool ControlsExit,
bool AllowPredicates);
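In outline, the cached/impl split works like classic memoization over the exit condition; a simplified sketch of the expected call pattern (the real implementation also asserts that the invariant fields match the cache):

  ExitLimit computeExitLimitFromCondCached(ExitLimitCacheTy &Cache,
                                           const Loop *L, Value *ExitCond,
                                           BasicBlock *TBB, BasicBlock *FBB,
                                           bool ControlsExit,
                                           bool AllowPredicates) {
    if (auto MaybeEL =
            Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates))
      return *MaybeEL; // hit: reuse the previously computed limit
    ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB,
                                                ControlsExit, AllowPredicates);
    Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL);
    return EL; // miss: compute once, remember, return
  }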

/// Compute the number of times the backedge of the specified loop will
/// execute if its exit condition were a conditional branch of the ICmpInst
/// ExitCond, TBB, and FBB. If AllowPredicates is set, this call will try

@ -29,6 +29,7 @@ template <typename T> class ArrayRef;
class DominatorTree;
class GEPOperator;
class Instruction;
struct KnownBits;
class Loop;
class LoopInfo;
class OptimizationRemarkEmitter;
@ -49,7 +50,7 @@ template <typename T> class ArrayRef;
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
void computeKnownBits(const Value *V, KnownBits &Known,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,

@ -793,6 +793,9 @@ class DIEUnit {
uint32_t Length; /// The length in bytes of all of the DIEs in this unit.
const uint16_t Version; /// The Dwarf version number for this unit.
const uint8_t AddrSize; /// The size in bytes of an address for this unit.
protected:
~DIEUnit() = default;

public:
DIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag);
DIEUnit(const DIEUnit &RHS) = delete;
@ -808,6 +811,10 @@ class DIEUnit {
this->Section = Section;
}

virtual const MCSymbol *getCrossSectionRelativeBaseAddress() const {
return nullptr;
}

/// Return the section that this DIEUnit will be emitted into.
///
/// \returns Section pointer which can be NULL.
@ -822,6 +829,10 @@ class DIEUnit {
const DIE &getUnitDie() const { return Die; }
};

struct BasicDIEUnit final : DIEUnit {
BasicDIEUnit(uint16_t Version, uint8_t AddrSize, dwarf::Tag UnitTag)
: DIEUnit(Version, AddrSize, UnitTag) {}
};

//===--------------------------------------------------------------------===//
/// DIELoc - Represents an expression location.

@ -18,20 +18,52 @@

#include "llvm/ADT/Optional.h"
#include <cstdint>
#include <bitset>
#include <functional>

namespace llvm {
class MachineInstr;
class MachineInstrBuilder;
class MachineFunction;
class MachineOperand;
class MachineRegisterInfo;
class RegisterBankInfo;
class TargetInstrInfo;
class TargetRegisterInfo;

/// Container class for CodeGen predicate results.
/// This is convenient because std::bitset does not have a constructor
/// with an initializer list of set bits.
///
/// Each InstructionSelector subclass should define a PredicateBitset class with:
/// const unsigned MAX_SUBTARGET_PREDICATES = 192;
/// using PredicateBitset = PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;
/// and update the constant to suit the target. Tablegen provides a suitable
/// definition for the predicates in use in <Target>GenGlobalISel.inc when
/// GET_GLOBALISEL_PREDICATE_BITSET is defined.
template <std::size_t MaxPredicates>
class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
public:
// Cannot inherit constructors because it's not supported by VC++.
PredicateBitsetImpl() = default;

PredicateBitsetImpl(const std::bitset<MaxPredicates> &B)
: std::bitset<MaxPredicates>(B) {}

PredicateBitsetImpl(std::initializer_list<unsigned> Init) {
for (auto I : Init)
std::bitset<MaxPredicates>::set(I);
}
};
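Following the comment above, usage would look roughly like this (the constant and the alias are the documented convention, not declarations in this header):

  const unsigned MAX_SUBTARGET_PREDICATES = 192;
  using PredicateBitset = llvm::PredicateBitsetImpl<MAX_SUBTARGET_PREDICATES>;

  PredicateBitset Available({1, 5, 42}); // sets exactly bits 1, 5 and 42
  PredicateBitset Required({5, 42});
  bool Satisfied = (Available & Required) == Required; // true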

/// Provides the logic to select generic machine instructions.
class InstructionSelector {
public:
virtual ~InstructionSelector() {}

/// This is executed before selecting a function.
virtual void beginFunction(const MachineFunction &MF) {}

/// Select the (possibly generic) instruction \p I to only use target-specific
/// opcodes. It is OK to insert multiple instructions, but they cannot be
/// generic pre-isel instructions.
@ -46,6 +78,8 @@ class InstructionSelector {
virtual bool select(MachineInstr &I) const = 0;

protected:
typedef std::function<void(MachineInstrBuilder &)> ComplexRendererFn;

InstructionSelector();

/// Mutate the newly-selected instruction \p I to constrain its (possibly

@ -65,7 +65,6 @@ class MachineOperand {
MO_CFIIndex, ///< MCCFIInstruction index.
MO_IntrinsicID, ///< Intrinsic ID for ISel
MO_Predicate, ///< Generic predicate for ISel
MO_Placeholder, ///< Placeholder for GlobalISel ComplexPattern result.
};

private:
@ -768,11 +767,6 @@ class MachineOperand {
return Op;
}

static MachineOperand CreatePlaceholder() {
MachineOperand Op(MachineOperand::MO_Placeholder);
return Op;
}

friend class MachineInstr;
friend class MachineRegisterInfo;
private:

@ -654,6 +654,15 @@ class SelectionDAG {
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}

/// Return an ISD::BUILD_VECTOR node. The number of elements in VT,
/// which must be a vector type, must match the number of operands in Ops.
/// The operands must have the same type as (or, for integers, a type wider
/// than) VT's element type.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef<SDUse> Ops) {
// VerifySDNode (via InsertNode) checks BUILD_VECTOR later.
return getNode(ISD::BUILD_VECTOR, DL, VT, Ops);
}

/// Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all
/// elements. VT must be a vector type. Op's type must be the same as (or,
/// for integers, a type wider than) VT's element type.

@ -546,7 +546,7 @@ enum class TrampolineType : uint16_t { TrampIncremental, BranchIsland };
// These values correspond to the CV_SourceChksum_t enumeration.
enum class FileChecksumKind : uint8_t { None, MD5, SHA1, SHA256 };

enum LineFlags : uint32_t {
enum LineFlags : uint16_t {
HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
};
}

@ -81,7 +81,7 @@ template <> class VarStreamArrayExtractor<codeview::LineColumnEntry> {
BinaryStreamReader Reader(Stream);
if (auto EC = Reader.readObject(BlockHeader))
return EC;
bool HasColumn = Header->Flags & LineFlags::HaveColumns;
bool HasColumn = Header->Flags & uint32_t(LineFlags::HaveColumns);
uint32_t LineInfoSize =
BlockHeader->NumLines *
(sizeof(LineNumberEntry) + (HasColumn ? sizeof(ColumnNumberEntry) : 0));
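The masking idiom in the changed line, in isolation (a standalone illustration assuming <cstdint>, not the surrounding codeview types):

  enum LineFlags : uint16_t { HaveColumns = 1 };
  bool hasColumns(uint16_t Flags) {
    // Widening the enumerator explicitly keeps the bitwise test in plain
    // integer arithmetic, independent of how the flags field is wrapped.
    return (Flags & uint32_t(LineFlags::HaveColumns)) != 0;
  }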

@ -18,9 +18,9 @@ namespace llvm {
class DWARFCompileUnit : public DWARFUnit {
public:
DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS, bool LE,
bool IsDWO, const DWARFUnitSectionBase &UnitSection,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO, const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *Entry)
: DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
UnitSection, Entry) {}

@ -50,6 +50,11 @@ class raw_ostream;
// entire size of the debug info sections.
typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t>> RelocAddrMap;

/// Reads a value from the data extractor and applies a relocation to the
/// result if one exists for the given offset.
uint64_t getRelocatedValue(const DataExtractor &Data, uint32_t Size,
uint32_t *Off, const RelocAddrMap *Relocs);

/// DWARFContext
/// This data structure is the top level entity that deals with dwarf debug
/// information parsing. The actual data is supplied through pure virtual
@ -216,7 +221,7 @@ class DWARFContext : public DIContext {
virtual StringRef getEHFrameSection() = 0;
virtual const DWARFSection &getLineSection() = 0;
virtual StringRef getStringSection() = 0;
virtual StringRef getRangeSection() = 0;
virtual const DWARFSection& getRangeSection() = 0;
virtual StringRef getMacinfoSection() = 0;
virtual StringRef getPubNamesSection() = 0;
virtual StringRef getPubTypesSection() = 0;
@ -231,7 +236,7 @@ class DWARFContext : public DIContext {
virtual const DWARFSection &getLocDWOSection() = 0;
virtual StringRef getStringDWOSection() = 0;
virtual StringRef getStringOffsetDWOSection() = 0;
virtual StringRef getRangeDWOSection() = 0;
virtual const DWARFSection &getRangeDWOSection() = 0;
virtual StringRef getAddrSection() = 0;
virtual const DWARFSection& getAppleNamesSection() = 0;
virtual const DWARFSection& getAppleTypesSection() = 0;
@ -271,7 +276,7 @@ class DWARFContextInMemory : public DWARFContext {
StringRef EHFrameSection;
DWARFSection LineSection;
StringRef StringSection;
StringRef RangeSection;
DWARFSection RangeSection;
StringRef MacinfoSection;
StringRef PubNamesSection;
StringRef PubTypesSection;
@ -286,7 +291,7 @@ class DWARFContextInMemory : public DWARFContext {
DWARFSection LocDWOSection;
StringRef StringDWOSection;
StringRef StringOffsetDWOSection;
StringRef RangeDWOSection;
DWARFSection RangeDWOSection;
StringRef AddrSection;
DWARFSection AppleNamesSection;
DWARFSection AppleTypesSection;
@ -319,7 +324,7 @@ class DWARFContextInMemory : public DWARFContext {
StringRef getEHFrameSection() override { return EHFrameSection; }
const DWARFSection &getLineSection() override { return LineSection; }
StringRef getStringSection() override { return StringSection; }
StringRef getRangeSection() override { return RangeSection; }
const DWARFSection &getRangeSection() override { return RangeSection; }
StringRef getMacinfoSection() override { return MacinfoSection; }
StringRef getPubNamesSection() override { return PubNamesSection; }
StringRef getPubTypesSection() override { return PubTypesSection; }
@ -346,7 +351,7 @@ class DWARFContextInMemory : public DWARFContext {
return StringOffsetDWOSection;
}

StringRef getRangeDWOSection() override { return RangeDWOSection; }
const DWARFSection &getRangeDWOSection() override { return RangeDWOSection; }

StringRef getAddrSection() override {
return AddrSection;

@ -11,6 +11,8 @@
#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGRANGELIST_H

#include "llvm/Support/DataExtractor.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"

#include <cassert>
#include <cstdint>
#include <utility>
@ -71,7 +73,7 @@ class DWARFDebugRangeList {

void clear();
void dump(raw_ostream &OS) const;
bool extract(DataExtractor data, uint32_t *offset_ptr);
bool extract(DataExtractor data, uint32_t *offset_ptr, const RelocAddrMap& Relocs);
const std::vector<RangeListEntry> &getEntries() { return Entries; }

/// getAbsoluteRanges - Returns absolute address ranges defined by this range

@ -30,9 +30,9 @@ class DWARFTypeUnit : public DWARFUnit {

public:
DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS, bool LE, bool IsDWO,
const DWARFUnitSectionBase &UnitSection,
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO, const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *Entry)
: DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, IsDWO,
UnitSection, Entry) {}

@ -56,9 +56,9 @@ class DWARFUnitSectionBase {
~DWARFUnitSectionBase() = default;

virtual void parseImpl(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS,
bool isLittleEndian, bool isDWO) = 0;
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS,
StringRef LS, bool isLittleEndian, bool isDWO) = 0;
};

const DWARFUnitIndex &getDWARFUnitIndex(DWARFContext &Context,
@ -88,9 +88,9 @@ class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>,

private:
void parseImpl(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS, bool LE,
bool IsDWO) override {
const DWARFDebugAbbrev *DA, const DWARFSection *RS,
StringRef SS, StringRef SOS, StringRef AOS, StringRef LS,
bool LE, bool IsDWO) override {
if (Parsed)
return;
const auto &Index = getDWARFUnitIndex(Context, UnitType::Section);
@ -115,7 +115,7 @@ class DWARFUnit {
const DWARFSection &InfoSection;

const DWARFDebugAbbrev *Abbrev;
StringRef RangeSection;
const DWARFSection *RangeSection;
uint32_t RangeSectionBase;
StringRef LineSection;
StringRef StringSection;
@ -171,7 +171,7 @@ class DWARFUnit {

public:
DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
const DWARFDebugAbbrev *DA, const DWARFSection *RS, StringRef SS,
StringRef SOS, StringRef AOS, StringRef LS, bool LE, bool IsDWO,
const DWARFUnitSectionBase &UnitSection,
const DWARFUnitIndex::Entry *IndexEntry = nullptr);
@ -192,7 +192,7 @@ class DWARFUnit {
// Recursively update address to Die map.
void updateAddressDieMap(DWARFDie Die);

void setRangesSection(StringRef RS, uint32_t Base) {
void setRangesSection(const DWARFSection *RS, uint32_t Base) {
RangeSection = RS;
RangeSectionBase = Base;
}

@ -102,7 +102,8 @@ class DIARawSymbol : public IPDBRawSymbol {
uint32_t getVirtualBaseDispIndex() const override;
uint32_t getVirtualBaseOffset() const override;
uint32_t getVirtualTableShapeId() const override;
std::unique_ptr<PDBSymbolTypeVTable> getVirtualBaseTableType() const override;
std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
PDB_SymType getSymTag() const override;
PDB_UniqueId getGuid() const override;

@ -113,7 +113,7 @@ class IPDBRawSymbol {
virtual Variant getValue() const = 0;
virtual uint32_t getVirtualBaseDispIndex() const = 0;
virtual uint32_t getVirtualBaseOffset() const = 0;
virtual std::unique_ptr<PDBSymbolTypeVTable>
virtual std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const = 0;
virtual uint32_t getVirtualTableShapeId() const = 0;
virtual PDB_DataKind getDataKind() const = 0;

@ -40,6 +40,8 @@ class ModStream {
iterator_range<codeview::ModuleSubstreamArray::Iterator>
lines(bool *HadError) const;

bool hasLineInfo() const;

Error commit();

private:

@ -101,7 +101,8 @@ class NativeRawSymbol : public IPDBRawSymbol {
uint32_t getVirtualBaseDispIndex() const override;
uint32_t getVirtualBaseOffset() const override;
uint32_t getVirtualTableShapeId() const override;
std::unique_ptr<PDBSymbolTypeVTable> getVirtualBaseTableType() const override;
std::unique_ptr<PDBSymbolTypeBuiltin>
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
PDB_SymType getSymTag() const override;
PDB_UniqueId getGuid() const override;

@ -15,6 +15,7 @@

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"

#include <list>
#include <memory>
@ -32,40 +33,63 @@ class PDBSymbolTypeVTable;

class ClassLayout;
class BaseClassLayout;
class StorageItemBase;
class LayoutItemBase;
class UDTLayoutBase;

class StorageItemBase {
class LayoutItemBase {
public:
StorageItemBase(const UDTLayoutBase &Parent, const PDBSymbol &Symbol,
const std::string &Name, uint32_t OffsetInParent,
uint32_t Size);
virtual ~StorageItemBase() {}
LayoutItemBase(const UDTLayoutBase *Parent, const PDBSymbol *Symbol,
const std::string &Name, uint32_t OffsetInParent,
uint32_t Size, bool IsElided);
virtual ~LayoutItemBase() {}

virtual uint32_t deepPaddingSize() const;
uint32_t deepPaddingSize() const;
virtual uint32_t immediatePadding() const { return 0; }
virtual uint32_t tailPadding() const;

const UDTLayoutBase &getParent() const { return Parent; }
const UDTLayoutBase *getParent() const { return Parent; }
StringRef getName() const { return Name; }
uint32_t getOffsetInParent() const { return OffsetInParent; }
uint32_t getSize() const { return SizeOf; }
const PDBSymbol &getSymbol() const { return Symbol; }
uint32_t getLayoutSize() const { return LayoutSize; }
const PDBSymbol *getSymbol() const { return Symbol; }
const BitVector &usedBytes() const { return UsedBytes; }
bool isElided() const { return IsElided; }
virtual bool isVBPtr() const { return false; }

uint32_t containsOffset(uint32_t Off) const {
uint32_t Begin = getOffsetInParent();
uint32_t End = Begin + getSize();
return (Off >= Begin && Off < End);
}

protected:
const UDTLayoutBase &Parent;
const PDBSymbol &Symbol;
const PDBSymbol *Symbol = nullptr;
const UDTLayoutBase *Parent = nullptr;
BitVector UsedBytes;
std::string Name;
uint32_t OffsetInParent = 0;
uint32_t SizeOf = 0;
uint32_t LayoutSize = 0;
bool IsElided = false;
};

class DataMemberLayoutItem : public StorageItemBase {
class VBPtrLayoutItem : public LayoutItemBase {
public:
VBPtrLayoutItem(const UDTLayoutBase &Parent,
std::unique_ptr<PDBSymbolTypeBuiltin> Sym, uint32_t Offset,
uint32_t Size);
virtual bool isVBPtr() const { return true; }

private:
std::unique_ptr<PDBSymbolTypeBuiltin> Type;
};

class DataMemberLayoutItem : public LayoutItemBase {
public:
DataMemberLayoutItem(const UDTLayoutBase &Parent,
std::unique_ptr<PDBSymbolData> DataMember);

virtual uint32_t deepPaddingSize() const;

const PDBSymbolData &getDataMember();
bool hasUDTLayout() const;
const ClassLayout &getUDTLayout() const;
@ -75,77 +99,73 @@ class DataMemberLayoutItem : public StorageItemBase {
std::unique_ptr<ClassLayout> UdtLayout;
};

class VTableLayoutItem : public StorageItemBase {
class VTableLayoutItem : public LayoutItemBase {
public:
VTableLayoutItem(const UDTLayoutBase &Parent,
std::unique_ptr<PDBSymbolTypeVTable> VTable);
ArrayRef<PDBSymbolFunc *> funcs() const { return VTableFuncs; }

uint32_t getElementSize() const { return ElementSize; }

void setFunction(uint32_t Index, PDBSymbolFunc &Func) {
VTableFuncs[Index] = &Func;
}

private:
uint32_t ElementSize = 0;
std::unique_ptr<PDBSymbolTypeVTableShape> Shape;
std::unique_ptr<PDBSymbolTypeVTable> VTable;
std::vector<PDBSymbolFunc *> VTableFuncs;
};

class UDTLayoutBase {
class UDTLayoutBase : public LayoutItemBase {
template <typename T> using UniquePtrVector = std::vector<std::unique_ptr<T>>;

public:
UDTLayoutBase(const PDBSymbol &Symbol, const std::string &Name,
uint32_t Size);
UDTLayoutBase(const UDTLayoutBase *Parent, const PDBSymbol &Sym,
const std::string &Name, uint32_t OffsetInParent, uint32_t Size,
bool IsElided);

uint32_t shallowPaddingSize() const;
uint32_t deepPaddingSize() const;
uint32_t tailPadding() const override;

const BitVector &usedBytes() const { return UsedBytes; }
ArrayRef<LayoutItemBase *> layout_items() const { return LayoutItems; }

uint32_t getClassSize() const { return SizeOf; }
ArrayRef<BaseClassLayout *> bases() const { return AllBases; }
ArrayRef<BaseClassLayout *> regular_bases() const { return NonVirtualBases; }
ArrayRef<BaseClassLayout *> virtual_bases() const { return VirtualBases; }

ArrayRef<std::unique_ptr<StorageItemBase>> layout_items() const {
return ChildStorage;
}

VTableLayoutItem *findVTableAtOffset(uint32_t RelativeOffset);

StringRef getUDTName() const { return Name; }

ArrayRef<BaseClassLayout *> bases() const { return BaseClasses; }
ArrayRef<std::unique_ptr<PDBSymbolTypeBaseClass>> vbases() const {
return VirtualBases;
}
uint32_t directVirtualBaseCount() const { return DirectVBaseCount; }

ArrayRef<std::unique_ptr<PDBSymbolFunc>> funcs() const { return Funcs; }

ArrayRef<std::unique_ptr<PDBSymbol>> other_items() const { return Other; }

const PDBSymbol &getSymbolBase() const { return SymbolBase; }

protected:
bool hasVBPtrAtOffset(uint32_t Off) const;
void initializeChildren(const PDBSymbol &Sym);

void addChildToLayout(std::unique_ptr<StorageItemBase> Child);
void addVirtualOverride(PDBSymbolFunc &Func);
void addVirtualIntro(PDBSymbolFunc &Func);
void addChildToLayout(std::unique_ptr<LayoutItemBase> Child);

const PDBSymbol &SymbolBase;
std::string Name;
uint32_t SizeOf = 0;
uint32_t DirectVBaseCount = 0;

BitVector UsedBytes;
UniquePtrVector<PDBSymbol> Other;
UniquePtrVector<PDBSymbolFunc> Funcs;
UniquePtrVector<PDBSymbolTypeBaseClass> VirtualBases;
UniquePtrVector<StorageItemBase> ChildStorage;
std::vector<std::list<StorageItemBase *>> ChildrenPerByte;
std::vector<BaseClassLayout *> BaseClasses;
UniquePtrVector<LayoutItemBase> ChildStorage;
std::vector<LayoutItemBase *> LayoutItems;

std::vector<BaseClassLayout *> AllBases;
ArrayRef<BaseClassLayout *> NonVirtualBases;
ArrayRef<BaseClassLayout *> VirtualBases;

VTableLayoutItem *VTable = nullptr;
VBPtrLayoutItem *VBPtr = nullptr;
};

class BaseClassLayout : public UDTLayoutBase {
public:
BaseClassLayout(const UDTLayoutBase &Parent, uint32_t OffsetInParent,
bool Elide, std::unique_ptr<PDBSymbolTypeBaseClass> Base);

const PDBSymbolTypeBaseClass &getBase() const { return *Base; }
bool isVirtualBase() const { return IsVirtualBase; }
bool isEmptyBase() { return SizeOf == 1 && LayoutSize == 0; }

private:
std::unique_ptr<PDBSymbolTypeBaseClass> Base;
bool IsVirtualBase;
};

class ClassLayout : public UDTLayoutBase {
@ -156,24 +176,13 @@ class ClassLayout : public UDTLayoutBase {
ClassLayout(ClassLayout &&Other) = default;

const PDBSymbolTypeUDT &getClass() const { return UDT; }
uint32_t immediatePadding() const override;

private:
BitVector ImmediateUsedBytes;
std::unique_ptr<PDBSymbolTypeUDT> OwnedStorage;
const PDBSymbolTypeUDT &UDT;
};

class BaseClassLayout : public UDTLayoutBase, public StorageItemBase {
public:
BaseClassLayout(const UDTLayoutBase &Parent,
std::unique_ptr<PDBSymbolTypeBaseClass> Base);

const PDBSymbolTypeBaseClass &getBase() const { return *Base; }
bool isVirtualBase() const { return IsVirtualBase; }

private:
std::unique_ptr<PDBSymbolTypeBaseClass> Base;
bool IsVirtualBase;
};
}
} // namespace llvm

@ -348,7 +348,7 @@ class SerializationTraits<ChannelT, Error> {
// key of the deserializers map to save us from duplicating the string in
// the serializer. This should be changed to use a stringpool if we switch
// to a map type that may move keys in memory.
std::lock_guard<std::mutex> Lock(DeserializersMutex);
std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);
auto I =
Deserializers.insert(Deserializers.begin(),
std::make_pair(std::move(Name),
@ -358,7 +358,7 @@ class SerializationTraits<ChannelT, Error> {

{
assert(KeyName != nullptr && "No keyname pointer");
std::lock_guard<std::mutex> Lock(SerializersMutex);
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
// FIXME: Move capture Serialize once we have C++14.
Serializers[ErrorInfoT::classID()] =
[KeyName, Serialize](ChannelT &C, const ErrorInfoBase &EIB) -> Error {
@ -372,7 +372,8 @@ class SerializationTraits<ChannelT, Error> {
}

static Error serialize(ChannelT &C, Error &&Err) {
std::lock_guard<std::mutex> Lock(SerializersMutex);
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);

if (!Err)
return serializeSeq(C, std::string());

@ -386,7 +387,7 @@ class SerializationTraits<ChannelT, Error> {
}

static Error deserialize(ChannelT &C, Error &Err) {
std::lock_guard<std::mutex> Lock(DeserializersMutex);
std::lock_guard<std::recursive_mutex> Lock(DeserializersMutex);

std::string Key;
if (auto Err = deserializeSeq(C, Key))
@ -406,8 +407,6 @@ class SerializationTraits<ChannelT, Error> {
private:

static Error serializeAsStringError(ChannelT &C, const ErrorInfoBase &EIB) {
assert(EIB.dynamicClassID() != StringError::classID() &&
"StringError serialization not registered");
std::string ErrMsg;
{
raw_string_ostream ErrMsgStream(ErrMsg);
@ -417,17 +416,17 @@ class SerializationTraits<ChannelT, Error> {
inconvertibleErrorCode()));
}

static std::mutex SerializersMutex;
static std::mutex DeserializersMutex;
static std::recursive_mutex SerializersMutex;
static std::recursive_mutex DeserializersMutex;
static std::map<const void*, WrappedErrorSerializer> Serializers;
static std::map<std::string, WrappedErrorDeserializer> Deserializers;
};

template <typename ChannelT>
std::mutex SerializationTraits<ChannelT, Error>::SerializersMutex;
std::recursive_mutex SerializationTraits<ChannelT, Error>::SerializersMutex;

template <typename ChannelT>
std::mutex SerializationTraits<ChannelT, Error>::DeserializersMutex;
std::recursive_mutex SerializationTraits<ChannelT, Error>::DeserializersMutex;
|
||||
|
||||
template <typename ChannelT>
|
||||
std::map<const void*,
|
||||
@ -439,27 +438,39 @@ std::map<std::string,
|
||||
typename SerializationTraits<ChannelT, Error>::WrappedErrorDeserializer>
|
||||
SerializationTraits<ChannelT, Error>::Deserializers;
|
||||
|
||||
/// Registers a serializer and deserializer for the given error type on the
|
||||
/// given channel type.
|
||||
template <typename ChannelT, typename ErrorInfoT, typename SerializeFtor,
|
||||
typename DeserializeFtor>
|
||||
void registerErrorSerialization(std::string Name, SerializeFtor &&Serialize,
|
||||
DeserializeFtor &&Deserialize) {
|
||||
SerializationTraits<ChannelT, Error>::template registerErrorType<ErrorInfoT>(
|
||||
std::move(Name),
|
||||
std::forward<SerializeFtor>(Serialize),
|
||||
std::forward<DeserializeFtor>(Deserialize));
|
||||
}
|
||||
|
||||
/// Registers serialization/deserialization for StringError.
|
||||
template <typename ChannelT>
|
||||
void registerStringError() {
|
||||
static bool AlreadyRegistered = false;
|
||||
if (!AlreadyRegistered) {
|
||||
SerializationTraits<ChannelT, Error>::
|
||||
template registerErrorType<StringError>(
|
||||
"StringError",
|
||||
[](ChannelT &C, const StringError &SE) {
|
||||
return serializeSeq(C, SE.getMessage());
|
||||
},
|
||||
[](ChannelT &C, Error &Err) {
|
||||
ErrorAsOutParameter EAO(&Err);
|
||||
std::string Msg;
|
||||
if (auto E2 = deserializeSeq(C, Msg))
|
||||
return E2;
|
||||
Err =
|
||||
make_error<StringError>(std::move(Msg),
|
||||
orcError(
|
||||
OrcErrorCode::UnknownErrorCodeFromRemote));
|
||||
return Error::success();
|
||||
});
|
||||
registerErrorSerialization<ChannelT, StringError>(
|
||||
"StringError",
|
||||
[](ChannelT &C, const StringError &SE) {
|
||||
return serializeSeq(C, SE.getMessage());
|
||||
},
|
||||
[](ChannelT &C, Error &Err) -> Error {
|
||||
ErrorAsOutParameter EAO(&Err);
|
||||
std::string Msg;
|
||||
if (auto E2 = deserializeSeq(C, Msg))
|
||||
return E2;
|
||||
Err =
|
||||
make_error<StringError>(std::move(Msg),
|
||||
orcError(
|
||||
OrcErrorCode::UnknownErrorCodeFromRemote));
|
||||
return Error::success();
|
||||
});
|
||||
AlreadyRegistered = true;
|
||||
}
|
||||
}
|
||||
|
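For illustration, a client of the registerErrorSerialization helper introduced above would wire up its own error type much like registerStringError does. The sketch below is hypothetical, not part of this change: MyError and its getMessage() accessor are invented stand-ins for a user-defined ErrorInfo subclass.

// Hypothetical client code (not from this diff): make a custom llvm::Error
// subclass serializable over an ORC RPC channel. Assumes MyError derives
// from ErrorInfo<MyError> and carries a std::string reachable via
// getMessage(); the channel type is whatever the RPC endpoint uses.
template <typename ChannelT>
void registerMyError() {
  registerErrorSerialization<ChannelT, MyError>(
      "MyError",
      // Serializer: write the payload onto the channel.
      [](ChannelT &C, const MyError &ME) {
        return serializeSeq(C, ME.getMessage());
      },
      // Deserializer: rebuild the error from the wire format.
      [](ChannelT &C, Error &Err) -> Error {
        ErrorAsOutParameter EAO(&Err);
        std::string Msg;
        if (auto E2 = deserializeSeq(C, Msg))
          return E2;
        Err = make_error<MyError>(std::move(Msg));
        return Error::success();
      });
}
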
@ -509,7 +509,7 @@ class AttributeList {
unsigned getSlotIndex(unsigned Slot) const;

/// \brief Return the attributes at the given slot.
AttributeList getSlotAttributes(unsigned Slot) const;
AttributeSet getSlotAttributes(unsigned Slot) const;

void dump() const;
};

@ -93,7 +93,7 @@ class ConstantRange {
///
/// NB! The returned set does *not* contain **all** possible values of X for
/// which "X BinOpC Y" does not wrap -- some viable values of X may be
/// missing, so you cannot use this to contrain X's range. E.g. in the last
/// missing, so you cannot use this to constrain X's range. E.g. in the last
/// example, "(-2) + 1" is both nsw and nuw (so the "X" could be -2), but (-2)
/// is not in the set returned.
///

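The caveat this comment corrects is easy to demonstrate concretely. The standalone sketch below is our illustration, not LLVM code: it brute-forces signed wrap of X + 1 over i8 and shows why a conservative no-wrap region that omits some non-wrapping X (such as -2) is still sound, just not exact.

// Standalone illustration (not from this diff): for i8 and the constant
// Y = 1, signed addition X + 1 wraps only for X = 127, so a sound
// "guaranteed no-wrap" region could be [-128, 126]. Omitting additional
// values like -2 from that region stays sound; it merely loses precision.
#include <cstdio>

int main() {
  for (int x = -128; x <= 127; ++x) {
    int sum = x + 1;
    bool wraps = sum > 127 || sum < -128; // nsw violation for i8
    if (wraps)
      std::printf("X = %d wraps\n", x);  // prints only X = 127
  }
}
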
@ -157,6 +157,10 @@ class DominatorTree : public DominatorTreeBase<BasicBlock> {
/// This should only be used for debugging as it aborts the program if the
/// verification fails.
void verifyDomTree() const;

// Pop up a GraphViz/gv window with the Dominator Tree rendered using `dot`.
void viewGraph(const Twine &Name, const Twine &Title);
void viewGraph();
};

//===-------------------------------------

@ -629,6 +629,8 @@ def int_amdgcn_readfirstlane :
GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
GCCBuiltin<"__builtin_amdgcn_readlane">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

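Through the GCCBuiltin mapping above, the new intrinsic is reachable from clang device code. A minimal, hypothetical kernel fragment (our illustration, not part of this change) broadcasts one lane's value across the wave, keeping the lane index wave-uniform as the comment requires:

// Hypothetical device-code fragment (illustration only). The literal 0 is
// wave-uniform, satisfying the documented requirement on the lane argument;
// a divergent lane index would yield an undefined result.
int broadcast_from_lane0(int v) {
  return __builtin_amdgcn_readlane(v, 0); // every active lane gets lane 0's v
}
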
@ -319,7 +319,7 @@ class Module {
/// exist, add a prototype for the function and return it. This function
/// guarantees to return a constant of pointer to the specified function type
/// or a ConstantExpr BitCast of that type if the named function has a
/// different type. This version of the method takes a null terminated list of
/// different type. This version of the method takes a list of
/// function arguments, which makes it easier for clients to use.
template<typename... ArgsTy>
Constant *getOrInsertFunction(StringRef Name,

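A typical use of the variadic overload declared above looks like the hedged sketch below; the helper name and chosen signature are ours, and in this LLVM era the result is a plain Constant* rather than a FunctionCallee.

// Hypothetical helper (illustration, not from this diff): get or declare
// `int puts(i8*)` in module M via the variadic overload above. The result
// may be a ConstantExpr bitcast if a mismatched declaration already exists,
// so callers often cast-check it before building calls.
static Constant *getPutsDecl(Module &M) {
  LLVMContext &Ctx = M.getContext();
  return M.getOrInsertFunction("puts",
                               Type::getInt32Ty(Ctx),    // return type
                               Type::getInt8PtrTy(Ctx)); // single argument
}
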
@ -482,6 +482,17 @@ class Value {
static_cast<const Value *>(this)->stripPointerCasts());
}

/// \brief Strip off pointer casts, all-zero GEPs, aliases and barriers.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'. This function should be used only in
/// Alias analysis.
const Value *stripPointerCastsAndBarriers() const;
Value *stripPointerCastsAndBarriers() {
return const_cast<Value *>(
static_cast<const Value *>(this)->stripPointerCastsAndBarriers());
}

/// \brief Strip off pointer casts and all-zero GEPs.
///
/// Returns the original uncasted value. If this is called on a non-pointer

@ -54,6 +54,7 @@ class MCTargetOptions {
int DwarfVersion = 0;

std::string ABIName;
std::string SplitDwarfFile;

/// Additional paths to search for `.include` directives when using the
/// integrated assembler.

@ -14,9 +14,19 @@
#ifndef LLVM_OBJECT_ELF_H
#define LLVM_OBJECT_ELF_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/ELFTypes.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

namespace llvm {
namespace object {
@ -41,27 +51,27 @@ template <class ELFT>
class ELFFile {
public:
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef typename ELFT::uint uintX_t;
typedef typename ELFT::Ehdr Elf_Ehdr;
typedef typename ELFT::Shdr Elf_Shdr;
typedef typename ELFT::Sym Elf_Sym;
typedef typename ELFT::Dyn Elf_Dyn;
typedef typename ELFT::Phdr Elf_Phdr;
typedef typename ELFT::Rel Elf_Rel;
typedef typename ELFT::Rela Elf_Rela;
typedef typename ELFT::Verdef Elf_Verdef;
typedef typename ELFT::Verdaux Elf_Verdaux;
typedef typename ELFT::Verneed Elf_Verneed;
typedef typename ELFT::Vernaux Elf_Vernaux;
typedef typename ELFT::Versym Elf_Versym;
typedef typename ELFT::Hash Elf_Hash;
typedef typename ELFT::GnuHash Elf_GnuHash;
typedef typename ELFT::DynRange Elf_Dyn_Range;
typedef typename ELFT::ShdrRange Elf_Shdr_Range;
typedef typename ELFT::SymRange Elf_Sym_Range;
typedef typename ELFT::RelRange Elf_Rel_Range;
typedef typename ELFT::RelaRange Elf_Rela_Range;
typedef typename ELFT::PhdrRange Elf_Phdr_Range;
using uintX_t = typename ELFT::uint;
using Elf_Ehdr = typename ELFT::Ehdr;
using Elf_Shdr = typename ELFT::Shdr;
using Elf_Sym = typename ELFT::Sym;
using Elf_Dyn = typename ELFT::Dyn;
using Elf_Phdr = typename ELFT::Phdr;
using Elf_Rel = typename ELFT::Rel;
using Elf_Rela = typename ELFT::Rela;
using Elf_Verdef = typename ELFT::Verdef;
using Elf_Verdaux = typename ELFT::Verdaux;
using Elf_Verneed = typename ELFT::Verneed;
using Elf_Vernaux = typename ELFT::Vernaux;
using Elf_Versym = typename ELFT::Versym;
using Elf_Hash = typename ELFT::Hash;
using Elf_GnuHash = typename ELFT::GnuHash;
using Elf_Dyn_Range = typename ELFT::DynRange;
using Elf_Shdr_Range = typename ELFT::ShdrRange;
using Elf_Sym_Range = typename ELFT::SymRange;
using Elf_Rel_Range = typename ELFT::RelRange;
using Elf_Rela_Range = typename ELFT::RelaRange;
using Elf_Phdr_Range = typename ELFT::PhdrRange;

const uint8_t *base() const {
return reinterpret_cast<const uint8_t *>(Buf.data());
@ -70,7 +80,6 @@ class ELFFile {
size_t getBufSize() const { return Buf.size(); }

private:

StringRef Buf;

public:
@ -161,10 +170,10 @@ class ELFFile {
Expected<ArrayRef<uint8_t>> getSectionContents(const Elf_Shdr *Sec) const;
};

typedef ELFFile<ELFType<support::little, false>> ELF32LEFile;
typedef ELFFile<ELFType<support::little, true>> ELF64LEFile;
typedef ELFFile<ELFType<support::big, false>> ELF32BEFile;
typedef ELFFile<ELFType<support::big, true>> ELF64BEFile;
using ELF32LEFile = ELFFile<ELFType<support::little, false>>;
using ELF64LEFile = ELFFile<ELFType<support::little, true>>;
using ELF32BEFile = ELFFile<ELFType<support::big, false>>;
using ELF64BEFile = ELFFile<ELFType<support::big, true>>;

template <class ELFT>
inline Expected<const typename ELFT::Shdr *>
@ -194,7 +203,7 @@ ELFFile<ELFT>::getSectionIndex(const Elf_Sym *Sym, Elf_Sym_Range Syms,
ArrayRef<Elf_Word> ShndxTable) const {
uint32_t Index = Sym->st_shndx;
if (Index == ELF::SHN_XINDEX) {
auto ErrorOrIndex = object::getExtendedSymbolTableIndex<ELFT>(
auto ErrorOrIndex = getExtendedSymbolTableIndex<ELFT>(
Sym, Syms.begin(), ShndxTable);
if (!ErrorOrIndex)
return ErrorOrIndex.takeError();
@ -519,7 +528,8 @@ inline unsigned hashSysV(StringRef SymbolName) {
}
return h;
}

} // end namespace object
} // end namespace llvm

#endif
#endif // LLVM_OBJECT_ELF_H

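The hunk above shows only the tail of hashSysV; for reference, a standalone sketch of the classic System V ELF hash it implements (our reconstruction of the well-known algorithm, not the in-tree text) is:

// Standalone reconstruction of the classic SysV ELF hash (illustrative;
// the in-tree hashSysV is equivalent but takes a StringRef).
#include <cstdint>
#include <string>

uint32_t sysVHash(const std::string &Name) {
  uint32_t H = 0;
  for (unsigned char C : Name) {
    H = (H << 4) + C;               // fold in the next byte
    uint32_t G = H & 0xf0000000;    // grab the top nibble
    if (G)
      H ^= G >> 24;                 // mix it back into the low bits
    H &= ~G;                        // and clear it
  }
  return H;
}
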
@ -27,6 +27,7 @@
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/ARMAttributeParser.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
@ -42,13 +43,11 @@ namespace llvm {
namespace object {

class elf_symbol_iterator;
class ELFSymbolRef;
class ELFRelocationRef;

class ELFObjectFileBase : public ObjectFile {
friend class ELFSymbolRef;
friend class ELFSectionRef;
friend class ELFRelocationRef;
friend class ELFSectionRef;
friend class ELFSymbolRef;

protected:
ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source);
@ -65,7 +64,8 @@ class ELFObjectFileBase : public ObjectFile {
virtual ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;

public:
typedef iterator_range<elf_symbol_iterator> elf_symbol_iterator_range;
using elf_symbol_iterator_range = iterator_range<elf_symbol_iterator>;

virtual elf_symbol_iterator_range getDynamicSymbolIterators() const = 0;

elf_symbol_iterator_range symbols() const;
@ -201,14 +201,14 @@ template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
public:
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)

typedef typename ELFFile<ELFT>::uintX_t uintX_t;
using uintX_t = typename ELFFile<ELFT>::uintX_t;

typedef typename ELFFile<ELFT>::Elf_Sym Elf_Sym;
typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
typedef typename ELFFile<ELFT>::Elf_Ehdr Elf_Ehdr;
typedef typename ELFFile<ELFT>::Elf_Rel Elf_Rel;
typedef typename ELFFile<ELFT>::Elf_Rela Elf_Rela;
typedef typename ELFFile<ELFT>::Elf_Dyn Elf_Dyn;
using Elf_Sym = typename ELFFile<ELFT>::Elf_Sym;
using Elf_Shdr = typename ELFFile<ELFT>::Elf_Shdr;
using Elf_Ehdr = typename ELFFile<ELFT>::Elf_Ehdr;
using Elf_Rel = typename ELFFile<ELFT>::Elf_Rel;
using Elf_Rela = typename ELFFile<ELFT>::Elf_Rela;
using Elf_Dyn = typename ELFFile<ELFT>::Elf_Dyn;

protected:
ELFFile<ELFT> EF;
@ -398,10 +398,10 @@ template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
bool isRelocatableObject() const override;
};

typedef ELFObjectFile<ELFType<support::little, false>> ELF32LEObjectFile;
typedef ELFObjectFile<ELFType<support::little, true>> ELF64LEObjectFile;
typedef ELFObjectFile<ELFType<support::big, false>> ELF32BEObjectFile;
typedef ELFObjectFile<ELFType<support::big, true>> ELF64BEObjectFile;
using ELF32LEObjectFile = ELFObjectFile<ELFType<support::little, false>>;
using ELF64LEObjectFile = ELFObjectFile<ELFType<support::little, true>>;
using ELF32BEObjectFile = ELFObjectFile<ELFType<support::big, false>>;
using ELF64BEObjectFile = ELFObjectFile<ELFType<support::big, true>>;

template <class ELFT>
void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {

@ -11,10 +11,15 @@
#define LLVM_OBJECT_ELFTYPES_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Error.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <type_traits>

namespace llvm {
namespace object {
@ -45,58 +50,58 @@ template <endianness E, bool Is64> struct ELFType {
static const endianness TargetEndianness = E;
static const bool Is64Bits = Is64;

typedef typename std::conditional<Is64, uint64_t, uint32_t>::type uint;
typedef Elf_Ehdr_Impl<ELFType<E, Is64>> Ehdr;
typedef Elf_Shdr_Impl<ELFType<E, Is64>> Shdr;
typedef Elf_Sym_Impl<ELFType<E, Is64>> Sym;
typedef Elf_Dyn_Impl<ELFType<E, Is64>> Dyn;
typedef Elf_Phdr_Impl<ELFType<E, Is64>> Phdr;
typedef Elf_Rel_Impl<ELFType<E, Is64>, false> Rel;
typedef Elf_Rel_Impl<ELFType<E, Is64>, true> Rela;
typedef Elf_Verdef_Impl<ELFType<E, Is64>> Verdef;
typedef Elf_Verdaux_Impl<ELFType<E, Is64>> Verdaux;
typedef Elf_Verneed_Impl<ELFType<E, Is64>> Verneed;
typedef Elf_Vernaux_Impl<ELFType<E, Is64>> Vernaux;
typedef Elf_Versym_Impl<ELFType<E, Is64>> Versym;
typedef Elf_Hash_Impl<ELFType<E, Is64>> Hash;
typedef Elf_GnuHash_Impl<ELFType<E, Is64>> GnuHash;
typedef Elf_Chdr_Impl<ELFType<E, Is64>> Chdr;
typedef ArrayRef<Dyn> DynRange;
typedef ArrayRef<Shdr> ShdrRange;
typedef ArrayRef<Sym> SymRange;
typedef ArrayRef<Rel> RelRange;
typedef ArrayRef<Rela> RelaRange;
typedef ArrayRef<Phdr> PhdrRange;
using uint = typename std::conditional<Is64, uint64_t, uint32_t>::type;
using Ehdr = Elf_Ehdr_Impl<ELFType<E, Is64>>;
using Shdr = Elf_Shdr_Impl<ELFType<E, Is64>>;
using Sym = Elf_Sym_Impl<ELFType<E, Is64>>;
using Dyn = Elf_Dyn_Impl<ELFType<E, Is64>>;
using Phdr = Elf_Phdr_Impl<ELFType<E, Is64>>;
using Rel = Elf_Rel_Impl<ELFType<E, Is64>, false>;
using Rela = Elf_Rel_Impl<ELFType<E, Is64>, true>;
using Verdef = Elf_Verdef_Impl<ELFType<E, Is64>>;
using Verdaux = Elf_Verdaux_Impl<ELFType<E, Is64>>;
using Verneed = Elf_Verneed_Impl<ELFType<E, Is64>>;
using Vernaux = Elf_Vernaux_Impl<ELFType<E, Is64>>;
using Versym = Elf_Versym_Impl<ELFType<E, Is64>>;
using Hash = Elf_Hash_Impl<ELFType<E, Is64>>;
using GnuHash = Elf_GnuHash_Impl<ELFType<E, Is64>>;
using Chdr = Elf_Chdr_Impl<ELFType<E, Is64>>;
using DynRange = ArrayRef<Dyn>;
using ShdrRange = ArrayRef<Shdr>;
using SymRange = ArrayRef<Sym>;
using RelRange = ArrayRef<Rel>;
using RelaRange = ArrayRef<Rela>;
using PhdrRange = ArrayRef<Phdr>;

typedef packed<uint16_t> Half;
typedef packed<uint32_t> Word;
typedef packed<int32_t> Sword;
typedef packed<uint64_t> Xword;
typedef packed<int64_t> Sxword;
typedef packed<uint> Addr;
typedef packed<uint> Off;
using Half = packed<uint16_t>;
using Word = packed<uint32_t>;
using Sword = packed<int32_t>;
using Xword = packed<uint64_t>;
using Sxword = packed<int64_t>;
using Addr = packed<uint>;
using Off = packed<uint>;
};

typedef ELFType<support::little, false> ELF32LE;
typedef ELFType<support::big, false> ELF32BE;
typedef ELFType<support::little, true> ELF64LE;
typedef ELFType<support::big, true> ELF64BE;
using ELF32LE = ELFType<support::little, false>;
using ELF32BE = ELFType<support::big, false>;
using ELF64LE = ELFType<support::little, true>;
using ELF64BE = ELFType<support::big, true>;

// Use an alignment of 2 for the typedefs since that is the worst case for
// ELF files in archives.

// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
template <endianness target_endianness> struct ELFDataTypeTypedefHelperCommon {
typedef support::detail::packed_endian_specific_integral<
uint16_t, target_endianness, 2> Elf_Half;
typedef support::detail::packed_endian_specific_integral<
uint32_t, target_endianness, 2> Elf_Word;
typedef support::detail::packed_endian_specific_integral<
int32_t, target_endianness, 2> Elf_Sword;
typedef support::detail::packed_endian_specific_integral<
uint64_t, target_endianness, 2> Elf_Xword;
typedef support::detail::packed_endian_specific_integral<
int64_t, target_endianness, 2> Elf_Sxword;
using Elf_Half = support::detail::packed_endian_specific_integral<
uint16_t, target_endianness, 2>;
using Elf_Word = support::detail::packed_endian_specific_integral<
uint32_t, target_endianness, 2>;
using Elf_Sword = support::detail::packed_endian_specific_integral<
int32_t, target_endianness, 2>;
using Elf_Xword = support::detail::packed_endian_specific_integral<
uint64_t, target_endianness, 2>;
using Elf_Sxword = support::detail::packed_endian_specific_integral<
int64_t, target_endianness, 2>;
};

template <class ELFT> struct ELFDataTypeTypedefHelper;
@ -105,34 +110,34 @@ template <class ELFT> struct ELFDataTypeTypedefHelper;
template <endianness TargetEndianness>
struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, false>>
: ELFDataTypeTypedefHelperCommon<TargetEndianness> {
typedef uint32_t value_type;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Addr;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Off;
using value_type = uint32_t;
using Elf_Addr = support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2>;
using Elf_Off = support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2>;
};

/// ELF 64bit types.
template <endianness TargetEndianness>
struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, true>>
: ELFDataTypeTypedefHelperCommon<TargetEndianness> {
typedef uint64_t value_type;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Addr;
typedef support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2> Elf_Off;
using value_type = uint64_t;
using Elf_Addr = support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2>;
using Elf_Off = support::detail::packed_endian_specific_integral<
value_type, TargetEndianness, 2>;
};

// I really don't like doing this, but the alternative is copypasta.

#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) \
typedef typename ELFT::Addr Elf_Addr; \
typedef typename ELFT::Off Elf_Off; \
typedef typename ELFT::Half Elf_Half; \
typedef typename ELFT::Word Elf_Word; \
typedef typename ELFT::Sword Elf_Sword; \
typedef typename ELFT::Xword Elf_Xword; \
typedef typename ELFT::Sxword Elf_Sxword;
using Elf_Addr = typename ELFT::Addr; \
using Elf_Off = typename ELFT::Off; \
using Elf_Half = typename ELFT::Half; \
using Elf_Word = typename ELFT::Word; \
using Elf_Sword = typename ELFT::Sword; \
using Elf_Xword = typename ELFT::Xword; \
using Elf_Sxword = typename ELFT::Sxword;

#define LLD_ELF_COMMA ,
#define LLVM_ELF_IMPORT_TYPES(E, W) \
@ -222,6 +227,7 @@ struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
uint64_t getValue() const { return st_value; }
void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
void setType(unsigned char t) { setBindingAndType(getBinding(), t); }

void setBindingAndType(unsigned char b, unsigned char t) {
st_info = (b << 4) + (t & 0x0f);
}
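The packing in setBindingAndType follows the ELF specification's st_info layout: binding in the high nibble, type in the low nibble. A standalone round-trip sketch (ours, not part of this change) makes the bit layout explicit:

// Standalone illustration of the ELF st_info encoding used above.
#include <cassert>
#include <cstdint>

int main() {
  uint8_t Binding = 1; // STB_GLOBAL
  uint8_t Type = 2;    // STT_FUNC
  uint8_t StInfo = (Binding << 4) + (Type & 0x0f); // pack, as in the header
  assert((StInfo >> 4) == Binding);  // what getBinding() recovers
  assert((StInfo & 0x0f) == Type);   // what getType() recovers
}
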
@ -238,22 +244,29 @@ struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
}

bool isAbsolute() const { return st_shndx == ELF::SHN_ABS; }

bool isCommon() const {
return getType() == ELF::STT_COMMON || st_shndx == ELF::SHN_COMMON;
}

bool isDefined() const { return !isUndefined(); }

bool isProcessorSpecific() const {
return st_shndx >= ELF::SHN_LOPROC && st_shndx <= ELF::SHN_HIPROC;
}

bool isOSSpecific() const {
return st_shndx >= ELF::SHN_LOOS && st_shndx <= ELF::SHN_HIOS;
}

bool isReserved() const {
// ELF::SHN_HIRESERVE is 0xffff so st_shndx <= ELF::SHN_HIRESERVE is always
// true and some compilers warn about it.
return st_shndx >= ELF::SHN_LORESERVE;
}

bool isUndefined() const { return st_shndx == ELF::SHN_UNDEF; }

bool isExternal() const {
return getBinding() != ELF::STB_LOCAL;
}
@ -277,14 +290,12 @@ struct Elf_Versym_Impl {
Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
};

template <class ELFT> struct Elf_Verdaux_Impl;

/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
template <class ELFT>
struct Elf_Verdef_Impl {
LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
using Elf_Verdaux = Elf_Verdaux_Impl<ELFT>;
Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
Elf_Half vd_flags; // Bitwise flags (VER_DEF_*)
Elf_Half vd_ndx; // Version index, used in .gnu.version entries
@ -361,10 +372,10 @@ template <class ELFT>
struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
using Elf_Dyn_Base<ELFT>::d_tag;
using Elf_Dyn_Base<ELFT>::d_un;
typedef typename std::conditional<ELFT::Is64Bits,
int64_t, int32_t>::type intX_t;
typedef typename std::conditional<ELFT::Is64Bits,
uint64_t, uint32_t>::type uintX_t;
using intX_t = typename std::conditional<ELFT::Is64Bits,
int64_t, int32_t>::type;
using uintX_t = typename std::conditional<ELFT::Is64Bits,
uint64_t, uint32_t>::type;
intX_t getTag() const { return d_tag; }
uintX_t getVal() const { return d_un.d_val; }
uintX_t getPtr() const { return d_un.d_ptr; }
@ -430,6 +441,7 @@ struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
}

void setRInfo(uint64_t R, bool IsMips64EL) {
if (IsMips64EL)
r_info = (R >> 32) | ((R & 0xff000000) << 8) | ((R & 0x00ff0000) << 24) |
@ -483,15 +495,15 @@ struct Elf_Ehdr_Impl {
Elf_Half e_shnum; // Number of entries in the section header table
Elf_Half e_shstrndx; // Section header table index of section name
// string table

bool checkMagic() const {
return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
}

unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
};

template <class ELFT> struct Elf_Phdr_Impl;

template <endianness TargetEndianness>
struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
@ -582,7 +594,7 @@ struct Elf_Chdr_Impl<ELFType<TargetEndianness, true>> {
template <class ELFT>
struct Elf_Mips_RegInfo;

template <llvm::support::endianness TargetEndianness>
template <support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
Elf_Word ri_gprmask; // bit-mask of used general registers
@ -590,7 +602,7 @@ struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
Elf_Addr ri_gp_value; // gp register value
};

template <llvm::support::endianness TargetEndianness>
template <support::endianness TargetEndianness>
struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
Elf_Word ri_gprmask; // bit-mask of used general registers
@ -609,7 +621,7 @@ template <class ELFT> struct Elf_Mips_Options {
Elf_Word info; // Kind-specific information

Elf_Mips_RegInfo<ELFT> &getRegInfo() {
assert(kind == llvm::ELF::ODK_REGINFO);
assert(kind == ELF::ODK_REGINFO);
return *reinterpret_cast<Elf_Mips_RegInfo<ELFT> *>(
(uint8_t *)this + sizeof(Elf_Mips_Options));
}
@ -637,4 +649,4 @@ template <class ELFT> struct Elf_Mips_ABIFlags {
} // end namespace object.
} // end namespace llvm.

#endif
#endif // LLVM_OBJECT_ELFTYPES_H

@ -25,23 +25,31 @@
#define LLVM_OBJECT_IRSYMTAB_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cassert>
#include <cstdint>
#include <vector>

namespace llvm {
namespace irsymtab {

namespace storage {

// The data structures in this namespace define the low-level serialization
// format. Clients that just want to read a symbol table should use the
// irsymtab::Reader class.

typedef support::ulittle32_t Word;
using Word = support::ulittle32_t;

/// A reference to a string in the string table.
struct Str {
Word Offset, Size;

StringRef get(StringRef Strtab) const {
return {Strtab.data() + Offset, Size};
}
@ -50,6 +58,7 @@ struct Str {
/// A reference to a range of objects in the symbol table.
template <typename T> struct Range {
Word Offset, Size;

ArrayRef<T> get(StringRef Symtab) const {
return {reinterpret_cast<const T *>(Symtab.data() + Offset), Size};
}
@ -122,7 +131,7 @@ struct Header {
Str COFFLinkerOpts;
};

}
} // end namespace storage

/// Fills in Symtab and Strtab with a valid symbol and string table for Mods.
Error build(ArrayRef<Module *> Mods, SmallVector<char, 0> &Symtab,
@ -152,18 +161,22 @@ struct Symbol {
int getComdatIndex() const { return ComdatIndex; }

using S = storage::Symbol;

GlobalValue::VisibilityTypes getVisibility() const {
return GlobalValue::VisibilityTypes((Flags >> S::FB_visibility) & 3);
}

bool isUndefined() const { return (Flags >> S::FB_undefined) & 1; }
bool isWeak() const { return (Flags >> S::FB_weak) & 1; }
bool isCommon() const { return (Flags >> S::FB_common) & 1; }
bool isIndirect() const { return (Flags >> S::FB_indirect) & 1; }
bool isUsed() const { return (Flags >> S::FB_used) & 1; }
bool isTLS() const { return (Flags >> S::FB_tls) & 1; }

bool canBeOmittedFromSymbolTable() const {
return (Flags >> S::FB_may_omit) & 1;
}

bool isGlobal() const { return (Flags >> S::FB_global) & 1; }
bool isFormatSpecific() const { return (Flags >> S::FB_format_specific) & 1; }
bool isUnnamedAddr() const { return (Flags >> S::FB_unnamed_addr) & 1; }
@ -173,6 +186,7 @@ struct Symbol {
assert(isCommon());
return CommonSize;
}

uint32_t getCommonAlignment() const {
assert(isCommon());
return CommonAlign;
@ -197,9 +211,11 @@ class Reader {
ArrayRef<storage::Uncommon> Uncommons;

StringRef str(storage::Str S) const { return S.get(Strtab); }

template <typename T> ArrayRef<T> range(storage::Range<T> R) const {
return R.get(Symtab);
}

const storage::Header &header() const {
return *reinterpret_cast<const storage::Header *>(Symtab.data());
}
@ -215,7 +231,7 @@ class Reader {
Uncommons = range(header().Uncommons);
}

typedef iterator_range<object::content_iterator<SymbolRef>> symbol_range;
using symbol_range = iterator_range<object::content_iterator<SymbolRef>>;

/// Returns the symbol table for the entire bitcode file.
/// The symbols enumerated by this method are ephemeral, but they can be
@ -298,8 +314,7 @@ inline Reader::symbol_range Reader::module_symbols(unsigned I) const {
SymbolRef(MEnd, MEnd, nullptr, this)};
}

}
} // end namespace irsymtab
} // end namespace llvm

}

#endif
#endif // LLVM_OBJECT_IRSYMTAB_H

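Since the storage structs above are just offset/size pairs into flat buffers, reading one back is mechanical. Here is a standalone sketch of the Str::get idea, using plain std::string_view as a stand-in for StringRef (our illustration, not the in-tree types):

// Standalone sketch of the offset/size string reference used by storage::Str.
#include <cstdint>
#include <string_view>

struct StrRef {
  uint32_t Offset, Size; // little-endian Words in the real on-disk format

  std::string_view get(std::string_view Strtab) const {
    return Strtab.substr(Offset, Size); // no copy: points into the table
  }
};

int main() {
  std::string_view Strtab{"\0main\0foo", 9}; // hypothetical string table
  StrRef S{1, 4};                            // entry referencing "main"
  return S.get(Strtab) == "main" ? 0 : 1;
}
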
@ -16,10 +16,25 @@
#define LLVM_OBJECT_MACHO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <string>
#include <system_error>

namespace llvm {
namespace object {
@ -28,11 +43,10 @@ namespace object {
/// data in code entry in the table in a Mach-O object file.
class DiceRef {
DataRefImpl DicePimpl;
const ObjectFile *OwningObject;
const ObjectFile *OwningObject = nullptr;

public:
DiceRef() : OwningObject(nullptr) { }

DiceRef() = default;
DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);

bool operator==(const DiceRef &Other) const;
@ -47,7 +61,7 @@ class DiceRef {
DataRefImpl getRawDataRefImpl() const;
const ObjectFile *getObjectFile() const;
};
typedef content_iterator<DiceRef> dice_iterator;
using dice_iterator = content_iterator<DiceRef>;

/// ExportEntry encapsulates the current-state-of-the-walk used when doing a
/// non-recursive walk of the trie data structure. This allows you to iterate
@ -71,6 +85,7 @@ class ExportEntry {

private:
friend class MachOObjectFile;

void moveToFirst();
void moveToEnd();
uint64_t readULEB128(const uint8_t *&p);
@ -80,25 +95,26 @@ class ExportEntry {
// Represents a node in the mach-o exports trie.
struct NodeState {
NodeState(const uint8_t *Ptr);

const uint8_t *Start;
const uint8_t *Current;
uint64_t Flags;
uint64_t Address;
uint64_t Other;
const char *ImportName;
unsigned ChildCount;
unsigned NextChildIndex;
unsigned ParentStringLength;
bool IsExportNode;
uint64_t Flags = 0;
uint64_t Address = 0;
uint64_t Other = 0;
const char *ImportName = nullptr;
unsigned ChildCount = 0;
unsigned NextChildIndex = 0;
unsigned ParentStringLength = 0;
bool IsExportNode = false;
};

ArrayRef<uint8_t> Trie;
SmallString<256> CumulativeString;
SmallVector<NodeState, 16> Stack;
bool Malformed;
bool Done;
bool Malformed = false;
bool Done = false;
};
typedef content_iterator<ExportEntry> export_iterator;
using export_iterator = content_iterator<ExportEntry>;
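The export_iterator alias above is how clients consume the trie walk without recursing. The hedged sketch below is our illustration: it assumes an exports() range accessor on MachOObjectFile and the name()/address() accessors on ExportEntry, whose exact signatures vary across LLVM versions (some take an Error out-parameter).

// Hypothetical client loop (illustration only, signatures assumed):
void printExports(const MachOObjectFile &Obj, raw_ostream &OS) {
  for (const ExportEntry &E : Obj.exports())       // walk the exports trie
    OS << E.name() << " @ " << format_hex(E.address(), 10) << "\n";
}
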

// Segment info so SegIndex/SegOffset pairs in a Mach-O Bind or Rebase entry
// can be checked and translated. Only the SegIndex/SegOffset pairs from
@ -106,7 +122,7 @@ typedef content_iterator<ExportEntry> export_iterator;
// address() methods below.
class BindRebaseSegInfo {
public:
BindRebaseSegInfo(const object::MachOObjectFile *Obj);
BindRebaseSegInfo(const MachOObjectFile *Obj);

// Used to check a Mach-O Bind or Rebase entry for errors when iterating.
const char *checkSegAndOffset(int32_t SegIndex, uint64_t SegOffset,
@ -130,6 +146,7 @@ class BindRebaseSegInfo {
int32_t SegmentIndex;
};
const SectionInfo &findSection(int32_t SegIndex, uint64_t SegOffset);

SmallVector<SectionInfo, 32> Sections;
int32_t MaxSegIndex;
};
@ -159,6 +176,7 @@ class MachORebaseEntry {

private:
friend class MachOObjectFile;

void moveToFirst();
void moveToEnd();
uint64_t readULEB128(const char **error);
@ -167,15 +185,15 @@ class MachORebaseEntry {
const MachOObjectFile *O;
ArrayRef<uint8_t> Opcodes;
const uint8_t *Ptr;
uint64_t SegmentOffset;
int32_t SegmentIndex;
uint64_t RemainingLoopCount;
uint64_t AdvanceAmount;
uint8_t RebaseType;
uint64_t SegmentOffset = 0;
int32_t SegmentIndex = -1;
uint64_t RemainingLoopCount = 0;
uint64_t AdvanceAmount = 0;
uint8_t RebaseType = 0;
uint8_t PointerSize;
bool Done;
bool Done = false;
};
typedef content_iterator<MachORebaseEntry> rebase_iterator;
using rebase_iterator = content_iterator<MachORebaseEntry>;

/// MachOBindEntry encapsulates the current state in the decompression of
/// binding opcodes. This allows you to iterate through the compressed table of
@ -209,6 +227,7 @@ class MachOBindEntry {

private:
friend class MachOObjectFile;

void moveToFirst();
void moveToEnd();
uint64_t readULEB128(const char **error);
@ -218,21 +237,21 @@ class MachOBindEntry {
const MachOObjectFile *O;
ArrayRef<uint8_t> Opcodes;
const uint8_t *Ptr;
uint64_t SegmentOffset;
int32_t SegmentIndex;
uint64_t SegmentOffset = 0;
int32_t SegmentIndex = -1;
StringRef SymbolName;
bool LibraryOrdinalSet;
int Ordinal;
uint32_t Flags;
int64_t Addend;
uint64_t RemainingLoopCount;
uint64_t AdvanceAmount;
uint8_t BindType;
bool LibraryOrdinalSet = false;
int Ordinal = 0;
uint32_t Flags = 0;
int64_t Addend = 0;
uint64_t RemainingLoopCount = 0;
uint64_t AdvanceAmount = 0;
uint8_t BindType = 0;
uint8_t PointerSize;
Kind TableKind;
bool Done;
bool Done = false;
};
typedef content_iterator<MachOBindEntry> bind_iterator;
using bind_iterator = content_iterator<MachOBindEntry>;

class MachOObjectFile : public ObjectFile {
public:
@ -240,8 +259,8 @@ class MachOObjectFile : public ObjectFile {
const char *Ptr; // Where in memory the load command is.
MachO::load_command C; // The command itself.
};
typedef SmallVector<LoadCommandInfo, 4> LoadCommandList;
typedef LoadCommandList::const_iterator load_command_iterator;
using LoadCommandList = SmallVector<LoadCommandInfo, 4>;
using load_command_iterator = LoadCommandList::const_iterator;

static Expected<std::unique_ptr<MachOObjectFile>>
create(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
@ -563,7 +582,7 @@ class MachOObjectFile : public ObjectFile {
case MachO::PLATFORM_BRIDGEOS: return "bridgeos";
default:
std::string ret;
llvm::raw_string_ostream ss(ret);
raw_string_ostream ss(ret);
ss << format_hex(platform, 8, true);
return ss.str();
}
@ -576,7 +595,7 @@ class MachOObjectFile : public ObjectFile {
case MachO::TOOL_LD: return "ld";
default:
std::string ret;
llvm::raw_string_ostream ss(ret);
raw_string_ostream ss(ret);
ss << format_hex(tools, 8, true);
return ss.str();
}
@ -595,7 +614,6 @@ class MachOObjectFile : public ObjectFile {
}

private:

MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
Error &Err, uint32_t UniversalCputype = 0,
uint32_t UniversalIndex = 0);
@ -606,23 +624,23 @@ class MachOObjectFile : public ObjectFile {
MachO::mach_header_64 Header64;
MachO::mach_header Header;
};
typedef SmallVector<const char*, 1> SectionList;
using SectionList = SmallVector<const char*, 1>;
SectionList Sections;
typedef SmallVector<const char*, 1> LibraryList;
using LibraryList = SmallVector<const char*, 1>;
LibraryList Libraries;
LoadCommandList LoadCommands;
typedef SmallVector<StringRef, 1> LibraryShortName;
using LibraryShortName = SmallVector<StringRef, 1>;
using BuildToolList = SmallVector<const char*, 1>;
BuildToolList BuildTools;
mutable LibraryShortName LibrariesShortNames;
std::unique_ptr<BindRebaseSegInfo> BindRebaseSectionTable;
const char *SymtabLoadCmd;
const char *DysymtabLoadCmd;
const char *DataInCodeLoadCmd;
const char *LinkOptHintsLoadCmd;
const char *DyldInfoLoadCmd;
const char *UuidLoadCmd;
bool HasPageZeroSegment;
const char *SymtabLoadCmd = nullptr;
const char *DysymtabLoadCmd = nullptr;
const char *DataInCodeLoadCmd = nullptr;
const char *LinkOptHintsLoadCmd = nullptr;
const char *DyldInfoLoadCmd = nullptr;
const char *UuidLoadCmd = nullptr;
bool HasPageZeroSegment = false;
};

/// DiceRef
@ -679,7 +697,7 @@ inline const ObjectFile *DiceRef::getObjectFile() const {
return OwningObject;
}

}
}
} // end namespace object
} // end namespace llvm

#endif
#endif // LLVM_OBJECT_MACHO_H

@ -1,4 +1,4 @@
//===- ModuleSummaryIndexObjectFile.h - Summary index file implementation -=//
//===- ModuleSummaryIndexObjectFile.h - Summary index file implementation -===//
//
// The LLVM Compiler Infrastructure
//
@ -14,14 +14,22 @@
#ifndef LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H
#define LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H

#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
#include <system_error>

namespace llvm {

class ModuleSummaryIndex;
class Module;

namespace object {

class ObjectFile;

/// This class is used to read just the module summary index related
@ -41,15 +49,18 @@ class ModuleSummaryIndexObjectFile : public SymbolicFile {
void moveSymbolNext(DataRefImpl &Symb) const override {
llvm_unreachable("not implemented");
}

std::error_code printSymbolName(raw_ostream &OS,
DataRefImpl Symb) const override {
llvm_unreachable("not implemented");
return std::error_code();
}

uint32_t getSymbolFlags(DataRefImpl Symb) const override {
llvm_unreachable("not implemented");
return 0;
}

basic_symbol_iterator symbol_begin() const override {
llvm_unreachable("not implemented");
return basic_symbol_iterator(BasicSymbolRef());
@ -85,7 +96,8 @@ class ModuleSummaryIndexObjectFile : public SymbolicFile {
static Expected<std::unique_ptr<ModuleSummaryIndexObjectFile>>
create(MemoryBufferRef Object);
};
}

} // end namespace object

/// Parse the module summary index out of an IR file and return the module
/// summary index object if found, or nullptr if not. If Identifier is
@ -94,6 +106,7 @@ class ModuleSummaryIndexObjectFile : public SymbolicFile {
/// containing minimized bitcode just for the thin link.
Expected<std::unique_ptr<ModuleSummaryIndex>>
getModuleSummaryIndexForFile(StringRef Path, StringRef Identifier = "");
}

#endif
} // end namespace llvm

#endif // LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H

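Callers of getModuleSummaryIndexForFile have to unwrap the returned Expected; a minimal hedged sketch (the error-reporting style and helper name are ours) would be:

// Hypothetical caller (illustration, not from this diff): load a summary
// index from a bitcode file, handling the Expected<> failure path.
static std::unique_ptr<ModuleSummaryIndex> loadIndex(StringRef Path) {
  Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
      getModuleSummaryIndexForFile(Path);
  if (!IndexOrErr) {
    logAllUnhandledErrors(IndexOrErr.takeError(), errs(),
                          "error loading summary index: ");
    return nullptr;
  }
  // Per the doc comment, the result may also be null when no index exists.
  return std::move(*IndexOrErr);
}
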
@ -1,4 +1,4 @@
//===- ModuleSymbolTable.h - symbol table for in-memory IR ----------------===//
//===- ModuleSymbolTable.h - symbol table for in-memory IR ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -16,22 +16,24 @@
#ifndef LLVM_OBJECT_MODULESYMBOLTABLE_H
#define LLVM_OBJECT_MODULESYMBOLTABLE_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Mangler.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/Allocator.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class GlobalValue;
class RecordStreamer;

class ModuleSymbolTable {
public:
typedef std::pair<std::string, uint32_t> AsmSymbol;
typedef PointerUnion<GlobalValue *, AsmSymbol *> Symbol;
using AsmSymbol = std::pair<std::string, uint32_t>;
using Symbol = PointerUnion<GlobalValue *, AsmSymbol *>;

private:
Module *FirstMod = nullptr;
@ -57,6 +59,6 @@ class ModuleSymbolTable {
function_ref<void(StringRef, object::BasicSymbolRef::Flags)> AsmSymbol);
};

}
} // end namespace llvm

#endif
#endif // LLVM_OBJECT_MODULESYMBOLTABLE_H

@ -1,4 +1,4 @@
//===-- RelocVisitor.h - Visitor for object file relocations -*- C++ -*-===//
//===- RelocVisitor.h - Visitor for object file relocations -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -16,34 +16,38 @@
#ifndef LLVM_OBJECT_RELOCVISITOR_H
#define LLVM_OBJECT_RELOCVISITOR_H

#include "llvm/ADT/Triple.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Object/MachO.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <system_error>

namespace llvm {
namespace object {

struct RelocToApply {
// The computed value after applying the relevant relocations.
int64_t Value;
int64_t Value = 0;

// The width of the value; how many bytes to touch when applying the
// relocation.
char Width;
char Width = 0;

RelocToApply() = default;
RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {}
RelocToApply() : Value(0), Width(0) {}
};

/// @brief Base class for object file relocation visitors.
class RelocVisitor {
public:
explicit RelocVisitor(const ObjectFile &Obj)
: ObjToVisit(Obj), HasError(false) {}
explicit RelocVisitor(const ObjectFile &Obj) : ObjToVisit(Obj) {}

// TODO: Should handle multiple applied relocations via either passing in the
// previously computed value or just count paired relocations as a single
@ -64,22 +68,22 @@ class RelocVisitor {

private:
const ObjectFile &ObjToVisit;
bool HasError;
bool HasError = false;

RelocToApply visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
switch (ObjToVisit.getArch()) {
case Triple::x86_64:
switch (RelocType) {
case llvm::ELF::R_X86_64_NONE:
case ELF::R_X86_64_NONE:
return visitELF_X86_64_NONE(R);
case llvm::ELF::R_X86_64_64:
case ELF::R_X86_64_64:
return visitELF_X86_64_64(R, Value);
case llvm::ELF::R_X86_64_PC32:
case ELF::R_X86_64_PC32:
return visitELF_X86_64_PC32(R, Value);
case llvm::ELF::R_X86_64_32:
case ELF::R_X86_64_32:
return visitELF_X86_64_32(R, Value);
case llvm::ELF::R_X86_64_32S:
case ELF::R_X86_64_32S:
return visitELF_X86_64_32S(R, Value);
default:
HasError = true;
@ -88,9 +92,9 @@ class RelocVisitor {
case Triple::aarch64:
case Triple::aarch64_be:
switch (RelocType) {
case llvm::ELF::R_AARCH64_ABS32:
case ELF::R_AARCH64_ABS32:
return visitELF_AARCH64_ABS32(R, Value);
case llvm::ELF::R_AARCH64_ABS64:
case ELF::R_AARCH64_ABS64:
return visitELF_AARCH64_ABS64(R, Value);
default:
HasError = true;
@ -99,9 +103,9 @@ class RelocVisitor {
case Triple::bpfel:
case Triple::bpfeb:
switch (RelocType) {
case llvm::ELF::R_BPF_64_64:
case ELF::R_BPF_64_64:
return visitELF_BPF_64_64(R, Value);
case llvm::ELF::R_BPF_64_32:
case ELF::R_BPF_64_32:
return visitELF_BPF_64_32(R, Value);
default:
HasError = true;
@ -110,9 +114,9 @@ class RelocVisitor {
case Triple::mips64el:
case Triple::mips64:
switch (RelocType) {
case llvm::ELF::R_MIPS_32:
case ELF::R_MIPS_32:
return visitELF_MIPS64_32(R, Value);
case llvm::ELF::R_MIPS_64:
case ELF::R_MIPS_64:
return visitELF_MIPS64_64(R, Value);
default:
HasError = true;
@ -121,9 +125,9 @@ class RelocVisitor {
case Triple::ppc64le:
case Triple::ppc64:
switch (RelocType) {
case llvm::ELF::R_PPC64_ADDR32:
case ELF::R_PPC64_ADDR32:
return visitELF_PPC64_ADDR32(R, Value);
case llvm::ELF::R_PPC64_ADDR64:
case ELF::R_PPC64_ADDR64:
return visitELF_PPC64_ADDR64(R, Value);
default:
HasError = true;
@ -131,9 +135,9 @@ class RelocVisitor {
}
case Triple::systemz:
switch (RelocType) {
case llvm::ELF::R_390_32:
case ELF::R_390_32:
return visitELF_390_32(R, Value);
case llvm::ELF::R_390_64:
case ELF::R_390_64:
return visitELF_390_64(R, Value);
default:
HasError = true;
@ -141,11 +145,11 @@ class RelocVisitor {
}
case Triple::sparcv9:
switch (RelocType) {
case llvm::ELF::R_SPARC_32:
case llvm::ELF::R_SPARC_UA32:
case ELF::R_SPARC_32:
case ELF::R_SPARC_UA32:
return visitELF_SPARCV9_32(R, Value);
case llvm::ELF::R_SPARC_64:
case llvm::ELF::R_SPARC_UA64:
case ELF::R_SPARC_64:
case ELF::R_SPARC_UA64:
return visitELF_SPARCV9_64(R, Value);
default:
HasError = true;
@ -153,9 +157,9 @@ class RelocVisitor {
}
case Triple::amdgcn:
switch (RelocType) {
case llvm::ELF::R_AMDGPU_ABS32:
case ELF::R_AMDGPU_ABS32:
return visitELF_AMDGPU_ABS32(R, Value);
case llvm::ELF::R_AMDGPU_ABS64:
case ELF::R_AMDGPU_ABS64:
return visitELF_AMDGPU_ABS64(R, Value);
default:
HasError = true;
@ -169,11 +173,11 @@ class RelocVisitor {
switch (ObjToVisit.getArch()) {
case Triple::x86:
switch (RelocType) {
case llvm::ELF::R_386_NONE:
case ELF::R_386_NONE:
return visitELF_386_NONE(R);
case llvm::ELF::R_386_32:
case ELF::R_386_32:
return visitELF_386_32(R, Value);
case llvm::ELF::R_386_PC32:
case ELF::R_386_PC32:
return visitELF_386_PC32(R, Value);
default:
HasError = true;
@ -181,7 +185,7 @@ class RelocVisitor {
}
case Triple::ppc:
switch (RelocType) {
case llvm::ELF::R_PPC_ADDR32:
case ELF::R_PPC_ADDR32:
return visitELF_PPC_ADDR32(R, Value);
default:
HasError = true;
@ -193,12 +197,12 @@ class RelocVisitor {
default:
HasError = true;
return RelocToApply();
case llvm::ELF::R_ARM_ABS32:
case ELF::R_ARM_ABS32:
return visitELF_ARM_ABS32(R, Value);
}
case Triple::lanai:
switch (RelocType) {
case llvm::ELF::R_LANAI_32:
case ELF::R_LANAI_32:
return visitELF_Lanai_32(R, Value);
default:
HasError = true;
@ -207,7 +211,7 @@ class RelocVisitor {
case Triple::mipsel:
case Triple::mips:
switch (RelocType) {
case llvm::ELF::R_MIPS_32:
case ELF::R_MIPS_32:
return visitELF_MIPS_32(R, Value);
default:
HasError = true;
@ -215,8 +219,8 @@ class RelocVisitor {
}
case Triple::sparc:
switch (RelocType) {
case llvm::ELF::R_SPARC_32:
case llvm::ELF::R_SPARC_UA32:
case ELF::R_SPARC_32:
case ELF::R_SPARC_UA32:
return visitELF_SPARC_32(R, Value);
default:
HasError = true;
@ -224,7 +228,7 @@ class RelocVisitor {
}
case Triple::hexagon:
switch (RelocType) {
case llvm::ELF::R_HEX_32:
case ELF::R_HEX_32:
return visitELF_HEX_32(R, Value);
default:
HasError = true;
@ -483,6 +487,7 @@ class RelocVisitor {
}
};

}
}
#endif
} // end namespace object
} // end namespace llvm

#endif // LLVM_OBJECT_RELOCVISITOR_H

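A typical consumer of this visitor (DWARF readers, for instance) applies it per relocation. The hedged sketch below assumes the public visit(RelocType, RelocationRef, Value) entry point that dispatches to the private visitELF shown above, plus an error() accessor for HasError; treat both as assumptions about this era's API.

// Hypothetical client loop (illustration only, signatures assumed):
void applyRelocs(const ObjectFile &Obj, const SectionRef &Sec) {
  RelocVisitor V(Obj);
  for (const RelocationRef &Reloc : Sec.relocations()) {
    RelocToApply R = V.visit(Reloc.getType(), Reloc, /*Value=*/0);
    if (V.error())
      continue; // unknown relocation for this target: skip it
    // ... patch R.Width bytes at the relocation's offset with R.Value
  }
}
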
@ -1,4 +1,4 @@
//===-------- StackMapParser.h - StackMap Parsing Support -------*- C++ -*-===//
//===- StackMapParser.h - StackMap Parsing Support --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -11,7 +11,11 @@
#define LLVM_CODEGEN_STACKMAPPARSER_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace llvm {
@ -19,12 +23,11 @@ namespace llvm {
template <support::endianness Endianness>
class StackMapV2Parser {
public:

template <typename AccessorT>
class AccessorIterator {
public:

AccessorIterator(AccessorT A) : A(A) {}

AccessorIterator& operator++() { A = A.next(); return *this; }
AccessorIterator operator++(int) {
auto tmp = *this;
@ -48,8 +51,8 @@ class StackMapV2Parser {
/// Accessor for function records.
class FunctionAccessor {
friend class StackMapV2Parser;
public:

public:
/// Get the function address.
uint64_t getFunctionAddress() const {
return read<uint64_t>(P);
@ -80,13 +83,12 @@ class StackMapV2Parser {
/// Accessor for constants.
class ConstantAccessor {
friend class StackMapV2Parser;
public:

public:
/// Return the value of this constant.
uint64_t getValue() const { return read<uint64_t>(P); }

private:

ConstantAccessor(const uint8_t *P) : P(P) {}

const static int ConstantAccessorSize = sizeof(uint64_t);
@ -98,20 +100,16 @@ class StackMapV2Parser {
const uint8_t *P;
};

// Forward-declare RecordAccessor so we can friend it below.
class RecordAccessor;

enum class LocationKind : uint8_t {
Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5
};


/// Accessor for location records.
class LocationAccessor {
friend class StackMapV2Parser;
friend class RecordAccessor;
public:

public:
/// Get the Kind for this location.
LocationKind getKind() const {
return LocationKind(P[KindOffset]);
@ -144,7 +142,6 @@ class StackMapV2Parser {
}

private:

LocationAccessor(const uint8_t *P) : P(P) {}

LocationAccessor next() const {
@ -163,8 +160,8 @@ class StackMapV2Parser {
class LiveOutAccessor {
friend class StackMapV2Parser;
friend class RecordAccessor;
public:

public:
/// Get the Dwarf register number for this live-out.
uint16_t getDwarfRegNum() const {
return read<uint16_t>(P + DwarfRegNumOffset);
@ -176,7 +173,6 @@ class StackMapV2Parser {
}

private:

LiveOutAccessor(const uint8_t *P) : P(P) {}

LiveOutAccessor next() const {
@ -194,10 +190,10 @@ class StackMapV2Parser {
/// Accessor for stackmap records.
class RecordAccessor {
friend class StackMapV2Parser;
public:

typedef AccessorIterator<LocationAccessor> location_iterator;
typedef AccessorIterator<LiveOutAccessor> liveout_iterator;
public:
using location_iterator = AccessorIterator<LocationAccessor>;
using liveout_iterator = AccessorIterator<LiveOutAccessor>;

/// Get the patchpoint/stackmap ID for this record.
uint64_t getID() const {
@ -254,7 +250,6 @@ class StackMapV2Parser {
return liveout_iterator(getLiveOut(0));
}


/// End iterator for live-outs.
liveout_iterator liveouts_end() const {
return liveout_iterator(getLiveOut(getNumLiveOuts()));
@ -266,7 +261,6 @@ class StackMapV2Parser {
}

private:

RecordAccessor(const uint8_t *P) : P(P) {}

unsigned getNumLiveOutsOffset() const {
@ -316,9 +310,9 @@ class StackMapV2Parser {
}
}

typedef AccessorIterator<FunctionAccessor> function_iterator;
typedef AccessorIterator<ConstantAccessor> constant_iterator;
typedef AccessorIterator<RecordAccessor> record_iterator;
using function_iterator = AccessorIterator<FunctionAccessor>;
using constant_iterator = AccessorIterator<ConstantAccessor>;
using record_iterator = AccessorIterator<RecordAccessor>;

/// Get the version number of this stackmap. (Always returns 2).
unsigned getVersion() const { return 2; }
@ -413,7 +407,6 @@ class StackMapV2Parser {
}

private:

template <typename T>
static T read(const uint8_t *P) {
return support::endian::read<T, Endianness, 1>(P);
@ -441,6 +434,6 @@ class StackMapV2Parser {
std::vector<unsigned> StackMapRecordOffsets;
};

}
} // end namespace llvm

#endif
#endif // LLVM_CODEGEN_STACKMAPPARSER_H

@ -17,6 +17,8 @@
#ifndef LLVM_OBJECT_WASM_H
#define LLVM_OBJECT_WASM_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
@ -47,10 +49,10 @@ class WasmSymbol {

class WasmSection {
public:
  WasmSection() : Type(0), Offset(0) {}
  WasmSection() = default;

  uint32_t Type; // Section type (See below)
  uint32_t Offset; // Offset within the file
  uint32_t Type = 0; // Section type (See below)
  uint32_t Offset = 0; // Offset within the file
  StringRef Name; // Section name (User-defined sections only)
  ArrayRef<uint8_t> Content; // Section content
  std::vector<wasm::WasmRelocation> Relocations; // Relocations for this section
@ -74,12 +76,15 @@ class WasmObjectFile : public ObjectFile {
  const std::vector<wasm::WasmLimits>& memories() const { return Memories; }
  const std::vector<wasm::WasmGlobal>& globals() const { return Globals; }
  const std::vector<wasm::WasmExport>& exports() const { return Exports; }

  const std::vector<wasm::WasmElemSegment>& elements() const {
    return ElemSegments;
  }

  const std::vector<wasm::WasmDataSegment>& dataSegments() const {
    return DataSegments;
  }

  const std::vector<wasm::WasmFunction>& functions() const { return Functions; }
  const ArrayRef<uint8_t>& code() const { return CodeSection; }
  uint32_t startFunction() const { return StartFunction; }
@ -178,7 +183,7 @@ class WasmObjectFile : public ObjectFile {
  std::vector<WasmSymbol> Symbols;
  std::vector<wasm::WasmFunction> Functions;
  ArrayRef<uint8_t> CodeSection;
  uint32_t StartFunction;
  uint32_t StartFunction = -1;
};

} // end namespace object
@ -88,7 +88,7 @@ struct Relocation {
  RelocType Type;
  uint32_t Index;
  yaml::Hex32 Offset;
  yaml::Hex32 Addend;
  int32_t Addend;
};

struct DataSegment {
@ -79,14 +79,6 @@ inline StringRef getInstrProfValueRangeProfFuncName() {
  return INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR;
}

/// Return the name of the section containing function coverage mapping
/// data.
std::string getInstrProfCoverageSectionName(const Module *M = nullptr);
/// Similar to the above, but used by a host tool (e.g., coverage) which has
/// object format information. The section name returned is not prefixed
/// with the segment name.
std::string getInstrProfCoverageSectionNameInObject(bool isCoff);

/// Return the name prefix of variables containing instrumented function names.
inline StringRef getInstrProfNameVarPrefix() { return "__profn_"; }
@ -112,6 +112,13 @@ class BranchProbability {
    return *this;
  }

  BranchProbability &operator*=(uint32_t RHS) {
    assert(N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
    N = (uint64_t(N) * RHS > D) ? D : N * RHS;
    return *this;
  }

  BranchProbability &operator/=(uint32_t RHS) {
    assert(N != UnknownN &&
           "Unknown probability cannot participate in arithmetics.");
@ -135,6 +142,11 @@ class BranchProbability {
    return Prob *= RHS;
  }

  BranchProbability operator*(uint32_t RHS) const {
    BranchProbability Prob(*this);
    return Prob *= RHS;
  }

  BranchProbability operator/(uint32_t RHS) const {
    BranchProbability Prob(*this);
    return Prob /= RHS;
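A minimal sketch of how the saturating multiply added above behaves. The getBranchProbability factory is the usual constructor for this class; the concrete values here are illustrative assumptions, not part of the change:

  // With N/D = 1/8, scaling by 4 gives 4/8; scaling by 16 would push N past
  // D, so the new operator*= clamps at certainty (N = D) instead of wrapping.
  BranchProbability P = BranchProbability::getBranchProbability(1, 8);
  BranchProbability Scaled  = P * 4;  // 4/8, via the new operator*(uint32_t)
  BranchProbability Clamped = P * 16; // saturates at 8/8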
@ -1,54 +0,0 @@
# Figure out if we can track VC revisions.
function(find_first_existing_file out_var)
  foreach(file ${ARGN})
    if(EXISTS "${file}")
      set(${out_var} "${file}" PARENT_SCOPE)
      return()
    endif()
  endforeach()
endfunction()

macro(find_first_existing_vc_file out_var path)
  find_program(git_executable NAMES git git.exe git.cmd)
  # Run from a subdirectory to force git to print an absolute path.
  execute_process(COMMAND ${git_executable} rev-parse --git-dir
    WORKING_DIRECTORY ${path}/cmake
    RESULT_VARIABLE git_result
    OUTPUT_VARIABLE git_dir)
  if(git_result EQUAL 0)
    string(STRIP "${git_dir}" git_dir)
    set(${out_var} "${git_dir}/logs/HEAD")
  else()
    find_first_existing_file(${out_var}
      "${path}/.svn/wc.db"   # SVN 1.7
      "${path}/.svn/entries" # SVN 1.6
      )
  endif()
endmacro()

find_first_existing_vc_file(llvm_vc "${LLVM_MAIN_SRC_DIR}")

# The VC revision include that we want to generate.
set(version_inc "${CMAKE_CURRENT_BINARY_DIR}/VCSRevision.h")

set(get_svn_script "${LLVM_CMAKE_PATH}/GenerateVersionFromCVS.cmake")

if(DEFINED llvm_vc)
  # Create custom target to generate the VC revision include.
  add_custom_command(OUTPUT "${version_inc}"
    DEPENDS "${llvm_vc}" "${get_svn_script}"
    COMMAND
    ${CMAKE_COMMAND} "-DSOURCE_DIR=${LLVM_MAIN_SRC_DIR}"
                     "-DNAME=LLVM_REVISION"
                     "-DHEADER_FILE=${version_inc}"
                     -P "${get_svn_script}")

  # Mark the generated header as being generated.
  set_source_files_properties("${version_inc}"
    PROPERTIES GENERATED TRUE
               HEADER_FILE_ONLY TRUE)
else()
  file(WRITE "${version_inc}" "")
endif()

add_custom_target(llvm_vcsrevision_h DEPENDS "${version_inc}")
@ -116,7 +116,9 @@ inline perms &operator&=(perms &l, perms r) {
  return l;
}
inline perms operator~(perms x) {
  return static_cast<perms>(~static_cast<unsigned short>(x));
  // Avoid UB by explicitly truncating the (unsigned) ~ result.
  return static_cast<perms>(
      static_cast<unsigned short>(~static_cast<unsigned short>(x)));
}

class UniqueID {
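The extra cast matters because ~ applied to an unsigned short first promotes the operand to int, so the complement carries set bits above the low 16 bits. A compile-time check of the behaviour the new comment describes (assuming the common 16-bit unsigned short and 32-bit int; the value is illustrative):

  // ~static_cast<unsigned short>(0x01C0) is the int 0xFFFFFE3F; truncating
  // back to unsigned short keeps only 0xFE3F, which is what operator~ stores.
  static_assert(static_cast<unsigned short>(
                    ~static_cast<unsigned short>(0x01C0)) == 0xFE3F,
                "complement must be truncated to the value's width");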
@ -286,13 +286,13 @@ template <class NodeT> class DominatorTreeBase : public DominatorBase<NodeT> {
  NodeRef NewBBSucc = *GraphT::child_begin(NewBB);

  std::vector<NodeRef> PredBlocks;
  for (const auto Pred : children<Inverse<N>>(NewBB))
  for (const auto &Pred : children<Inverse<N>>(NewBB))
    PredBlocks.push_back(Pred);

  assert(!PredBlocks.empty() && "No predblocks?");

  bool NewBBDominatesNewBBSucc = true;
  for (const auto Pred : children<Inverse<N>>(NewBBSucc)) {
  for (const auto &Pred : children<Inverse<N>>(NewBBSucc)) {
    if (Pred != NewBB && !dominates(NewBBSucc, Pred) &&
        isReachableFromEntry(Pred)) {
      NewBBDominatesNewBBSucc = false;
contrib/llvm/include/llvm/Support/KnownBits.h (new file, 43 lines)
@ -0,0 +1,43 @@
//===- llvm/Support/KnownBits.h - Stores known zeros/ones -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a class for representing known zeros and ones used by
// computeKnownBits.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_KNOWNBITS_H
#define LLVM_SUPPORT_KNOWNBITS_H

#include "llvm/ADT/APInt.h"

namespace llvm {

// For now this is a simple wrapper around two APInts.
struct KnownBits {
  APInt Zero;
  APInt One;

  // Default construct Zero and One.
  KnownBits() {}

  /// Create a known bits object of BitWidth bits initialized to unknown.
  KnownBits(unsigned BitWidth) : Zero(BitWidth, 0), One(BitWidth, 0) {}

  /// Get the bit width of this value.
  unsigned getBitWidth() const {
    assert(Zero.getBitWidth() == One.getBitWidth() &&
           "Zero and One should have the same width!");
    return Zero.getBitWidth();
  }
};

} // end namespace llvm

#endif
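A hedged sketch of how this new wrapper is meant to be used, based only on the header above (the bit positions are illustrative assumptions):

  // Track an 8-bit value where bit 0 is known zero and bit 1 is known one.
  KnownBits Known(8);
  Known.Zero.setBit(0);                 // bit 0 is known to be 0
  Known.One.setBit(1);                  // bit 1 is known to be 1
  unsigned Width = Known.getBitWidth(); // 8; asserts Zero/One widths agree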
@ -606,7 +606,7 @@ class IO {
  template <typename T>
  void bitSetCase(T &Val, const char* Str, const T ConstVal) {
    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
      Val = Val | ConstVal;
      Val = static_cast<T>(Val | ConstVal);
    }
  }

@ -614,7 +614,7 @@ class IO {
  template <typename T>
  void bitSetCase(T &Val, const char* Str, const uint32_t ConstVal) {
    if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
      Val = Val | ConstVal;
      Val = static_cast<T>(Val | ConstVal);
    }
  }

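The static_cast matters once T is an enum (or any type narrower than int): Val | ConstVal is computed after integral promotion and yields an int, which does not convert back to T implicitly. A small sketch with a hypothetical flags enum:

  enum Flags : uint16_t { F_A = 1, F_B = 2 }; // hypothetical example type
  Flags Val = F_A;
  // Val = Val | F_B;                   // ill-formed: int -> Flags needs a cast
  Val = static_cast<Flags>(Val | F_B);  // what the fixed bitSetCase does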
@ -30,21 +30,13 @@ def s64 : LLT;
// Definitions that inherit from this may also inherit from
// GIComplexPatternEquiv to enable the import of SelectionDAG patterns involving
// those ComplexPatterns.
class GIComplexOperandMatcher<LLT type, dag operands, string matcherfn> {
class GIComplexOperandMatcher<LLT type, string matcherfn> {
  // The expected type of the root of the match.
  //
  // TODO: We should probably support any-type, any-scalar, and multiple types
  // in the future.
  LLT Type = type;

  // The operands that result from a successful match.
  // Should be of the form '(ops ty1, ty2, ...)' where ty1/ty2 are definitions
  // that inherit from Operand.
  //
  // FIXME: Which definition is used for ty1/ty2 doesn't actually matter at the
  // moment. Only the number of operands is used.
  dag Operands = operands;

  // The function that determines whether the operand matches. It should be of
  // the form:
  //   bool select(const MatchOperand &Root, MatchOperand &Result1)
|
||||
|
||||
|
||||
/// Return the noop instruction to use for a noop.
|
||||
virtual void getNoopForMachoTarget(MCInst &NopInst) const;
|
||||
virtual void getNoop(MCInst &NopInst) const;
|
||||
|
||||
/// Return true for post-incremented instructions.
|
||||
virtual bool isPostIncrement(const MachineInstr &MI) const {
|
||||
|
@ -236,6 +236,12 @@ class TargetLoweringBase {
|
||||
return getPointerTy(DL, DL.getAllocaAddrSpace());
|
||||
}
|
||||
|
||||
/// Return the type for operands of fence.
|
||||
/// TODO: Let fence operands be of i32 type and remove this.
|
||||
virtual MVT getFenceOperandTy(const DataLayout &DL) const {
|
||||
return getPointerTy(DL);
|
||||
}
|
||||
|
||||
/// EVT is not used in-tree, but is used by out-of-tree target.
|
||||
/// A documentation for this function would be nice...
|
||||
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
|
||||
@ -2268,7 +2274,8 @@ class TargetLoweringBase {
|
||||
|
||||
/// Return true if the value types that can be represented by the specified
|
||||
/// register class are all legal.
|
||||
bool isLegalRC(const TargetRegisterClass *RC) const;
|
||||
bool isLegalRC(const TargetRegisterInfo &TRI,
|
||||
const TargetRegisterClass &RC) const;
|
||||
|
||||
/// Replace/modify any TargetFrameIndex operands with a targte-dependent
|
||||
/// sequence of memory operands that is recognized by PrologEpilogInserter.
|
||||
@ -2388,30 +2395,39 @@ class TargetLowering : public TargetLoweringBase {
|
||||
New = N;
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Check to see if the specified operand of the specified instruction is a
|
||||
/// constant integer. If so, check to see if there are any bits set in the
|
||||
/// constant that are not demanded. If so, shrink the constant and return
|
||||
/// true.
|
||||
bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
|
||||
|
||||
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
|
||||
/// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
|
||||
/// generalized for targets with other types of implicit widening casts.
|
||||
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
|
||||
const SDLoc &dl);
|
||||
|
||||
/// Helper for SimplifyDemandedBits that can simplify an operation with
|
||||
/// multiple uses. This function uses TLI.SimplifyDemandedBits to
|
||||
/// simplify Operand \p OpIdx of \p User and then updated \p User with
|
||||
/// the simplified version. No other uses of \p OpIdx are updated.
|
||||
/// If \p User is the only user of \p OpIdx, this function behaves exactly
|
||||
/// like TLI.SimplifyDemandedBits except that it also updates the DAG by
|
||||
/// calling DCI.CommitTargetLoweringOpt.
|
||||
bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
|
||||
const APInt &Demanded, DAGCombinerInfo &DCI);
|
||||
};
|
||||
|
||||
/// Check to see if the specified operand of the specified instruction is a
|
||||
/// constant integer. If so, check to see if there are any bits set in the
|
||||
/// constant that are not demanded. If so, shrink the constant and return
|
||||
/// true.
|
||||
bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
|
||||
TargetLoweringOpt &TLO) const;
|
||||
|
||||
// Target hook to do target-specific const optimization, which is called by
|
||||
// ShrinkDemandedConstant. This function should return true if the target
|
||||
// doesn't want ShrinkDemandedConstant to further optimize the constant.
|
||||
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
|
||||
TargetLoweringOpt &TLO) const {
|
||||
return false;
|
||||
}
|
||||
|
||||
/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
|
||||
/// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
|
||||
/// generalized for targets with other types of implicit widening casts.
|
||||
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
|
||||
TargetLoweringOpt &TLO) const;
|
||||
|
||||
/// Helper for SimplifyDemandedBits that can simplify an operation with
|
||||
/// multiple uses. This function simplifies operand \p OpIdx of \p User and
|
||||
/// then updates \p User with the simplified version. No other uses of
|
||||
/// \p OpIdx are updated. If \p User is the only user of \p OpIdx, this
|
||||
/// function behaves exactly like function SimplifyDemandedBits declared
|
||||
/// below except that it also updates the DAG by calling
|
||||
/// DCI.CommitTargetLoweringOpt.
|
||||
bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded,
|
||||
DAGCombinerInfo &DCI, TargetLoweringOpt &TLO) const;
|
||||
|
||||
/// Look at Op. At this point, we know that only the DemandedMask bits of the
|
||||
/// result of Op are ever used downstream. If we can use this information to
|
||||
/// simplify Op, create a new simplified DAG node and return true, returning
|
||||
|
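A hedged sketch of how an out-of-tree backend might override the new hook declared above (the class name is hypothetical; the signature is the one introduced by this diff):

  bool MyTargetLowering::targetShrinkDemandedConstant(
      SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const {
    // Returning true would keep a constant that already matches a cheap
    // immediate encoding; false lets ShrinkDemandedConstant proceed as usual.
    return false;
  }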
@ -40,13 +40,12 @@ class TargetRegisterClass {
public:
  typedef const MCPhysReg* iterator;
  typedef const MCPhysReg* const_iterator;
  typedef const MVT::SimpleValueType* vt_iterator;
  typedef const TargetRegisterClass* const * sc_iterator;

  // Instance variables filled by tablegen, do not use!
  const MCRegisterClass *MC;
  const uint16_t SpillSize, SpillAlignment;
  const vt_iterator VTs;
  const MVT::SimpleValueType *VTs;
  const uint32_t *SubClassMask;
  const uint16_t *SuperRegIndices;
  const LaneBitmask LaneMask;
@ -93,13 +92,6 @@ class TargetRegisterClass {
    return MC->contains(Reg1, Reg2);
  }

  /// Return the size of the register in bytes, which is also the size
  /// of a stack slot allocated to hold a spilled copy of this register.
  unsigned getSize() const { return SpillSize; }

  /// Return the minimum required alignment for a register of this class.
  unsigned getAlignment() const { return SpillAlignment; }

  /// Return the cost of copying a value between two registers in this class.
  /// A negative number means the register class is very expensive
  /// to copy e.g. status flag register classes.
@ -109,26 +101,6 @@ class TargetRegisterClass {
  /// registers.
  bool isAllocatable() const { return MC->isAllocatable(); }

  /// Return true if this TargetRegisterClass has the ValueType vt.
  bool hasType(MVT vt) const {
    for(int i = 0; VTs[i] != MVT::Other; ++i)
      if (MVT(VTs[i]) == vt)
        return true;
    return false;
  }

  /// vt_begin / vt_end - Loop over all of the value types that can be
  /// represented by values in this register class.
  vt_iterator vt_begin() const {
    return VTs;
  }

  vt_iterator vt_end() const {
    vt_iterator I = VTs;
    while (*I != MVT::Other) ++I;
    return I;
  }

  /// Return true if the specified TargetRegisterClass
  /// is a proper sub-class of this TargetRegisterClass.
  bool hasSubClass(const TargetRegisterClass *RC) const {
@ -246,6 +218,7 @@ struct RegClassWeight {
class TargetRegisterInfo : public MCRegisterInfo {
public:
  typedef const TargetRegisterClass * const * regclass_iterator;
  typedef const MVT::SimpleValueType* vt_iterator;
private:
  const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
  const char *const *SubRegIndexNames;    // Names of subreg indexes.
@ -327,6 +300,44 @@ class TargetRegisterInfo : public MCRegisterInfo {
    return Index | (1u << 31);
  }

  /// Return the size in bits of a register from class RC.
  unsigned getRegSizeInBits(const TargetRegisterClass &RC) const {
    return RC.SpillSize * 8;
  }

  /// Return the size in bytes of the stack slot allocated to hold a spilled
  /// copy of a register from class RC.
  unsigned getSpillSize(const TargetRegisterClass &RC) const {
    return RC.SpillSize;
  }

  /// Return the minimum required alignment for a spill slot for a register
  /// of this class.
  unsigned getSpillAlignment(const TargetRegisterClass &RC) const {
    return RC.SpillAlignment;
  }

  /// Return true if the given TargetRegisterClass has the ValueType T.
  bool isTypeLegalForClass(const TargetRegisterClass &RC, MVT T) const {
    for (int i = 0; RC.VTs[i] != MVT::Other; ++i)
      if (MVT(RC.VTs[i]) == T)
        return true;
    return false;
  }

  /// Loop over all of the value types that can be represented by values
  /// in the given register class.
  vt_iterator legalclasstypes_begin(const TargetRegisterClass &RC) const {
    return RC.VTs;
  }

  vt_iterator legalclasstypes_end(const TargetRegisterClass &RC) const {
    vt_iterator I = RC.VTs;
    while (*I != MVT::Other)
      ++I;
    return I;
  }

  /// Returns the Register Class of a physical register of the given type,
  /// picking the most sub register class of the right type that contains this
  /// physreg.
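The net effect for callers, sketched under the assumption of a TargetRegisterInfo &TRI and a const TargetRegisterClass *RC already in scope (the removed TargetRegisterClass queries on the left, the replacements from this diff on the right):

  // old: RC->getSize();        new:
  unsigned Bits  = TRI.getRegSizeInBits(*RC); // register size in bits
  unsigned Bytes = TRI.getSpillSize(*RC);     // spill slot size in bytes
  // old: RC->getAlignment();   new:
  unsigned Align = TRI.getSpillAlignment(*RC);
  // old: RC->hasType(VT);      new:
  bool Legal = TRI.isTypeLegalForClass(*RC, MVT::i32);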
@ -131,7 +131,8 @@ FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
                                                 bool Recover = false,
                                                 bool UseAfterScope = false);
ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
                                             bool Recover = false);
                                             bool Recover = false,
                                             bool UseGlobalsGC = true);

// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0,
@ -36,6 +36,7 @@
#ifndef LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H
#define LLVM_TRANSFORMS_SCALAR_CONSTANTHOISTING_H

#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
@ -98,7 +99,7 @@ class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {

  // Glue for old PM.
  bool runImpl(Function &F, TargetTransformInfo &TTI, DominatorTree &DT,
               BasicBlock &Entry);
               BlockFrequencyInfo *BFI, BasicBlock &Entry);

  void releaseMemory() {
    ConstantVec.clear();
@ -112,6 +113,7 @@ class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {

  const TargetTransformInfo *TTI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  BasicBlock *Entry;

  /// Keeps track of constant candidates found in the function.
@ -124,8 +126,8 @@ class ConstantHoistingPass : public PassInfoMixin<ConstantHoistingPass> {
  SmallVector<consthoist::ConstantInfo, 8> ConstantVec;

  Instruction *findMatInsertPt(Instruction *Inst, unsigned Idx = ~0U) const;
  Instruction *findConstantInsertionPoint(
      const consthoist::ConstantInfo &ConstInfo) const;
  SmallPtrSet<Instruction *, 8>
  findConstantInsertionPoint(const consthoist::ConstantInfo &ConstInfo) const;
  void collectConstantCandidates(ConstCandMapType &ConstCandMap,
                                 Instruction *Inst, unsigned Idx,
                                 ConstantInt *ConstInt);
@ -924,8 +924,8 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand()->stripPointerCasts() ==
             GEP2->getPointerOperand()->stripPointerCasts() &&
  assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
             GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

@ -1184,8 +1184,8 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
  // If we know the two GEPs are based off of the exact same pointer (and not
  // just the same underlying object), see if that tells us anything about
  // the resulting pointers.
  if (GEP1->getPointerOperand()->stripPointerCasts() ==
          GEP2->getPointerOperand()->stripPointerCasts() &&
  if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
          GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
      GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
    AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
    // If we couldn't find anything interesting, don't abandon just yet.
@ -1500,8 +1500,8 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();
  V1 = V1->stripPointerCastsAndBarriers();
  V2 = V2->stripPointerCastsAndBarriers();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
@ -42,6 +42,7 @@
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
@ -687,21 +688,21 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,

  if (Opc == Instruction::And) {
    unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
    computeKnownBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
    KnownBits Known0(BitWidth);
    KnownBits Known1(BitWidth);
    computeKnownBits(Op0, Known0, DL);
    computeKnownBits(Op1, Known1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    APInt KnownZero = Known0.Zero | Known1.Zero;
    APInt KnownOne = Known0.One & Known1.One;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
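The same mechanical rewrite recurs throughout this merge: a pair of APInt out-parameters becomes a single KnownBits value. The shape of the change, using the names from the code above:

  // old:
  //   APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
  //   computeKnownBits(Op0, KnownZero0, KnownOne0, DL);
  // new:
  KnownBits Known0(BitWidth);
  computeKnownBits(Op0, Known0, DL);
  // ...reads then become Known0.Zero and Known0.One.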
@ -37,6 +37,7 @@
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

@ -72,8 +73,7 @@ static bool isAlwaysLive(Instruction *I) {

void DemandedBits::determineLiveOperandBits(
    const Instruction *UserI, const Instruction *I, unsigned OperandNo,
    const APInt &AOut, APInt &AB, APInt &KnownZero, APInt &KnownOne,
    APInt &KnownZero2, APInt &KnownOne2) {
    const APInt &AOut, APInt &AB, KnownBits &Known, KnownBits &Known2) {
  unsigned BitWidth = AB.getBitWidth();

  // We're called once per operand, but for some instructions, we need to
@ -85,15 +85,13 @@ void DemandedBits::determineLiveOperandBits(
  auto ComputeKnownBits =
      [&](unsigned BitWidth, const Value *V1, const Value *V2) {
        const DataLayout &DL = I->getModule()->getDataLayout();
        KnownZero = APInt(BitWidth, 0);
        KnownOne = APInt(BitWidth, 0);
        computeKnownBits(const_cast<Value *>(V1), KnownZero, KnownOne, DL, 0,
        Known = KnownBits(BitWidth);
        computeKnownBits(const_cast<Value *>(V1), Known, DL, 0,
                         &AC, UserI, &DT);

        if (V2) {
          KnownZero2 = APInt(BitWidth, 0);
          KnownOne2 = APInt(BitWidth, 0);
          computeKnownBits(const_cast<Value *>(V2), KnownZero2, KnownOne2, DL,
          Known2 = KnownBits(BitWidth);
          computeKnownBits(const_cast<Value *>(V2), Known2, DL,
                           0, &AC, UserI, &DT);
        }
      };
@ -120,7 +118,7 @@ void DemandedBits::determineLiveOperandBits(
        // known to be one.
        ComputeKnownBits(BitWidth, I, nullptr);
        AB = APInt::getHighBitsSet(BitWidth,
               std::min(BitWidth, KnownOne.countLeadingZeros()+1));
               std::min(BitWidth, Known.One.countLeadingZeros()+1));
      }
      break;
    case Intrinsic::cttz:
@ -130,7 +128,7 @@ void DemandedBits::determineLiveOperandBits(
        // known to be one.
        ComputeKnownBits(BitWidth, I, nullptr);
        AB = APInt::getLowBitsSet(BitWidth,
               std::min(BitWidth, KnownOne.countTrailingZeros()+1));
               std::min(BitWidth, Known.One.countTrailingZeros()+1));
      }
      break;
    }
@ -200,11 +198,11 @@ void DemandedBits::determineLiveOperandBits(
    // dead).
    if (OperandNo == 0) {
      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
      AB &= ~KnownZero2;
      AB &= ~Known2.Zero;
    } else {
      if (!isa<Instruction>(UserI->getOperand(0)))
        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
      AB &= ~(KnownZero & ~KnownZero2);
      AB &= ~(Known.Zero & ~Known2.Zero);
    }
    break;
  case Instruction::Or:
@ -216,11 +214,11 @@ void DemandedBits::determineLiveOperandBits(
    // dead).
    if (OperandNo == 0) {
      ComputeKnownBits(BitWidth, I, UserI->getOperand(1));
      AB &= ~KnownOne2;
      AB &= ~Known2.One;
    } else {
      if (!isa<Instruction>(UserI->getOperand(0)))
        ComputeKnownBits(BitWidth, UserI->getOperand(0), I);
      AB &= ~(KnownOne & ~KnownOne2);
      AB &= ~(Known.One & ~Known2.One);
    }
    break;
  case Instruction::Xor:
@ -318,7 +316,7 @@ void DemandedBits::performAnalysis() {
    if (!UserI->getType()->isIntegerTy())
      Visited.insert(UserI);

    APInt KnownZero, KnownOne, KnownZero2, KnownOne2;
    KnownBits Known, Known2;
    // Compute the set of alive bits for each operand. These are anded into the
    // existing set, if any, and if that changes the set of alive bits, the
    // operand is added to the work-list.
@ -335,8 +333,7 @@ void DemandedBits::performAnalysis() {
          // Bits of each operand that are used to compute alive bits of the
          // output are alive, all others are dead.
          determineLiveOperandBits(UserI, I, OI.getOperandNo(), AOut, AB,
                                   KnownZero, KnownOne,
                                   KnownZero2, KnownOne2);
                                   Known, Known2);
        }

        // If we've added to the set of alive bits (or the operand has not
@ -80,6 +80,22 @@ struct DOTGraphTraits<PostDominatorTree*>
};
}

void DominatorTree::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "DomTree dump not available, build with DEBUG\n";
#endif // NDEBUG
}

void DominatorTree::viewGraph() {
#ifndef NDEBUG
  this->viewGraph("domtree", "Dominator Tree for function");
#else
  errs() << "DomTree dump not available, build with DEBUG\n";
#endif // NDEBUG
}

namespace {
struct DominatorTreeWrapperPassAnalysisGraphTraits {
  static DominatorTree *getGraph(DominatorTreeWrapperPass *DTWP) {
@ -253,18 +253,8 @@ bool IVUsers::AddUsersImpl(Instruction *I,
  const SCEV *OriginalISE = ISE;

  auto NormalizePred = [&](const SCEVAddRecExpr *AR) {
    // We only allow affine AddRecs to be normalized, otherwise we would not
    // be able to correctly denormalize.
    // e.g. {1,+,3,+,2} == {-2,+,1,+,2} + {3,+,2}
    // Normalized form:   {-2,+,1,+,2}
    // Denormalized form: {1,+,3,+,2}
    //
    // However, denormalization would use a different step expression than
    // normalization (see getPostIncExpr), generating the wrong final
    // expression: {-2,+,1,+,2} + {1,+,2} => {-1,+,3,+,2}
    auto *L = AR->getLoop();
    bool Result =
        AR->isAffine() && IVUseShouldUsePostIncValue(User, I, L, DT);
    bool Result = IVUseShouldUsePostIncValue(User, I, L, DT);
    if (Result)
      NewUse.PostIncLoops.insert(L);
    return Result;
@ -1556,7 +1556,6 @@ InlineParams llvm::getInlineParams(int Threshold) {
  // Set the ColdCallSiteThreshold knob from the -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inlinehint-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
(File diff suppressed because it is too large.)
@ -70,6 +70,7 @@
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@ -534,10 +535,9 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    unsigned BitWidth = V->getType()->getIntegerBitWidth();
    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(V, KnownZero, KnownOne, DL, 0, AC,
                     dyn_cast<Instruction>(V), DT);
    return KnownZero.isAllOnesValue();
    KnownBits Known(BitWidth);
    computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
    return Known.Zero.isAllOnesValue();
  }

  // Per-component check doesn't work with zeroinitializer
@ -556,9 +556,9 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
    if (isa<UndefValue>(Elem))
      return true;

    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    computeKnownBits(Elem, KnownZero, KnownOne, DL);
    if (KnownZero.isAllOnesValue())
    KnownBits Known(BitWidth);
    computeKnownBits(Elem, Known, DL);
    if (Known.Zero.isAllOnesValue())
      return true;
  }

@ -29,7 +29,7 @@

#define DEBUG_TYPE "memoryssa"
using namespace llvm;
namespace llvm {

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form"
// The simple, non-marker algorithm places phi nodes at any join
@ -211,8 +211,8 @@ void MemorySSAUpdater::insertUse(MemoryUse *MU) {
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                               MemoryAccess *NewDef) {
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand associated with the incoming block BB with the new
  // defining access.
  int i = MP->getBasicBlockIndex(BB);
@ -415,6 +415,7 @@ static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  }
  return MA;
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
@ -490,5 +491,3 @@ MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
                                          ++InsertPt->getIterator());
  return NewAccess;
}

} // namespace llvm
@ -89,6 +89,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SaveAndRestore.h"
@ -4575,10 +4576,10 @@ uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    computeKnownBits(U->getValue(), Zeros, Ones, getDataLayout(), 0, &AC,
    KnownBits Known(BitWidth);
    computeKnownBits(U->getValue(), Known, getDataLayout(), 0, &AC,
                     nullptr, &DT);
    return Zeros.countTrailingOnes();
    return Known.Zero.countTrailingOnes();
  }

  // SCEVUDivExpr
@ -4757,11 +4758,12 @@ ScalarEvolution::getRange(const SCEV *S,
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
      computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT);
      if (Ones != ~Zeros + 1)
      KnownBits Known(BitWidth);
      computeKnownBits(U->getValue(), Known, DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
            ConservativeResult.intersectWith(ConstantRange(Known.One,
                                                           ~Known.Zero + 1));
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
@ -5292,13 +5294,13 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
        computeKnownBits(BO->LHS, KnownZero, KnownOne, getDataLayout(),
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~KnownZero) & EffectiveMask)) {
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
@ -5328,12 +5330,28 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
      break;

    case Instruction::Or:
      // Use ValueTracking to check whether this is actually an add.
      if (haveNoCommonBitsSet(BO->LHS, BO->RHS, getDataLayout(), &AC,
                              nullptr, &DT)) {
        // There aren't any common bits set, so the add can't wrap.
        auto Flags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNSW);
        return getAddExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
      // If the RHS of the Or is a constant, we may have something like:
      // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
      // optimizations will transparently handle this case.
      //
      // In order for this transformation to be safe, the LHS must be of the
      // form X*(2^n) and the Or constant must be less than 2^n.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        const SCEV *LHS = getSCEV(BO->LHS);
        const APInt &CIVal = CI->getValue();
        if (GetMinTrailingZeros(LHS) >=
            (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
          // Build a plain add SCEV.
          const SCEV *S = getAddExpr(LHS, getSCEV(CI));
          // If the LHS of the add was an addrec and it has no-wrap flags,
          // transfer the no-wrap flags, since an or won't introduce a wrap.
          if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
            const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
            const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
                OldAR->getNoWrapFlags());
          }
          return S;
        }
      }
      break;

@ -6063,24 +6081,74 @@ ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
  return getCouldNotCompute();
}

ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromCond(const Loop *L,
                                          Value *ExitCond,
                                          BasicBlock *TBB,
                                          BasicBlock *FBB,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, BasicBlock *TBB, BasicBlock *FBB,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, TBB, FBB, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, TBB, FBB,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      BasicBlock *TBB, BasicBlock *FBB,
                                      bool ControlsExit, bool AllowPredicates) {
  (void)this->L;
  (void)this->TBB;
  (void)this->FBB;
  (void)this->AllowPredicates;

  assert(this->L == L && this->TBB == TBB && this->FBB == FBB &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             BasicBlock *TBB, BasicBlock *FBB,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->TBB == TBB && this->FBB == FBB &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB,
    BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, TBB, FBB,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, TBB, FBB, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, BasicBlock *TBB,
    BasicBlock *FBB, bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = L->contains(TBB);
      ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
                                               ControlsExit && !EitherMayExit,
                                               AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
                                               ControlsExit && !EitherMayExit,
                                               AllowPredicates);
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit,
          AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit,
          AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
@ -6124,12 +6192,12 @@ ScalarEvolution::computeExitLimitFromCond(const Loop *L,
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = L->contains(FBB);
      ExitLimit EL0 = computeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB,
                                               ControlsExit && !EitherMayExit,
                                               AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB,
                                               ControlsExit && !EitherMayExit,
                                               AllowPredicates);
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), TBB, FBB, ControlsExit && !EitherMayExit,
          AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), TBB, FBB, ControlsExit && !EitherMayExit,
          AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
@ -10221,84 +10289,75 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}

typedef DenseMap<const Loop *, std::string> VerifyMap;

/// replaceSubString - Replaces all occurrences of From in Str with To.
static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
  size_t Pos = 0;
  while ((Pos = Str.find(From, Pos)) != std::string::npos) {
    Str.replace(Pos, From.size(), To.data(), To.size());
    Pos += To.size();
  }
}

/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis.
static void
getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) {
  std::string &S = Map[L];
  if (S.empty()) {
    raw_string_ostream OS(S);
    SE.getBackedgeTakenCount(L)->print(OS);

    // false and 0 are semantically equivalent. This can happen in dead loops.
    replaceSubString(OS.str(), "false", "0");
    // Remove wrap flags, their use in SCEV is highly fragile.
    // FIXME: Remove this when SCEV gets smarter about them.
    replaceSubString(OS.str(), "<nw>", "");
    replaceSubString(OS.str(), "<nsw>", "");
    replaceSubString(OS.str(), "<nuw>", "");
  }

  for (auto *R : reverse(*L))
    getLoopBackedgeTakenCounts(R, Map, SE); // recurse.
}

void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  // Gather stringified backedge taken counts for all loops using SCEV's caches.
  // FIXME: It would be much better to store actual values instead of strings,
  // but SCEV pointers will change if we drop the caches.
  VerifyMap BackedgeDumpsOld, BackedgeDumpsNew;
  for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE);

  // Gather stringified backedge taken counts for all loops using a fresh
  // ScalarEvolution object.
  ScalarEvolution SE2(F, TLI, AC, DT, LI);
  for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I)
    getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2);

  // Now compare whether they're the same with and without caches. This allows
  // verifying that no pass changed the cache.
  assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() &&
         "New loops suddenly appeared!");
  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(),
                           OldE = BackedgeDumpsOld.end(),
                           NewI = BackedgeDumpsNew.begin();
       OldI != OldE; ++OldI, ++NewI) {
    assert(OldI->first == NewI->first && "Loop order changed!");
  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }
    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    // Compare the stringified SCEVs. We don't care if undef backedge-taken
    // counts change.
    // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This
    // means that a pass is buggy or SCEV has to learn a new pattern but is
    // usually not harmful.
    if (OldI->second != NewI->second &&
        OldI->second.find("undef") == std::string::npos &&
        NewI->second.find("undef") == std::string::npos &&
        OldI->second != "***COULDNOTCOMPUTE***" &&
        NewI->second != "***COULDNOTCOMPUTE***") {
      dbgs() << "SCEVValidator: SCEV for loop '"
             << OldI->first->getHeader()->getName()
             << "' changed from '" << OldI->second
             << "' to '" << NewI->second << "'!\n";
    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }

  // TODO: Verify more things.
}

bool ScalarEvolution::invalidate(
@ -51,40 +51,47 @@ NormalizeDenormalizeRewriter::visitAddRecExpr(const SCEVAddRecExpr *AR) {
  transform(AR->operands(), std::back_inserter(Operands),
            [&](const SCEV *Op) { return visit(Op); });

  // Conservatively use AnyWrap until/unless we need FlagNW.
  const SCEV *Result =
      SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
  switch (Kind) {
  case Normalize:
    // We want to normalize step expression, because otherwise we might not be
    // able to denormalize to the original expression.
  if (!Pred(AR))
    return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);

  // Normalization and denormalization are fancy names for decrementing and
  // incrementing a SCEV expression with respect to a set of loops.  Since
  // Pred(AR) has returned true, we know we need to normalize or denormalize AR
  // with respect to its loop.

  if (Kind == Denormalize) {
    // Denormalization / "partial increment" is essentially the same as \c
    // SCEVAddRecExpr::getPostIncExpr.  Here we use an explicit loop to make the
    // symmetry with Normalization clear.
    for (int i = 0, e = Operands.size() - 1; i < e; i++)
      Operands[i] = SE.getAddExpr(Operands[i], Operands[i + 1]);
  } else {
    assert(Kind == Normalize && "Only two possibilities!");

    // Normalization / "partial decrement" is a bit more subtle.  Since
    // incrementing a SCEV expression (in general) changes the step of the SCEV
    // expression as well, we cannot use the step of the current expression.
    // Instead, we have to use the step of the very expression we're trying to
    // compute!
    //
    // Here is an example what will happen if we don't normalize step:
    //  ORIGINAL ISE:
    //    {(100 /u {1,+,1}<%bb16>),+,(100 /u {1,+,1}<%bb16>)}<%bb25>
    //  NORMALIZED ISE:
    //    {((-1 * (100 /u {1,+,1}<%bb16>)) + (100 /u {0,+,1}<%bb16>)),+,
    //     (100 /u {0,+,1}<%bb16>)}<%bb25>
    //  DENORMALIZED BACK ISE:
    //    {((2 * (100 /u {1,+,1}<%bb16>)) + (-1 * (100 /u {2,+,1}<%bb16>))),+,
    //     (100 /u {1,+,1}<%bb16>)}<%bb25>
    //  Note that the initial value changes after normalization +
    //  denormalization, which isn't correct.
    if (Pred(AR)) {
      const SCEV *TransformedStep = visit(AR->getStepRecurrence(SE));
      Result = SE.getMinusSCEV(Result, TransformedStep);
    }
    break;
  case Denormalize:
    // Here we want to normalize step expressions for the same reasons, as
    // stated above.
    if (Pred(AR)) {
      const SCEV *TransformedStep = visit(AR->getStepRecurrence(SE));
      Result = SE.getAddExpr(Result, TransformedStep);
    }
    break;
    // We solve the issue by recursively building up the result, starting from
    // the "least significant" operand in the add recurrence:
    //
    // Base case:
    //   Single operand add recurrence.  It's its own normalization.
    //
    // N-operand case:
    //   {S_{N-1},+,S_{N-2},+,...,+,S_0} = S
    //
    //   Since the step recurrence of S is {S_{N-2},+,...,+,S_0}, we know its
    //   normalization by induction.  We subtract the normalized step
    //   recurrence from S_{N-1} to get the normalization of S.

    for (int i = Operands.size() - 2; i >= 0; i--)
      Operands[i] = SE.getMinusSCEV(Operands[i], Operands[i + 1]);
  }
  return Result;

  return SE.getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagAnyWrap);
}

const SCEV *llvm::normalizeForPostIncUse(const SCEV *S,
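
The new normalization logic builds the result from the least significant operand upward: a partial decrement for Normalize, a partial increment for Denormalize. A standalone sketch (hypothetical code, not part of this merge; plain integer coefficients stand in for SCEVs) shows that the two loops are exact inverses, which is precisely what the old step-transforming code failed to guarantee:

// build: c++ -std=c++11 normalize_demo.cpp && ./a.out
#include <cassert>
#include <cstdio>
#include <vector>

// Model the add recurrence {Ops[0],+,Ops[1],+,...} by its coefficients.
typedef std::vector<long long> Rec;

// "Partial decrement", mirroring the Normalize loop above: least
// significant operand first, so each step uses an already-normalized step.
static void normalize(Rec &Ops) {
  for (int i = (int)Ops.size() - 2; i >= 0; i--)
    Ops[i] -= Ops[i + 1];
}

// "Partial increment", mirroring the Denormalize loop / getPostIncExpr.
static void denormalize(Rec &Ops) {
  for (int i = 0, e = (int)Ops.size() - 1; i < e; i++)
    Ops[i] += Ops[i + 1];
}

int main() {
  Rec R = {7, 3, 2}; // {7,+,3,+,2}
  const Rec Orig = R;
  normalize(R);      // {6,1,2}: 3-2=1 first, then 7-1=6
  denormalize(R);    // back to {7,3,2}
  assert(R == Orig); // the round trip restores the initial value exactly
  std::printf("round-trip OK\n");
  return 0;
}
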
File diff suppressed because it is too large
@ -726,54 +726,50 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
}

void ModuleBitcodeWriter::writeAttributeGroupTable() {
  const std::vector<AttributeList> &AttrGrps = VE.getAttributeGroups();
  const std::vector<ValueEnumerator::IndexAndAttrSet> &AttrGrps =
      VE.getAttributeGroups();
  if (AttrGrps.empty()) return;

  Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3);

  SmallVector<uint64_t, 64> Record;
  for (unsigned i = 0, e = AttrGrps.size(); i != e; ++i) {
    AttributeList AS = AttrGrps[i];
    for (unsigned i = 0, e = AS.getNumSlots(); i != e; ++i) {
      AttributeList A = AS.getSlotAttributes(i);
  for (ValueEnumerator::IndexAndAttrSet Pair : AttrGrps) {
    unsigned AttrListIndex = Pair.first;
    AttributeSet AS = Pair.second;
    Record.push_back(VE.getAttributeGroupID(Pair));
    Record.push_back(AttrListIndex);

      Record.push_back(VE.getAttributeGroupID(A));
      Record.push_back(AS.getSlotIndex(i));
    for (Attribute Attr : AS) {
      if (Attr.isEnumAttribute()) {
        Record.push_back(0);
        Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
      } else if (Attr.isIntAttribute()) {
        Record.push_back(1);
        Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
        Record.push_back(Attr.getValueAsInt());
      } else {
        StringRef Kind = Attr.getKindAsString();
        StringRef Val = Attr.getValueAsString();

      for (AttributeList::iterator I = AS.begin(0), E = AS.end(0); I != E;
           ++I) {
        Attribute Attr = *I;
        if (Attr.isEnumAttribute()) {
        Record.push_back(Val.empty() ? 3 : 4);
        Record.append(Kind.begin(), Kind.end());
        Record.push_back(0);
        if (!Val.empty()) {
          Record.append(Val.begin(), Val.end());
          Record.push_back(0);
          Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
        } else if (Attr.isIntAttribute()) {
          Record.push_back(1);
          Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
          Record.push_back(Attr.getValueAsInt());
        } else {
          StringRef Kind = Attr.getKindAsString();
          StringRef Val = Attr.getValueAsString();

          Record.push_back(Val.empty() ? 3 : 4);
          Record.append(Kind.begin(), Kind.end());
          Record.push_back(0);
          if (!Val.empty()) {
            Record.append(Val.begin(), Val.end());
            Record.push_back(0);
          }
        }
      }

      Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record);
      Record.clear();
    }

  Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record);
  Record.clear();
  }

  Stream.ExitBlock();
}

void ModuleBitcodeWriter::writeAttributeTable() {
  const std::vector<AttributeList> &Attrs = VE.getAttributes();
  const std::vector<AttributeList> &Attrs = VE.getAttributeLists();
  if (Attrs.empty()) return;

  Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3);
@ -782,7 +778,8 @@ void ModuleBitcodeWriter::writeAttributeTable() {
  for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
    const AttributeList &A = Attrs[i];
    for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i)
      Record.push_back(VE.getAttributeGroupID(A.getSlotAttributes(i)));
      Record.push_back(
          VE.getAttributeGroupID({A.getSlotIndex(i), A.getSlotAttributes(i)}));

    Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
    Record.clear();
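
For reference, the PARAMATTR_GRP_CODE_ENTRY payload built above starts with the group ID and the AttributeList slot index, followed by one variable-length entry per attribute (0 = enum, 1 = integer, 3/4 = string without/with a value). A hypothetical standalone encoder, not code from this merge, makes the layout concrete:

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

static void encodeEnum(std::vector<uint64_t> &Record, uint64_t Kind) {
  Record.push_back(0);
  Record.push_back(Kind);
}

static void encodeInt(std::vector<uint64_t> &Record, uint64_t Kind,
                      uint64_t Val) {
  Record.push_back(1);
  Record.push_back(Kind);
  Record.push_back(Val);
}

static void encodeString(std::vector<uint64_t> &Record,
                         const std::string &Kind, const std::string &Val) {
  Record.push_back(Val.empty() ? 3 : 4);
  Record.insert(Record.end(), Kind.begin(), Kind.end());
  Record.push_back(0); // NUL terminator for the kind string
  if (!Val.empty()) {
    Record.insert(Record.end(), Val.begin(), Val.end());
    Record.push_back(0); // NUL terminator for the value string
  }
}

int main() {
  std::vector<uint64_t> Record;
  Record.push_back(1); // attribute group ID (hypothetical)
  Record.push_back(0); // AttributeList slot index the group applies to
  encodeEnum(Record, 18);  // some enum attribute kind (hypothetical number)
  encodeInt(Record, 4, 8); // an integer-valued kind, e.g. alignment-style
  encodeString(Record, "no-frame-pointer-elim", "true");
  // Record would now be emitted as one PARAMATTR_GRP_CODE_ENTRY.
  std::printf("record has %zu fields\n", Record.size());
  return 0;
}
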
@ -1270,7 +1267,7 @@ void ModuleBitcodeWriter::writeModuleInfo() {
    Vals.push_back(F.getCallingConv());
    Vals.push_back(F.isDeclaration());
    Vals.push_back(getEncodedLinkage(F));
    Vals.push_back(VE.getAttributeID(F.getAttributes()));
    Vals.push_back(VE.getAttributeListID(F.getAttributes()));
    Vals.push_back(Log2_32(F.getAlignment())+1);
    Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0);
    Vals.push_back(getEncodedVisibility(F));
@ -2616,7 +2613,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,

    Code = bitc::FUNC_CODE_INST_INVOKE;

    Vals.push_back(VE.getAttributeID(II->getAttributes()));
    Vals.push_back(VE.getAttributeListID(II->getAttributes()));
    Vals.push_back(II->getCallingConv() | 1 << 13);
    Vals.push_back(VE.getValueID(II->getNormalDest()));
    Vals.push_back(VE.getValueID(II->getUnwindDest()));
@ -2808,7 +2805,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,

    Code = bitc::FUNC_CODE_INST_CALL;

    Vals.push_back(VE.getAttributeID(CI.getAttributes()));
    Vals.push_back(VE.getAttributeListID(CI.getAttributes()));

    unsigned Flags = getOptimizationFlags(&I);
    Vals.push_back(CI.getCallingConv() << bitc::CALL_CCONV |
@ -891,19 +891,19 @@ void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
  if (PAL.isEmpty()) return;  // null is always 0.

  // Do a lookup.
  unsigned &Entry = AttributeMap[PAL];
  unsigned &Entry = AttributeListMap[PAL];
  if (Entry == 0) {
    // Never saw this before, add it.
    Attribute.push_back(PAL);
    Entry = Attribute.size();
    AttributeLists.push_back(PAL);
    Entry = AttributeLists.size();
  }

  // Do lookups for all attribute groups.
  for (unsigned i = 0, e = PAL.getNumSlots(); i != e; ++i) {
    AttributeList AS = PAL.getSlotAttributes(i);
    unsigned &Entry = AttributeGroupMap[AS];
    IndexAndAttrSet Pair = {PAL.getSlotIndex(i), PAL.getSlotAttributes(i)};
    unsigned &Entry = AttributeGroupMap[Pair];
    if (Entry == 0) {
      AttributeGroups.push_back(AS);
      AttributeGroups.push_back(Pair);
      Entry = AttributeGroups.size();
    }
  }
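
The interning scheme above relies on the map's default-constructed value: an Entry of 0 means "not seen yet", and IDs are 1-based so 0 can keep meaning "null". A small standalone model (hypothetical; std::map and strings stand in for DenseMap and AttributeSet) also shows why the key must now include the slot index, since the same attribute set on a different slot is a distinct group:

#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Stand-in for ValueEnumerator::IndexAndAttrSet.
typedef std::pair<unsigned, std::string> IndexAndSet;

int main() {
  std::map<IndexAndSet, unsigned> GroupMap;
  std::vector<IndexAndSet> Groups;

  auto enumerate = [&](unsigned SlotIndex, const std::string &Set) {
    unsigned &Entry = GroupMap[std::make_pair(SlotIndex, Set)];
    if (Entry == 0) {          // never saw this before, add it
      Groups.push_back(std::make_pair(SlotIndex, Set));
      Entry = Groups.size();   // IDs are 1-based; 0 stays "null"
    }
    return Entry;
  };

  unsigned A = enumerate(1, "nounwind");
  unsigned B = enumerate(2, "nounwind"); // same set, different slot: new ID
  assert(A == 1 && B == 2);
  assert(enumerate(1, "nounwind") == A); // repeats dedup to the same ID
  return 0;
}
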
@ -48,6 +48,10 @@ class ValueEnumerator {
  // For each value, we remember its Value* and occurrence frequency.
  typedef std::vector<std::pair<const Value*, unsigned> > ValueList;

  /// Attribute groups as encoded in bitcode are almost AttributeSets, but they
  /// include the AttributeList index, so we have to track that in our map.
  typedef std::pair<unsigned, AttributeSet> IndexAndAttrSet;

  UseListOrderStack UseListOrders;

private:
@ -102,13 +106,13 @@ class ValueEnumerator {

  bool ShouldPreserveUseListOrder;

  typedef DenseMap<AttributeList, unsigned> AttributeGroupMapType;
  typedef DenseMap<IndexAndAttrSet, unsigned> AttributeGroupMapType;
  AttributeGroupMapType AttributeGroupMap;
  std::vector<AttributeList> AttributeGroups;
  std::vector<IndexAndAttrSet> AttributeGroups;

  typedef DenseMap<AttributeList, unsigned> AttributeMapType;
  AttributeMapType AttributeMap;
  std::vector<AttributeList> Attribute;
  typedef DenseMap<AttributeList, unsigned> AttributeListMapType;
  AttributeListMapType AttributeListMap;
  std::vector<AttributeList> AttributeLists;

  /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by
  /// the "getGlobalBasicBlockID" method.
@ -166,16 +170,17 @@ class ValueEnumerator {
  unsigned getInstructionID(const Instruction *I) const;
  void setInstructionID(const Instruction *I);

  unsigned getAttributeID(AttributeList PAL) const {
  unsigned getAttributeListID(AttributeList PAL) const {
    if (PAL.isEmpty()) return 0;  // Null maps to zero.
    AttributeMapType::const_iterator I = AttributeMap.find(PAL);
    assert(I != AttributeMap.end() && "Attribute not in ValueEnumerator!");
    AttributeListMapType::const_iterator I = AttributeListMap.find(PAL);
    assert(I != AttributeListMap.end() && "Attribute not in ValueEnumerator!");
    return I->second;
  }

  unsigned getAttributeGroupID(AttributeList PAL) const {
    if (PAL.isEmpty()) return 0;  // Null maps to zero.
    AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(PAL);
  unsigned getAttributeGroupID(IndexAndAttrSet Group) const {
    if (!Group.second.hasAttributes())
      return 0; // Null maps to zero.
    AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(Group);
    assert(I != AttributeGroupMap.end() && "Attribute not in ValueEnumerator!");
    return I->second;
  }
@ -206,8 +211,8 @@ class ValueEnumerator {
  const std::vector<const BasicBlock*> &getBasicBlocks() const {
    return BasicBlocks;
  }
  const std::vector<AttributeList> &getAttributes() const { return Attribute; }
  const std::vector<AttributeList> &getAttributeGroups() const {
  const std::vector<AttributeList> &getAttributeLists() const { return AttributeLists; }
  const std::vector<IndexAndAttrSet> &getAttributeGroups() const {
    return AttributeGroups;
  }
@ -964,10 +964,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
        // sure to update that as well.
        const SUnit *SU = MISUnitMap[Q.second.Operand->getParent()];
        if (!SU) continue;
        for (DbgValueVector::iterator DVI = DbgValues.begin(),
               DVE = DbgValues.end(); DVI != DVE; ++DVI)
          if (DVI->second == Q.second.Operand->getParent())
            UpdateDbgValue(*DVI->first, AntiDepReg, NewReg);
        UpdateDbgValues(DbgValues, Q.second.Operand->getParent(),
                        AntiDepReg, NewReg);
      }

      // We just went back in time and modified history; the
@ -60,6 +60,25 @@ class LLVM_LIBRARY_VISIBILITY AntiDepBreaker {
    if (MI.getOperand(0).isReg() && MI.getOperand(0).getReg() == OldReg)
      MI.getOperand(0).setReg(NewReg);
  }

  /// Update all DBG_VALUE instructions that may be affected by the dependency
  /// breaker's update of ParentMI to use NewReg.
  void UpdateDbgValues(const DbgValueVector &DbgValues, MachineInstr *ParentMI,
                       unsigned OldReg, unsigned NewReg) {
    // The following code is dependent on the order in which the DbgValues are
    // constructed in ScheduleDAGInstrs::buildSchedGraph.
    MachineInstr *PrevDbgMI = nullptr;
    for (const auto &DV : make_range(DbgValues.crbegin(), DbgValues.crend())) {
      MachineInstr *PrevMI = DV.second;
      if ((PrevMI == ParentMI) || (PrevMI == PrevDbgMI)) {
        MachineInstr *DbgMI = DV.first;
        UpdateDbgValue(*DbgMI, OldReg, NewReg);
        PrevDbgMI = DbgMI;
      } else if (PrevDbgMI) {
        break; // If no match and already found a DBG_VALUE, we're done.
      }
    }
  }
};

}
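
The helper above depends on the DBG_VALUEs tied to a given instruction sitting contiguously in the vector, in the order buildSchedGraph emits them; scanning in reverse, it can stop at the first non-match after a match. A hypothetical standalone model of that early-exit scan (strings stand in for MachineInstr pointers; not code from this merge):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

typedef std::string Inst; // stand-in for MachineInstr*
typedef std::vector<std::pair<Inst, Inst> > DbgValueVector; // (DBG_VALUE, parent)

static unsigned countAffected(const DbgValueVector &DbgValues,
                              const Inst &Parent) {
  unsigned N = 0;
  const Inst *PrevDbg = nullptr;
  for (DbgValueVector::const_reverse_iterator It = DbgValues.rbegin(),
                                              E = DbgValues.rend();
       It != E; ++It) {
    // Match either the defining instruction or a chained DBG_VALUE.
    if (It->second == Parent || (PrevDbg && It->second == *PrevDbg)) {
      ++N; // the real code would call UpdateDbgValue here
      PrevDbg = &It->first;
    } else if (PrevDbg) {
      break; // past the contiguous run of matches; done
    }
  }
  return N;
}

int main() {
  DbgValueVector DVs;
  DVs.push_back(std::make_pair("dbg1", "defA"));
  DVs.push_back(std::make_pair("dbg2", "defA"));
  DVs.push_back(std::make_pair("dbg3", "defB"));
  assert(countAffected(DVs, "defA") == 2);
  assert(countAffected(DVs, "defB") == 1);
  return 0;
}
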
@ -825,41 +825,25 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
      OS << Name << ":";
  }
  OS << V->getName();

  const DIExpression *Expr = MI->getDebugExpression();
  auto Fragment = Expr->getFragmentInfo();
  if (Fragment)
    OS << " [fragment offset=" << Fragment->OffsetInBits
       << " size=" << Fragment->SizeInBits << "]";
  OS << " <- ";

  // The second operand is only an offset if it's an immediate.
  bool Deref = false;
  bool MemLoc = MI->getOperand(0).isReg() && MI->getOperand(1).isImm();
  int64_t Offset = MemLoc ? MI->getOperand(1).getImm() : 0;
  for (unsigned i = 0; i < Expr->getNumElements(); ++i) {
    uint64_t Op = Expr->getElement(i);
    if (Op == dwarf::DW_OP_LLVM_fragment) {
      // There can't be any operands after this in a valid expression
      break;
    } else if (Deref) {
      // We currently don't support extra Offsets or derefs after the first
      // one. Bail out early instead of emitting an incorrect comment.
      OS << " [complex expression]";
      AP.OutStreamer->emitRawComment(OS.str());
      return true;
    } else if (Op == dwarf::DW_OP_deref) {
      Deref = true;
      continue;
    }

    uint64_t ExtraOffset = Expr->getElement(i++);
    if (Op == dwarf::DW_OP_plus)
      Offset += ExtraOffset;
    else {
      assert(Op == dwarf::DW_OP_minus);
      Offset -= ExtraOffset;
  const DIExpression *Expr = MI->getDebugExpression();
  if (Expr->getNumElements()) {
    OS << '[';
    bool NeedSep = false;
    for (auto Op : Expr->expr_ops()) {
      if (NeedSep)
        OS << ", ";
      else
        NeedSep = true;
      OS << dwarf::OperationEncodingString(Op.getOp());
      for (unsigned I = 0; I < Op.getNumArgs(); ++I)
        OS << ' ' << Op.getArg(I);
    }
    OS << "] ";
  }

  // Register or immediate value. Register 0 means undef.
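
The replacement loop prints each expression operation by name with its arguments, comma-separated inside one bracketed block, instead of folding DW_OP_plus/DW_OP_minus into a single offset and bailing on anything else. A hypothetical sketch of just that separator logic (invented names, not LLVM API):

#include <cstdio>
#include <string>
#include <vector>

struct ExprOp {
  std::string Name;            // e.g. "DW_OP_plus"
  std::vector<unsigned> Args;  // trailing operands, if any
};

static std::string render(const std::vector<ExprOp> &Ops) {
  if (Ops.empty())
    return "";
  std::string Out = "[";
  bool NeedSep = false;
  for (const ExprOp &Op : Ops) {
    if (NeedSep)
      Out += ", ";
    else
      NeedSep = true;
    Out += Op.Name;
    for (unsigned A : Op.Args)
      Out += " " + std::to_string(A);
  }
  return Out + "] ";
}

int main() {
  // Prints "[DW_OP_deref, DW_OP_plus 8] ", mirroring the new comment format.
  std::vector<ExprOp> Ops = {{"DW_OP_deref", {}}, {"DW_OP_plus", {8}}};
  std::printf("%s\n", render(Ops).c_str());
  return 0;
}
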
@ -890,7 +874,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
      const TargetFrameLowering *TFI = AP.MF->getSubtarget().getFrameLowering();
      Offset += TFI->getFrameIndexReference(*AP.MF,
                                            MI->getOperand(0).getIndex(), Reg);
      Deref = true;
      MemLoc = true;
    }
    if (Reg == 0) {
      // Suppress offset, it is not meaningful here.
@ -899,12 +883,12 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
      AP.OutStreamer->emitRawComment(OS.str());
      return true;
    }
    if (MemLoc || Deref)
    if (MemLoc)
      OS << '[';
    OS << PrintReg(Reg, AP.MF->getSubtarget().getRegisterInfo());
  }

  if (MemLoc || Deref)
  if (MemLoc)
    OS << '+' << Offset << ']';

  // NOTE: Want this comment at start of line, don't emit with AddComment.
@ -936,6 +920,16 @@ void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
  if (needsCFIMoves() == CFI_M_None)
    return;

  // If there is no "real" instruction following this CFI instruction, skip
  // emitting it; it would be beyond the end of the function's FDE range.
  auto *MBB = MI.getParent();
  auto I = std::next(MI.getIterator());
  while (I != MBB->end() && I->isTransient())
    ++I;
  if (I == MBB->instr_end() &&
      MBB->getReverseIterator() == MBB->getParent()->rbegin())
    return;

  const std::vector<MCCFIInstruction> &Instrs = MF->getFrameInstructions();
  unsigned CFIIndex = MI.getOperand(0).getCFIIndex();
  const MCCFIInstruction &CFI = Instrs[CFIIndex];
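
In other words, a CFI instruction is skipped when only transient (no machine code) instructions follow it in the function's final block, since its address would fall past the FDE range. A hypothetical standalone model of that check, not code from this merge:

#include <cassert>
#include <cstddef>
#include <vector>

struct Inst { bool Transient; }; // stand-in for MachineInstr::isTransient()

static bool isTrailingCFI(const std::vector<Inst> &Block, std::size_t CFIPos,
                          bool BlockIsLast) {
  // Scan forward past transient instructions, as the new code does.
  std::size_t I = CFIPos + 1;
  while (I != Block.size() && Block[I].Transient)
    ++I;
  // Trailing only if we hit the end of the function's last block.
  return I == Block.size() && BlockIsLast;
}

int main() {
  std::vector<Inst> B = {{false}, {true}, {true}}; // CFI, then only transients
  assert(isTrailingCFI(B, 0, /*BlockIsLast=*/true));   // would skip emission
  assert(!isTrailingCFI(B, 0, /*BlockIsLast=*/false)); // later blocks follow
  return 0;
}
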
@ -1046,15 +1040,23 @@ void AsmPrinter::EmitFunctionBody() {
  // If the function is empty and the object file uses .subsections_via_symbols,
  // then we need to emit *something* to the function body to prevent the
  // labels from collapsing together. Just emit a noop.
  if ((MAI->hasSubsectionsViaSymbols() && !HasAnyRealCode)) {
  // Similarly, don't emit empty functions on Windows either. It can lead to
  // duplicate entries (two functions with the same RVA) in the Guard CF Table
  // after linking, causing the kernel not to load the binary:
  // https://developercommunity.visualstudio.com/content/problem/45366/vc-linker-creates-invalid-dll-with-clang-cl.html
  // FIXME: Hide this behind some API in e.g. MCAsmInfo or MCTargetStreamer.
  const Triple &TT = TM.getTargetTriple();
  if (!HasAnyRealCode && (MAI->hasSubsectionsViaSymbols() ||
                          (TT.isOSWindows() && TT.isOSBinFormatCOFF()))) {
    MCInst Noop;
    MF->getSubtarget().getInstrInfo()->getNoopForMachoTarget(Noop);
    OutStreamer->AddComment("avoids zero-length function");
    MF->getSubtarget().getInstrInfo()->getNoop(Noop);

    // Targets can opt-out of emitting the noop here by leaving the opcode
    // unspecified.
    if (Noop.getOpcode())
    if (Noop.getOpcode()) {
      OutStreamer->AddComment("avoids zero-length function");
      OutStreamer->EmitInstruction(Noop, getSubtargetInfo());
    }
  }

  const Function *F = MF->getFunction();
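
The widened condition can be read as a simple predicate: pad an empty function with a noop when the target uses .subsections_via_symbols, or now also when targeting Windows COFF. A hypothetical sketch of just that predicate, not code from this merge:

#include <cassert>

static bool needsNoopPadding(bool HasAnyRealCode, bool SubsectionsViaSymbols,
                             bool IsOSWindows, bool IsCOFF) {
  // Mirrors the condition in the diff above.
  return !HasAnyRealCode &&
         (SubsectionsViaSymbols || (IsOSWindows && IsCOFF));
}

int main() {
  assert(needsNoopPadding(false, true, false, false)); // Mach-O case
  assert(needsNoopPadding(false, false, true, true));  // new Windows COFF case
  assert(!needsNoopPadding(true, true, true, true));   // real code: no padding
  return 0;
}
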
@ -144,6 +144,9 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
                 " we don't have an asm parser for this target\n");
  Parser->setAssemblerDialect(Dialect);
  Parser->setTargetParser(*TAP.get());
  if (Dialect == InlineAsm::AD_Intel)
    // We need this flag to be able to parse numbers like "0bH"
    Parser->setParsingInlineAsm(true);
  if (MF) {
    const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
    TAP->SetFrameRegister(TRI->getFrameRegister(*MF));
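
The comment refers to MASM-style radix suffixes: with Intel-dialect parsing enabled, "0bH" reads as the hex constant 0x0B (H suffix), whereas a C-style scanner would treat "0b" as a binary prefix and reject the trailing H. A hypothetical parser fragment illustrating the intended reading, not code from this merge:

#include <cassert>
#include <cctype>
#include <string>

// Accept <hexdigits>H, as MASM-style syntax does; return -1 on bad input.
static long parseIntelHex(const std::string &Tok) {
  if (Tok.size() < 2 || (Tok.back() != 'H' && Tok.back() != 'h'))
    return -1;
  long V = 0;
  for (std::size_t i = 0; i + 1 < Tok.size(); ++i) {
    unsigned char C = (unsigned char)Tok[i];
    if (!std::isxdigit(C))
      return -1;
    V = V * 16 + (std::isdigit(C) ? C - '0' : std::tolower(C) - 'a' + 10);
  }
  return V;
}

int main() {
  assert(parseIntelHex("0bH") == 0x0b); // hex, not a "0b" binary prefix
  return 0;
}
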
Some files were not shown because too many files have changed in this diff.