Merge llvm, clang, lld, lldb, compiler-rt and libc++ r306325, and update
build glue.
commit edd7eaddc8
@@ -60,7 +60,8 @@ extern int __xray_remove_handler();
/// start logging their subsequent affected function calls (if patched).
///
/// Returns 1 on success, 0 on error.
extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));
extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType,
uint64_t));

/// Disables the XRay handler used to log first arguments of function calls.
/// Returns 1 on success, 0 on error.
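For context, a minimal caller-side sketch of the prototype declared above (illustrative only and not part of this change; the handler name and the printf logging are assumptions):

#include <cstdint>
#include <cstdio>
#include "xray/xray_interface.h"

// Hypothetical handler matching the declared signature: it receives the
// function id, the entry/exit type, and the first argument of the call.
static void LogFirstArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
  std::printf("xray: func=%d type=%d arg1=%llu\n", FuncId, (int)Type,
              (unsigned long long)Arg1);
}

int main() {
  // Returns 1 on success, 0 on error, as documented above.
  if (!__xray_set_handler_arg1(LogFirstArg))
    std::fprintf(stderr, "failed to install arg1 handler\n");
  return 0;
}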
@@ -160,7 +160,7 @@ struct QuarantineCallback {
}

void *Allocate(uptr size) {
return get_allocator().Allocate(cache_, size, 1, false);
return get_allocator().Allocate(cache_, size, 1);
}

void Deallocate(void *p) {
@@ -266,7 +266,8 @@ struct Allocator {
}

void Initialize(const AllocatorOptions &options) {
allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SetAllocatorMayReturnNull(options.may_return_null);
allocator.Init(options.release_to_os_interval_ms);
SharedInitCode(options);
}
@@ -302,7 +303,7 @@ struct Allocator {
}

void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
SetAllocatorMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
@@ -323,7 +324,7 @@ struct Allocator {
options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->may_return_null = AllocatorMayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@ struct Allocator {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
if (RssLimitExceeded())
return allocator.ReturnNullOrDieOnOOM();
return AsanAllocator::FailureHandler::OnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,23 +408,21 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
return allocator.ReturnNullOrDieOnBadRequest();
return AsanAllocator::FailureHandler::OnBadRequest();
}

AsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated =
allocator.Allocate(cache, needed_size, 8, false);
allocated = allocator.Allocate(cache, needed_size, 8);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated =
allocator.Allocate(cache, needed_size, 8, false);
allocated = allocator.Allocate(cache, needed_size, 8);
}

if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (!allocated)
return nullptr;

if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -634,7 +633,7 @@ struct Allocator {

void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDieOnBadRequest();
return AsanAllocator::FailureHandler::OnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -204,6 +204,14 @@ class ScopedInErrorReport {
error_report_callback(buffer_copy.data());
}

if (halt_on_error_ && common_flags()->abort_on_error) {
// On Android the message is truncated to 512 characters.
// FIXME: implement "compact" error format, possibly without, or with
// highly compressed stack traces?
// FIXME: or just use the summary line as abort message?
SetAbortMessage(buffer_copy.data());
}

// In halt_on_error = false mode, reset the current error object (before
// unlocking).
if (!halt_on_error_)
@@ -1,3 +1,12 @@
___asan_default_options
___asan_default_suppressions
___asan_on_error
___asan_set_shadow_00
___asan_set_shadow_f1
___asan_set_shadow_f2
___asan_set_shadow_f3
___asan_set_shadow_f4
___asan_set_shadow_f5
___asan_set_shadow_f6
___asan_set_shadow_f7
___asan_set_shadow_f8
@@ -477,7 +477,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 8;
return 9;
}

switch (*(u16*)address) {
@@ -495,6 +495,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x5741: // push r15
case 0x9066: // Two-byte NOP
return 2;

case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
return 6;
}

switch (0x00FFFFFF & *(u32*)address) {
@@ -38,6 +38,8 @@
GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)

#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)

namespace __lsan {

void InitializeInterceptors();
@@ -38,8 +38,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
static Allocator allocator;

void InitializeAllocator() {
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
@@ -76,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return nullptr;
}
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -79,8 +79,7 @@ void EnableInThisThread() {

u32 GetCurrentThread() {
thread_local_data_t *data = get_tls_val(false);
CHECK(data);
return data->current_thread_id;
return data ? data->current_thread_id : kInvalidTid;
}

void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
contrib/compiler-rt/lib/lsan/lsan_mac.cc (new file, 192 lines)
@@ -0,0 +1,192 @@
//===-- lsan_mac.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer, a memory leak checker.
//
// Mac-specific details.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "interception/interception.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"

#include <pthread.h>

namespace __lsan {
// Support for the following functions from libdispatch on Mac OS:
// dispatch_async_f()
// dispatch_async()
// dispatch_sync_f()
// dispatch_sync()
// dispatch_after_f()
// dispatch_after()
// dispatch_group_async_f()
// dispatch_group_async()
// TODO(glider): libdispatch API contains other functions that we don't support
// yet.
//
// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
// they can cause jobs to run on a thread different from the current one.
// TODO(glider): if so, we need a test for this (otherwise we should remove
// them).
//
// The following functions use dispatch_barrier_async_f() (which isn't a library
// function but is exported) and are thus supported:
// dispatch_source_set_cancel_handler_f()
// dispatch_source_set_cancel_handler()
// dispatch_source_set_event_handler_f()
// dispatch_source_set_event_handler()
//
// The reference manual for Grand Central Dispatch is available at
// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
// The implementation details are at
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c

typedef void *dispatch_group_t;
typedef void *dispatch_queue_t;
typedef void *dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void *(*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
void *block;
dispatch_function_t func;
u32 parent_tid;
} lsan_block_context_t;

ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
if (GetCurrentThread() == kInvalidTid) {
u32 tid = ThreadCreate(parent_tid, 0, true);
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
}
}

// For use by only those functions that allocated the context via
// alloc_lsan_context().
extern "C" void lsan_dispatch_call_block_and_release(void *block) {
lsan_block_context_t *context = (lsan_block_context_t *)block;
VReport(2,
"lsan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
block, pthread_self());
lsan_register_worker_thread(context->parent_tid);
// Call the original dispatcher for the block.
context->func(context->block);
lsan_free(context);
}

} // namespace __lsan

using namespace __lsan; // NOLINT

// Wrap |ctxt| and |func| into an lsan_block_context_t.
// The caller retains control of the allocated context.
extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
lsan_block_context_t *lsan_ctxt =
(lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
lsan_ctxt->block = ctxt;
lsan_ctxt->func = func;
lsan_ctxt->parent_tid = GetCurrentThread();
return lsan_ctxt;
}

// Define interceptor for dispatch_*_f function with the three most common
// parameters: dispatch_queue_t, context, dispatch_function_t.
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
dispatch_function_t func) { \
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
lsan_dispatch_call_block_and_release); \
}

INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)

INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
void *ctxt, dispatch_function_t func) {
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
lsan_dispatch_call_block_and_release);
}

INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
REAL(dispatch_group_async_f)
(group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
}

#if !defined(MISSING_BLOCKS_SUPPORT)
extern "C" {
void dispatch_async(dispatch_queue_t dq, void (^work)(void));
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
void (^work)(void));
void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
void (^work)(void));
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
void (^work)(void));
void dispatch_source_set_event_handler(dispatch_source_t ds,
void (^work)(void));
}

#define GET_LSAN_BLOCK(work) \
void (^lsan_block)(void); \
int parent_tid = GetCurrentThread(); \
lsan_block = ^(void) { \
lsan_register_worker_thread(parent_tid); \
work(); \
}

INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_async)(dq, lsan_block);
}

INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_group_async)(dg, dq, lsan_block);
}

INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_after)(when, queue, lsan_block);
}

INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
void (^work)(void)) {
if (!work) {
REAL(dispatch_source_set_cancel_handler)(ds, work);
return;
}
GET_LSAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
}

INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
void (^work)(void)) {
GET_LSAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, lsan_block);
}
#endif

#endif // SANITIZER_MAC
@@ -77,7 +77,7 @@ u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
/* arg */ nullptr);
}

void ThreadStart(u32 tid, tid_t os_id) {
void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
@@ -87,7 +87,7 @@ void ThreadStart(u32 tid, tid_t os_id) {
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
thread_registry->StartThread(tid, os_id, workerthread, &args);
}

void ThreadFinish() {
@@ -45,7 +45,7 @@ class ThreadContext : public ThreadContextBase {

void InitializeThreadRegistry();

void ThreadStart(u32 tid, tid_t os_id);
void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);
@@ -119,9 +119,8 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;

void MsanAllocatorInit() {
allocator.Init(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}

AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -139,17 +138,17 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
return allocator.ReturnNullOrDieOnBadRequest();
return Allocator::FailureHandler::OnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, size, alignment, false);
allocated = allocator.Allocate(cache, size, alignment);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment, false);
allocated = allocator.Allocate(cache, size, alignment);
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
@@ -197,7 +196,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {

void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDieOnBadRequest();
return Allocator::FailureHandler::OnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
@@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init(
/* may_return_null */ false, kReleaseToOSIntervalNever);
internal_allocator_instance->Init(kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -108,9 +107,9 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size,
alignment, false);
alignment);
}
return internal_allocator()->Allocate(cache, size, alignment, false);
return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
@@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
return internal_allocator()->ReturnNullOrDieOnBadRequest();
return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@@ -209,12 +208,15 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}

static atomic_uint8_t reporting_out_of_memory = {0};
static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
bool IsAllocatorOutOfMemory() {
return atomic_load_relaxed(&allocator_out_of_memory);
}

void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
@@ -222,4 +224,35 @@ void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
Die();
}

bool AllocatorMayReturnNull() {
return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
atomic_store(&allocator_may_return_null, may_return_null,
memory_order_relaxed);
}

void *ReturnNullOrDieOnFailure::OnBadRequest() {
if (AllocatorMayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}

void *ReturnNullOrDieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
if (AllocatorMayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull();
}

void *DieOnFailure::OnBadRequest() {
ReportAllocatorCannotReturnNull();
}

void *DieOnFailure::OnOOM() {
atomic_store_relaxed(&allocator_out_of_memory, 1);
ReportAllocatorCannotReturnNull();
}

} // namespace __sanitizer
@@ -24,12 +24,28 @@

namespace __sanitizer {

// Returns true if ReportAllocatorCannotReturnNull(true) was called.
// Can be use to avoid memory hungry operations.
bool IsReportingOOM();
// Since flags are immutable and allocator behavior can be changed at runtime
// (unit tests or ASan on Android are some examples), allocator_may_return_null
// flag value is cached here and can be altered later.
bool AllocatorMayReturnNull();
void SetAllocatorMayReturnNull(bool may_return_null);

// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
// Allocator failure handling policies:
// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
// dies otherwise.
struct ReturnNullOrDieOnFailure {
static void *OnBadRequest();
static void *OnOOM();
};
// Always dies on the failure.
struct DieOnFailure {
static void *OnBadRequest();
static void *OnOOM();
};

// Returns true if allocator detected OOM condition. Can be used to avoid memory
// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
bool IsAllocatorOutOfMemory();

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
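The two policy structs declared above are consumed as compile-time template parameters elsewhere in this change (for example LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>). A rough standalone sketch of that pattern follows; the names below are hypothetical stand-ins, not the sanitizer types, and the sketch only illustrates the policy idea:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical policies mirroring ReturnNullOrDieOnFailure / DieOnFailure:
// one reports failure as nullptr, the other terminates the process.
struct ReturnNullPolicy {
  static void *OnOOM() { return nullptr; }
};
struct DiePolicy {
  static void *OnOOM() {
    std::fprintf(stderr, "allocator is terminating the process\n");
    std::abort();
  }
};

// An allocator parameterized by its failure policy, as the combined and
// secondary allocators are after this change.
template <class FailureHandler>
struct TinyAllocator {
  void *Allocate(std::size_t size) {
    void *p = std::malloc(size);
    if (!p)
      return FailureHandler::OnOOM();  // policy decides: null or die
    return p;
  }
};

int main() {
  TinyAllocator<ReturnNullPolicy> soft;  // may return nullptr to the caller
  TinyAllocator<DiePolicy> hard;         // aborts on failure
  std::free(soft.Allocate(64));
  std::free(hard.Allocate(64));
  return 0;
}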
@@ -24,31 +24,26 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
typedef typename SecondaryAllocator::FailureHandler FailureHandler;

void InitLinkerInitialized(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}

void InitLinkerInitialized(
bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.InitLinkerInitialized(may_return_null);
secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
InitCommon(may_return_null, release_to_os_interval_ms);
}

void Init(bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.Init(may_return_null);
void Init(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
secondary_.Init();
stats_.Init();
InitCommon(may_return_null, release_to_os_interval_ms);
}

void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
bool cleared = false) {
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
return ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
@@ -56,49 +51,24 @@ class CombinedAllocator {
// alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
bool from_primary = primary_.CanAllocate(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary (this is no longer true when the primary is
// using a non-fixed base address). The secondary takes care of the
// alignment without such requirement, and allocating 'size' would use
// extraneous memory, so we employ 'original_size'.
if (from_primary)
void *res;
if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
if (!res)
return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
// When serviced by the secondary, the chunk comes from a mmap allocation
// and will be zero'd out anyway. We only need to clear our the chunk if
// it was serviced by the primary, hence using the rounded up 'size'.
if (cleared && res && from_primary)
internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
}

bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}

void *ReturnNullOrDieOnBadRequest() {
if (MayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull(false);
}

void *ReturnNullOrDieOnOOM() {
if (MayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull(true);
}

void SetMayReturnNull(bool may_return_null) {
secondary_.SetMayReturnNull(may_return_null);
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}

s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
@@ -219,6 +189,5 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
atomic_uint8_t may_return_null_;
};
@@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;

typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<> > InternalAllocator;
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
> InternalAllocator;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0);
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
if (UNLIKELY(c->count == 0)) {
if (UNLIKELY(!Refill(allocator, class_id)))
return nullptr;
}
stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
Deallocate(allocator, batch_class_id, b);
}

NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
if (UNLIKELY(!b))
return false;
CHECK_GT(b->Count(), 0);
b->CopyToArray(c->batch);
c->count = b->Count();
DestroyBatch(class_id, allocator, b);
return true;
}

NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
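The Allocate/Refill hunks above follow a pattern that recurs throughout this diff: an operation that used to die on failure now reports failure to its caller, which turns it into nullptr. A simplified standalone sketch of that shape (hypothetical names, not the sanitizer code):

#include <cstdlib>
#include <vector>

// Hypothetical free-list cache: Refill() returns false instead of aborting
// when the backing store cannot produce more chunks, and Allocate() turns
// that into a nullptr for the caller to handle.
struct Cache {
  std::vector<void *> batch;

  // Stand-in for the real batch allocation; may legitimately fail.
  void *AllocateBatchFromBackend() { return std::malloc(4096); }

  bool Refill() {
    void *chunk = AllocateBatchFromBackend();
    if (!chunk)
      return false;            // propagate failure instead of dying
    batch.push_back(chunk);
    return true;
  }

  void *Allocate() {
    if (batch.empty() && !Refill())
      return nullptr;          // caller decides how to react
    void *res = batch.back();
    batch.pop_back();
    return res;
  }
};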
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
uptr first_idx_to_drain = c->count - cnt;
TransferBatch *b = CreateBatch(
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
// Failure to allocate a batch while releasing memory is non recoverable.
// TODO(alekseys): Figure out how to do it without allocating a new batch.
if (UNLIKELY(!b))
DieOnFailure::OnOOM();
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
@@ -24,7 +24,8 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// be returned by MmapOrDie().
//
// Region:
// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
// kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@ class SizeClassAllocator32 {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
if (sci->free_list.empty())
PopulateFreeList(stat, c, sci, class_id);
if (sci->free_list.empty() &&
UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
return nullptr;
CHECK(!sci->free_list.empty());
TransferBatch *b = sci->free_list.front();
sci->free_list.pop_front();
@@ -277,8 +279,10 @@ class SizeClassAllocator32 {

uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
kRegionSize, kRegionSize, "SizeClassAllocator32"));
if (UNLIKELY(!res))
return 0;
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@ class SizeClassAllocator32 {
return &size_class_info_array[class_id];
}

void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
if (UNLIKELY(!reg))
return false;
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = TransferBatch::MaxCached(class_id);
TransferBatch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
b = c->CreateBatch(class_id, this, (TransferBatch*)i);
if (!b)
return false;
b->Clear();
}
b->Add((void*)i);
@@ -314,6 +322,7 @@ class SizeClassAllocator32 {
CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
}
return true;
}

ByteMap possible_regions;
@@ -17,17 +17,19 @@
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
template <class MapUnmapCallback = NoOpMapUnmapCallback>
template <class MapUnmapCallback = NoOpMapUnmapCallback,
class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
public:
void InitLinkerInitialized(bool may_return_null) {
typedef FailureHandlerT FailureHandler;

void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}

void Init(bool may_return_null) {
void Init() {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized(may_return_null);
InitLinkerInitialized();
}

void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
map_size += alignment;
// Overflow.
if (map_size < size)
return ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
return ReturnNullOrDieOnOOM();
return FailureHandler::OnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res);
}

bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}

void *ReturnNullOrDieOnBadRequest() {
if (MayReturnNull()) return nullptr;
ReportAllocatorCannotReturnNull(false);
}

void *ReturnNullOrDieOnOOM() {
if (MayReturnNull()) return nullptr;
ReportAllocatorCannotReturnNull(true);
}

void SetMayReturnNull(bool may_return_null) {
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}

void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
@@ -71,16 +71,25 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
return v;
}

template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
typename T::Type *cmp,
template <typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
Type cmpv = *cmp;
Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
if (prev == cmpv)
return true;
Type prev;
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
if (sizeof(*a) == 8) {
Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
prev = __mips_sync_val_compare_and_swap<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
} else {
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
}
#else
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
#endif
if (prev == cmpv) return true;
*cmp = prev;
return false;
}
@@ -17,6 +17,56 @@

namespace __sanitizer {

// MIPS32 does not support atomic > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic oprations when the size is
// 8 bytes.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
while (*lock) {
}
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
int lock;
char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0};

template <class T>
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
T ret;

__spin_lock(&lock.lock);

ret = *ptr;
*ptr = ret + val;

__spin_unlock(&lock.lock);

return ret;
}

template <class T>
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
T ret;
__spin_lock(&lock.lock);

ret = *ptr;
if (ret == oldval) *ptr = newval;

__spin_unlock(&lock.lock);

return ret;
}
#endif

INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
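The helpers above serialize every 8-byte atomic operation through a single padded spin lock. A minimal standalone illustration of the same idea follows; it is not the sanitizer code, the names are hypothetical, and it is only correct if every access to the location goes through helpers guarded by the same lock:

#include <cstdint>
#include <cstdio>

// One process-wide lock, padded to its own cache line so unrelated data
// cannot share the line with it (mirrors the 32-byte padded struct above).
static struct {
  int lock;
  char pad[64 - sizeof(int)];
} __attribute__((aligned(64))) g_lock = {0, {0}};

static void spin_lock(volatile int *l) {
  while (__sync_lock_test_and_set(l, 1))
    while (*l) { /* spin until the holder releases */ }
}
static void spin_unlock(volatile int *l) { __sync_lock_release(l); }

// Emulated 64-bit compare-and-swap built on the spin lock.
static uint64_t emulated_cas64(volatile uint64_t *ptr, uint64_t oldval,
                               uint64_t newval) {
  spin_lock(&g_lock.lock);
  uint64_t prev = *ptr;
  if (prev == oldval) *ptr = newval;
  spin_unlock(&g_lock.lock);
  return prev;
}

int main() {
  volatile uint64_t v = 1;
  uint64_t prev = emulated_cas64(&v, 1, 2);
  std::printf("prev=%llu now=%llu\n", (unsigned long long)prev,
              (unsigned long long)v);
  return 0;
}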
@@ -53,8 +103,15 @@ INLINE typename T::Type atomic_load(
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
typename T::Type volatile *val_ptr =
const_cast<typename T::Type volatile *>(&a->val_dont_use);
v = __mips_sync_fetch_and_add<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), 0);
#else
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
#endif
}
return v;
}
@@ -84,7 +141,14 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
typename T::Type volatile *val_ptr =
const_cast<typename T::Type volatile *>(&a->val_dont_use);
cur = __mips_sync_val_compare_and_swap<u64>(
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
#else
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
#endif
if (cmp == v)
break;
cmp = cur;
@@ -95,7 +95,9 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Dies on all but out of memory errors, in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
@@ -808,8 +810,11 @@ INLINE void LogMessageOnPrintf(const char *str) {}
#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
@@ -919,6 +924,10 @@ const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG.
bool GetRandom(void *buffer, uptr length);

} // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -1604,6 +1604,32 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
return 0;
}

bool GetRandom(void *buffer, uptr length) {
if (!buffer || !length || length > 256)
return false;
#if defined(__NR_getrandom)
static atomic_uint8_t skip_getrandom_syscall;
if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
// Up to 256 bytes, getrandom will not be interrupted.
uptr res = internal_syscall(SYSCALL(getrandom), buffer, length, 0);
int rverrno = 0;
if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
atomic_store_relaxed(&skip_getrandom_syscall, 1);
else if (res == length)
return true;
}
#endif
uptr fd = internal_open("/dev/urandom", O_RDONLY);
if (internal_iserror(fd))
return false;
// internal_read deals with EINTR.
uptr res = internal_read(fd, buffer, length);
if (internal_iserror(res))
return false;
internal_close(fd);
return true;
}

} // namespace __sanitizer

#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
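A small usage sketch for the interface implemented above (illustrative only; it assumes code running inside the __sanitizer runtime, where GetRandom and u64 are visible, and the fixed fallback seed is a hypothetical choice, not part of the diff):

// Requests are limited to 256 bytes; an 8-byte seed is well within that.
u64 seed;
if (!GetRandom(&seed, sizeof(seed)))
  seed = 0xDEADBEEFULL;  // hypothetical fallback when no randomness is available
// seed can now be fed to a PRNG, e.g. for heap layout randomization.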
@@ -551,6 +551,13 @@ void LogMessageOnPrintf(const char *str) {
WriteToSyslog(str);
}

#if SANITIZER_ANDROID && __ANDROID_API__ >= 21
extern "C" void android_set_abort_message(const char *msg);
void SetAbortMessage(const char *str) { android_set_abort_message(str); }
#else
void SetAbortMessage(const char *str) {}
#endif

#endif // SANITIZER_LINUX

} // namespace __sanitizer
@@ -923,6 +923,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
// Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length) {
UNIMPLEMENTED();
}

} // namespace __sanitizer

#endif // SANITIZER_MAC
@@ -164,11 +164,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
// We want to map a chunk of address space aligned to 'alignment'.
// We do it by maping a bit more and then unmaping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
if (!map_res)
return nullptr;
uptr map_end = map_res + map_size;
uptr res = map_res;
if (res & (alignment - 1)) // Not aligned.
@@ -189,25 +189,7 @@ void UnsetAlternateSignalStack() {

static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
switch (GetHandleSignalMode(signum)) {
case kHandleSignalNo:
return;
case kHandleSignalYes: {
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
CHECK_EQ(0, internal_sigaction(signum, nullptr, &sigact));
if (sigact.sa_flags & SA_SIGINFO) {
if (sigact.sa_sigaction) return;
} else {
if (sigact.sa_handler != SIG_DFL && sigact.sa_handler != SIG_IGN &&
sigact.sa_handler != SIG_ERR)
return;
}
break;
}
case kHandleSignalExclusive:
break;
}
if (GetHandleSignalMode(signum) == kHandleSignalNo) return;

struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
@@ -495,7 +495,7 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
if (IsReportingOOM()) {
if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
@@ -131,18 +131,24 @@ void UnmapOrDie(void *addr, uptr size) {
}
}

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
const char *mmap_type) {
error_t last_error = GetLastError();
if (last_error == ERROR_NOT_ENOUGH_MEMORY)
return nullptr;
ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0) {
error_t last_error = GetLastError();
if (last_error != ERROR_NOT_ENOUGH_MEMORY)
ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
}
if (rv == 0)
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
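ReturnNullptrOnOOMOrDie above centralizes the new contract: only a genuine out-of-memory condition may surface as nullptr, while any other mapping failure still aborts. A hedged POSIX-flavoured analogue of the same contract, written as a hypothetical sketch rather than the sanitizer implementation:

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

// Map anonymous memory; return nullptr only when the kernel reports ENOMEM,
// and treat every other failure as fatal (mirrors MmapOrDieOnFatalError).
static void *MapOrDieOnFatalError(size_t size) {
  void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p != MAP_FAILED)
    return p;
  if (errno == ENOMEM)
    return nullptr;            // caller may handle OOM gracefully
  std::perror("mmap");         // anything else is a hard error
  std::abort();
}

int main() {
  void *p = MapOrDieOnFatalError(1 << 20);
  std::printf("mapped: %p\n", p);
  return 0;
}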
@@ -152,7 +158,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
uptr mapped_addr =
(uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (!mapped_addr)
ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

// If we got it right on the first try, return. Otherwise, unmap it and go to
// the slow path.
@@ -172,8 +178,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
mapped_addr =
(uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
if (!mapped_addr)
ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
GetLastError());
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

// Find the aligned address.
uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
@@ -191,7 +196,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {

// Fail if we can't make this work quickly.
if (retries == kMaxRetries && mapped_addr == 0)
ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

return (void *)mapped_addr;
}
@@ -1002,6 +1007,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
// Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length) {
UNIMPLEMENTED();
}

} // namespace __sanitizer

#endif // _WIN32
@@ -273,6 +273,8 @@ struct ScudoAllocator {
static const uptr MaxAllowedMallocSize =
FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

typedef ReturnNullOrDieOnFailure FailureHandler;

ScudoBackendAllocator BackendAllocator;
ScudoQuarantine AllocatorQuarantine;
@@ -326,7 +328,8 @@ struct ScudoAllocator {
DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
DeleteSizeMismatch = Options.DeleteSizeMismatch;
ZeroContents = Options.ZeroContents;
BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
SetAllocatorMayReturnNull(Options.MayReturnNull);
BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
AllocatorQuarantine.Init(
static_cast<uptr>(Options.QuarantineSizeMb) << 20,
static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
@@ -354,11 +357,11 @@ struct ScudoAllocator {
dieWithMessage("ERROR: alignment is not a power of 2\n");
}
if (Alignment > MaxAlignment)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();
if (Alignment < MinAlignment)
Alignment = MinAlignment;
if (Size >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();
if (Size == 0)
Size = 1;
@@ -366,7 +369,7 @@ struct ScudoAllocator {
uptr AlignedSize = (Alignment > MinAlignment) ?
NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
if (AlignedSize >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();

// Primary and Secondary backed allocations have a different treatment. We
// deal with alignment requirements of Primary serviced allocations here,
@@ -391,7 +394,7 @@ struct ScudoAllocator {
AllocationAlignment, FromPrimary);
}
if (!Ptr)
return BackendAllocator.ReturnNullOrDieOnOOM();
return FailureHandler::OnOOM();

// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && FromPrimary)
@@ -583,7 +586,7 @@ struct ScudoAllocator {
initThreadMaybe();
uptr Total = NMemB * Size;
if (Size != 0 && Total / Size != NMemB) // Overflow check
return BackendAllocator.ReturnNullOrDieOnBadRequest();
return FailureHandler::OnBadRequest();
return allocate(Total, MinAlignment, FromMalloc, true);
}
@@ -23,11 +23,10 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator>
class ScudoCombinedAllocator {
public:
void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
void Init(s32 ReleaseToOSIntervalMs) {
Primary.Init(ReleaseToOSIntervalMs);
Secondary.Init(AllocatorMayReturnNull);
Secondary.Init();
Stats.Init();
atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}

void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
@@ -37,18 +36,6 @@ class ScudoCombinedAllocator {
return Secondary.Allocate(&Stats, Size, Alignment);
}

void *ReturnNullOrDieOnBadRequest() {
if (atomic_load_relaxed(&MayReturnNull))
return nullptr;
ReportAllocatorCannotReturnNull(false);
}

void *ReturnNullOrDieOnOOM() {
if (atomic_load_relaxed(&MayReturnNull))
return nullptr;
ReportAllocatorCannotReturnNull(true);
}

void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
if (FromPrimary)
Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
@@ -78,7 +65,6 @@ class ScudoCombinedAllocator {
PrimaryAllocator Primary;
SecondaryAllocator Secondary;
AllocatorGlobalStats Stats;
atomic_uint8_t MayReturnNull;
};

#endif // SCUDO_ALLOCATOR_COMBINED_H_
@@ -24,9 +24,8 @@
class ScudoLargeMmapAllocator {
public:

void Init(bool AllocatorMayReturnNull) {
void Init() {
PageSize = GetPageSizeCached();
atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}

void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
@@ -42,7 +41,7 @@ class ScudoLargeMmapAllocator {

uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
return ReturnNullOrDieOnOOM();
return ReturnNullOrDieOnFailure::OnOOM();
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
uptr MapEnd = MapBeg + MapSize;
@@ -96,12 +95,6 @@ class ScudoLargeMmapAllocator {
return reinterpret_cast<void *>(Ptr);
}

void *ReturnNullOrDieOnOOM() {
if (atomic_load_relaxed(&MayReturnNull))
return nullptr;
ReportAllocatorCannotReturnNull(true);
}

void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
{
@@ -140,7 +133,6 @@ class ScudoLargeMmapAllocator {
const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
uptr PageSize;
SpinMutex StatsMutex;
atomic_uint8_t MayReturnNull;
};

#endif // SCUDO_ALLOCATOR_SECONDARY_H_
@@ -112,9 +112,8 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}

void InitializeAllocator() {
allocator()->Init(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {
@@ -151,7 +150,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return allocator()->ReturnNullOrDieOnBadRequest();
return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -164,7 +163,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
return allocator()->ReturnNullOrDieOnBadRequest();
return Allocator::FailureHandler::OnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -473,9 +473,12 @@ void __ubsan::__ubsan_handle_function_type_mismatch_abort(
Die();
}

static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts,
bool IsAttr) {
SourceLocation Loc = Data->Loc.acquire();
static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr,
ReportOptions Opts, bool IsAttr) {
if (!LocPtr)
UNREACHABLE("source location pointer is null!");

SourceLocation Loc = LocPtr->acquire();
ErrorType ET = ErrorType::InvalidNullReturn;

if (ignoreReport(Loc, Opts, ET))
@ -491,25 +494,29 @@ static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts,
|
||||
: "_Nonnull return type annotation");
|
||||
}
|
||||
|
||||
void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
|
||||
void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data,
|
||||
SourceLocation *LocPtr) {
|
||||
GET_REPORT_OPTIONS(false);
|
||||
handleNonNullReturn(Data, Opts, true);
|
||||
handleNonNullReturn(Data, LocPtr, Opts, true);
|
||||
}
|
||||
|
||||
void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
|
||||
void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data,
|
||||
SourceLocation *LocPtr) {
|
||||
GET_REPORT_OPTIONS(true);
|
||||
handleNonNullReturn(Data, Opts, true);
|
||||
handleNonNullReturn(Data, LocPtr, Opts, true);
|
||||
Die();
|
||||
}
|
||||
|
||||
void __ubsan::__ubsan_handle_nullability_return(NonNullReturnData *Data) {
|
||||
void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data,
|
||||
SourceLocation *LocPtr) {
|
||||
GET_REPORT_OPTIONS(false);
|
||||
handleNonNullReturn(Data, Opts, false);
|
||||
handleNonNullReturn(Data, LocPtr, Opts, false);
|
||||
}
|
||||
|
||||
void __ubsan::__ubsan_handle_nullability_return_abort(NonNullReturnData *Data) {
|
||||
void __ubsan::__ubsan_handle_nullability_return_v1_abort(
|
||||
NonNullReturnData *Data, SourceLocation *LocPtr) {
|
||||
GET_REPORT_OPTIONS(true);
|
||||
handleNonNullReturn(Data, Opts, false);
|
||||
handleNonNullReturn(Data, LocPtr, Opts, false);
|
||||
Die();
|
||||
}
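These handlers fire at run time when a function whose return is promised to be
non-null actually produces a null pointer. A small self-contained trigger,
assuming the program is built with -fsanitize=returns-nonnull-attribute (for
the attribute form) or -fsanitize=nullability-return (for _Nonnull); the
example itself is illustrative and not part of this change:

  __attribute__((returns_nonnull)) const char *pick(const char *s) {
    return s;   // calling pick(nullptr) reaches the _v1 handler, which now
  }             // receives the return SourceLocation as a separate argument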
@ -132,14 +132,13 @@ RECOVERABLE(function_type_mismatch,
|
||||
ValueHandle Val)
|
||||
|
||||
struct NonNullReturnData {
|
||||
SourceLocation Loc;
|
||||
SourceLocation AttrLoc;
|
||||
};
|
||||
|
||||
/// \brief Handle returning null from function with the returns_nonnull
|
||||
/// attribute, or a return type annotated with _Nonnull.
|
||||
RECOVERABLE(nonnull_return, NonNullReturnData *Data)
|
||||
RECOVERABLE(nullability_return, NonNullReturnData *Data)
|
||||
RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
|
||||
RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
|
||||
|
||||
struct NonNullArgData {
|
||||
SourceLocation Loc;
|
||||
|
@ -28,12 +28,12 @@ INTERFACE_FUNCTION(__ubsan_handle_negate_overflow)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_arg)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_return)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
|
||||
INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
|
||||
|
@ -312,7 +312,7 @@ __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
return patchFunction(FuncId, false);
}

int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
if (!__sanitizer::atomic_load(&XRayInitialized,
__sanitizer::memory_order_acquire))
return 0;
@ -320,7 +320,7 @@ int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
// A relaxed write might not be visible even if the current thread gets
// scheduled on a different CPU/NUMA node. We need to wait for everyone to
// have this handler installed for consistency of collected data across CPUs.
__sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(Handler),
__sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
__sanitizer::memory_order_release);
return 1;
}
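A handler installed through this entry point receives the function id, the
entry type, and the first argument of each patched call. A minimal usage
sketch; the handler body and the include path are assumptions, not taken from
this change:

  #include "xray/xray_interface.h"

  // Record the first argument of every instrumented call (user-supplied sink).
  void LogFirstArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
    // e.g. append (FuncId, Arg1) to an in-memory ring buffer
  }

  int InstallArgLogger() {
    return __xray_set_handler_arg1(LogFirstArg);
  }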
@ -1154,6 +1154,7 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
|
||||
__attribute__((availability(watchos,strict,introduced=3.0)))
|
||||
#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS __attribute__((unavailable))
|
||||
#define _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH __attribute__((unavailable))
|
||||
#define _LIBCPP_AVAILABILITY_BAD_ANY_CAST __attribute__((unavailable))
|
||||
#define _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS \
|
||||
__attribute__((availability(macosx,strict,introduced=10.12))) \
|
||||
__attribute__((availability(ios,strict,introduced=10.0))) \
|
||||
@ -1175,25 +1176,35 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
|
||||
#define _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR \
|
||||
__attribute__((availability(macosx,strict,introduced=10.9))) \
|
||||
__attribute__((availability(ios,strict,introduced=7.0)))
|
||||
#define _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION \
|
||||
__attribute__((availability(macosx,strict,introduced=10.13))) \
|
||||
__attribute__((availability(ios,strict,introduced=11.0))) \
|
||||
__attribute__((availability(tvos,strict,introduced=11.0))) \
|
||||
__attribute__((availability(watchos,strict,introduced=4.0)))
|
||||
#else
|
||||
#define _LIBCPP_AVAILABILITY_SHARED_MUTEX
|
||||
#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
|
||||
#define _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
|
||||
#define _LIBCPP_AVAILABILITY_BAD_ANY_CAST
|
||||
#define _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS
|
||||
#define _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE
|
||||
#define _LIBCPP_AVAILABILITY_FUTURE_ERROR
|
||||
#define _LIBCPP_AVAILABILITY_TYPEINFO_VTABLE
|
||||
#define _LIBCPP_AVAILABILITY_LOCALE_CATEGORY
|
||||
#define _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR
|
||||
#define _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION
|
||||
#endif
|
||||
|
||||
// Define availability that depends on _LIBCPP_NO_EXCEPTIONS.
|
||||
#ifdef _LIBCPP_NO_EXCEPTIONS
|
||||
#define _LIBCPP_AVAILABILITY_DYNARRAY
|
||||
#define _LIBCPP_AVAILABILITY_FUTURE
|
||||
#define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST
|
||||
#else
|
||||
#define _LIBCPP_AVAILABILITY_DYNARRAY _LIBCPP_AVAILABILITY_BAD_ARRAY_LENGTH
|
||||
#define _LIBCPP_AVAILABILITY_FUTURE _LIBCPP_AVAILABILITY_FUTURE_ERROR
|
||||
#define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST \
|
||||
_LIBCPP_AVAILABILITY_BAD_ANY_CAST
|
||||
#endif
|
||||
|
||||
// Availability of stream API in the dylib got dropped and re-added. The
|
||||
@ -1202,9 +1213,9 @@ _LIBCPP_FUNC_VIS extern "C" void __sanitizer_annotate_contiguous_container(
|
||||
// availability(ios,introduced=7.0)
|
||||
#if defined(_LIBCPP_USE_AVAILABILITY_APPLE) && \
|
||||
((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
|
||||
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ <= 1090) || \
|
||||
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1090) || \
|
||||
(defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
|
||||
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ <= 70000))
|
||||
__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 70000))
|
||||
#define _LIBCPP_AVAILABILITY_NO_STREAMS_EXTERN_TEMPLATE
|
||||
#endif
|
||||
|
||||
|
@ -89,7 +89,7 @@ inline namespace fundamentals_v1 {
|
||||
|
||||
_LIBCPP_BEGIN_NAMESPACE_LFTS
|
||||
|
||||
class _LIBCPP_EXCEPTION_ABI bad_any_cast : public bad_cast
|
||||
class _LIBCPP_EXCEPTION_ABI _LIBCPP_AVAILABILITY_BAD_ANY_CAST bad_any_cast : public bad_cast
|
||||
{
|
||||
public:
|
||||
virtual const char* what() const _NOEXCEPT;
|
||||
@ -98,6 +98,7 @@ public:
|
||||
#if _LIBCPP_STD_VER > 11 // C++ > 11
|
||||
|
||||
_LIBCPP_NORETURN inline _LIBCPP_ALWAYS_INLINE
|
||||
_LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST
|
||||
void __throw_bad_any_cast()
|
||||
{
|
||||
#ifndef _LIBCPP_NO_EXCEPTIONS
|
||||
@ -506,7 +507,7 @@ void swap(any & __lhs, any & __rhs) _NOEXCEPT
|
||||
}
|
||||
|
||||
template <class _ValueType>
|
||||
_LIBCPP_INLINE_VISIBILITY
|
||||
_LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST
|
||||
_ValueType any_cast(any const & __v)
|
||||
{
|
||||
static_assert(
|
||||
@ -522,7 +523,7 @@ _ValueType any_cast(any const & __v)
|
||||
}
|
||||
|
||||
template <class _ValueType>
|
||||
_LIBCPP_INLINE_VISIBILITY
|
||||
_LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST
|
||||
_ValueType any_cast(any & __v)
|
||||
{
|
||||
static_assert(
|
||||
@ -537,7 +538,7 @@ _ValueType any_cast(any & __v)
|
||||
}
|
||||
|
||||
template <class _ValueType>
|
||||
_LIBCPP_INLINE_VISIBILITY
|
||||
_LIBCPP_INLINE_VISIBILITY _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST
|
||||
_ValueType any_cast(any && __v)
|
||||
{
|
||||
static_assert(
|
||||
|
@ -193,20 +193,20 @@ _LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operato
|
||||
#endif
|
||||
|
||||
#ifndef _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new(std::size_t __sz, std::align_val_t) _THROW_BAD_ALLOC;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new(std::size_t __sz, std::align_val_t, const std::nothrow_t&) _NOEXCEPT _NOALIAS;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete(void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void* operator new(std::size_t __sz, std::align_val_t) _THROW_BAD_ALLOC;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void* operator new(std::size_t __sz, std::align_val_t, const std::nothrow_t&) _NOEXCEPT _NOALIAS;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete(void* __p, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete(void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
|
||||
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete(void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete(void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
|
||||
#endif
|
||||
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz, std::align_val_t) _THROW_BAD_ALLOC;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void* operator new[](std::size_t __sz, std::align_val_t, const std::nothrow_t&) _NOEXCEPT _NOALIAS;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS void operator delete[](void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void* operator new[](std::size_t __sz, std::align_val_t) _THROW_BAD_ALLOC;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void* operator new[](std::size_t __sz, std::align_val_t, const std::nothrow_t&) _NOEXCEPT _NOALIAS;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete[](void* __p, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete[](void* __p, std::align_val_t, const std::nothrow_t&) _NOEXCEPT;
|
||||
#ifndef _LIBCPP_HAS_NO_SIZED_DEALLOCATION
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE void operator delete[](void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
|
||||
_LIBCPP_OVERRIDABLE_FUNC_VIS _LIBCPP_AVAILABILITY_ALIGNED_ALLOCATION void operator delete[](void* __p, std::size_t __sz, std::align_val_t) _NOEXCEPT;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -81,6 +81,20 @@ template<class InputIterator, class OutputIterator, class T, class BinaryOperati
|
||||
exclusive_scan(InputIterator first, InputIterator last,
|
||||
OutputIterator result, T init, BinaryOperation binary_op); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator>
|
||||
OutputIterator
|
||||
inclusive_scan(InputIterator first, InputIterator last, OutputIterator result); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator, class BinaryOperation>
|
||||
OutputIterator
|
||||
inclusive_scan(InputIterator first, InputIterator last,
|
||||
OutputIterator result, BinaryOperation binary_op); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator, class BinaryOperation, class T>
|
||||
OutputIterator
|
||||
inclusive_scan(InputIterator first, InputIterator last,
|
||||
OutputIterator result, BinaryOperation binary_op, T init); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator, class T,
|
||||
class BinaryOperation, class UnaryOperation>
|
||||
OutputIterator
|
||||
@ -88,6 +102,21 @@ template<class InputIterator, class OutputIterator, class T,
|
||||
OutputIterator result, T init,
|
||||
BinaryOperation binary_op, UnaryOperation unary_op); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator,
|
||||
class BinaryOperation, class UnaryOperation>
|
||||
OutputIterator
|
||||
transform_inclusive_scan(InputIterator first, InputIterator last,
|
||||
OutputIterator result,
|
||||
BinaryOperation binary_op, UnaryOperation unary_op); // C++17
|
||||
|
||||
template<class InputIterator, class OutputIterator,
|
||||
class BinaryOperation, class UnaryOperation, class T>
|
||||
OutputIterator
|
||||
transform_inclusive_scan(InputIterator first, InputIterator last,
|
||||
OutputIterator result,
|
||||
BinaryOperation binary_op, UnaryOperation unary_op,
|
||||
T init); // C++17
|
||||
|
||||
template <class InputIterator, class OutputIterator>
|
||||
OutputIterator
|
||||
adjacent_difference(InputIterator first, InputIterator last, OutputIterator result);
|
||||
@ -295,6 +324,38 @@ exclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
return _VSTD::exclusive_scan(__first, __last, __result, __init, _VSTD::plus<>());
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator, class _Tp, class _BinaryOp>
|
||||
_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
_OutputIterator __result, _BinaryOp __b, _Tp __init)
|
||||
{
|
||||
for (; __first != __last; ++__first, (void) ++__result) {
|
||||
__init = __b(__init, *__first);
|
||||
*__result = __init;
|
||||
}
|
||||
return __result;
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator, class _BinaryOp>
|
||||
_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
_OutputIterator __result, _BinaryOp __b)
|
||||
{
|
||||
if (__first != __last) {
|
||||
typename std::iterator_traits<_InputIterator>::value_type __init = *__first;
|
||||
*__result++ = __init;
|
||||
if (++__first != __last)
|
||||
return _VSTD::inclusive_scan(__first, __last, __result, __b, __init);
|
||||
}
|
||||
|
||||
return __result;
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator>
|
||||
_OutputIterator inclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
_OutputIterator __result)
|
||||
{
|
||||
return _VSTD::inclusive_scan(__first, __last, __result, std::plus<>());
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator, class _Tp,
|
||||
class _BinaryOp, class _UnaryOp>
|
||||
inline _LIBCPP_INLINE_VISIBILITY
|
||||
@ -316,6 +377,32 @@ transform_exclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
}
|
||||
return __result;
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator, class _Tp, class _BinaryOp, class _UnaryOp>
|
||||
_OutputIterator transform_inclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
_OutputIterator __result, _BinaryOp __b, _UnaryOp __u, _Tp __init)
|
||||
{
|
||||
for (; __first != __last; ++__first, (void) ++__result) {
|
||||
__init = __b(__init, __u(*__first));
|
||||
*__result = __init;
|
||||
}
|
||||
|
||||
return __result;
|
||||
}
|
||||
|
||||
template <class _InputIterator, class _OutputIterator, class _BinaryOp, class _UnaryOp>
|
||||
_OutputIterator transform_inclusive_scan(_InputIterator __first, _InputIterator __last,
|
||||
_OutputIterator __result, _BinaryOp __b, _UnaryOp __u)
|
||||
{
|
||||
if (__first != __last) {
|
||||
typename std::iterator_traits<_InputIterator>::value_type __init = __u(*__first);
|
||||
*__result++ = __init;
|
||||
if (++__first != __last)
|
||||
return _VSTD::transform_inclusive_scan(__first, __last, __result, __b, __u, __init);
|
||||
}
|
||||
|
||||
return __result;
|
||||
}
|
||||
#endif
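The scans defined above follow C++17 semantics: an inclusive scan includes the
current element in each partial result, and the transform_* variants apply a
unary operation to each element first. A short usage sketch of the new
overloads:

  #include <functional>
  #include <numeric>
  #include <vector>

  std::vector<int> v{1, 2, 3, 4};
  std::vector<int> out(v.size());

  std::inclusive_scan(v.begin(), v.end(), out.begin());
  // out == {1, 3, 6, 10}

  std::transform_inclusive_scan(v.begin(), v.end(), out.begin(),
                                std::plus<>(), [](int x) { return x * x; });
  // out == {1, 5, 14, 30}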
|
||||
|
||||
template <class _InputIterator, class _OutputIterator>
|
||||
|
@ -1116,6 +1116,8 @@ public:
|
||||
template <
|
||||
class _Arg,
|
||||
enable_if_t<!is_same_v<decay_t<_Arg>, variant>, int> = 0,
|
||||
enable_if_t<!__is_inplace_type<decay_t<_Arg>>::value, int> = 0,
|
||||
enable_if_t<!__is_inplace_index<decay_t<_Arg>>::value, int> = 0,
|
||||
class _Tp = __variant_detail::__best_match_t<_Arg, _Types...>,
|
||||
size_t _Ip =
|
||||
__find_detail::__find_unambiguous_index_sfinae<_Tp, _Types...>::value,
|
||||
|
@ -261,7 +261,8 @@ struct PathParser {
|
||||
string_view_pair separate_filename(string_view_t const & s) {
|
||||
if (s == "." || s == ".." || s.empty()) return string_view_pair{s, ""};
|
||||
auto pos = s.find_last_of('.');
|
||||
if (pos == string_view_t::npos) return string_view_pair{s, string_view{}};
|
||||
if (pos == string_view_t::npos)
|
||||
return string_view_pair{s, string_view_t{}};
|
||||
return string_view_pair{s.substr(0, pos), s.substr(pos)};
|
||||
}
|
||||
|
||||
@ -396,7 +397,7 @@ int path::__compare(string_view_t __s) const {
|
||||
size_t hash_value(const path& __p) noexcept {
|
||||
auto PP = PathParser::CreateBegin(__p.native());
|
||||
size_t hash_value = 0;
|
||||
std::hash<string_view> hasher;
|
||||
std::hash<string_view_t> hasher;
|
||||
while (PP) {
|
||||
hash_value = __hash_combine(hash_value, hasher(*PP));
|
||||
++PP;
|
||||
|
@ -29,6 +29,8 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct LLVMOpaqueSharedModule *LLVMSharedModuleRef;
|
||||
typedef struct LLVMOpaqueSharedObjectBuffer *LLVMSharedObjectBufferRef;
|
||||
typedef struct LLVMOrcOpaqueJITStack *LLVMOrcJITStackRef;
|
||||
typedef uint32_t LLVMOrcModuleHandle;
|
||||
typedef uint64_t LLVMOrcTargetAddress;
|
||||
@ -38,6 +40,45 @@ typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
|
||||
|
||||
typedef enum { LLVMOrcErrSuccess = 0, LLVMOrcErrGeneric } LLVMOrcErrorCode;
|
||||
|
||||
/**
|
||||
* Turn an LLVMModuleRef into an LLVMSharedModuleRef.
|
||||
*
|
||||
* The JIT uses shared ownership for LLVM modules, since it is generally
|
||||
* difficult to know when the JIT will be finished with a module (and the JIT
|
||||
* has no way of knowing when a user may be finished with one).
|
||||
*
|
||||
* Calling this method with an LLVMModuleRef creates a shared-pointer to the
|
||||
* module, and returns a reference to this shared pointer.
|
||||
*
|
||||
* The shared module should be disposed when finished with by calling
* LLVMOrcDisposeSharedModuleRef (not LLVMDisposeModule). The Module will be
* deleted when the last shared pointer owner relinquishes it.
|
||||
*/
|
||||
|
||||
LLVMSharedModuleRef LLVMOrcMakeSharedModule(LLVMModuleRef Mod);
|
||||
|
||||
/**
|
||||
* Dispose of a shared module.
|
||||
*
|
||||
* The module should not be accessed after this call. The module will be
|
||||
* deleted once all clients (including the JIT itself) have released their
|
||||
* shared pointers.
|
||||
*/
|
||||
|
||||
void LLVMOrcDisposeSharedModuleRef(LLVMSharedModuleRef SharedMod);
|
||||
|
||||
/**
|
||||
* Get an LLVMSharedObjectBufferRef from an LLVMMemoryBufferRef.
|
||||
*/
|
||||
LLVMSharedObjectBufferRef
|
||||
LLVMOrcMakeSharedObjectBuffer(LLVMMemoryBufferRef ObjBuffer);
|
||||
|
||||
/**
|
||||
* Dispose of a shared object buffer.
|
||||
*/
|
||||
void
|
||||
LLVMOrcDisposeSharedObjectBufferRef(LLVMSharedObjectBufferRef SharedObjBuffer);
|
||||
|
||||
/**
|
||||
* Create an ORC JIT stack.
|
||||
*
|
||||
@ -95,7 +136,8 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
|
||||
* Add module to be eagerly compiled.
|
||||
*/
|
||||
LLVMOrcModuleHandle
|
||||
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
|
||||
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
|
||||
LLVMSharedModuleRef Mod,
|
||||
LLVMOrcSymbolResolverFn SymbolResolver,
|
||||
void *SymbolResolverCtx);
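Put together, the new shared-ownership flow wraps an existing module before
handing it to the JIT and releases the caller's reference afterwards. A
minimal sketch; creation of the JIT stack, resolver and context is elided and
the local variable names are illustrative:

  LLVMSharedModuleRef SM = LLVMOrcMakeSharedModule(M);
  LLVMOrcModuleHandle H =
      LLVMOrcAddEagerlyCompiledIR(JIT, SM, SymbolResolver, ResolverCtx);
  LLVMOrcDisposeSharedModuleRef(SM);  // the JIT keeps its own shared reference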
|
||||
|
||||
@ -103,7 +145,8 @@ LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
|
||||
* Add module to be lazily compiled one function at a time.
|
||||
*/
|
||||
LLVMOrcModuleHandle
|
||||
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
|
||||
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
|
||||
LLVMSharedModuleRef Mod,
|
||||
LLVMOrcSymbolResolverFn SymbolResolver,
|
||||
void *SymbolResolverCtx);
|
||||
|
||||
@ -111,7 +154,7 @@ LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
|
||||
* Add an object file.
|
||||
*/
|
||||
LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
|
||||
LLVMObjectFileRef Obj,
|
||||
LLVMSharedObjectBufferRef Obj,
|
||||
LLVMOrcSymbolResolverFn SymbolResolver,
|
||||
void *SymbolResolverCtx);
|
||||
|
||||
|
@ -140,8 +140,8 @@ enum lostFraction { // Example of truncated bits:
|
||||
// implementation classes. This struct should not define any non-static data
|
||||
// members.
|
||||
struct APFloatBase {
|
||||
// TODO remove this and use APInt typedef directly.
|
||||
typedef APInt::WordType integerPart;
|
||||
static const unsigned integerPartWidth = APInt::APINT_BITS_PER_WORD;
|
||||
|
||||
/// A signed type to represent a floating point numbers unbiased exponent.
|
||||
typedef signed short ExponentType;
|
||||
|
@ -213,6 +213,12 @@ class LLVM_NODISCARD APInt {
|
||||
/// out-of-line slow case for countLeadingZeros
|
||||
unsigned countLeadingZerosSlowCase() const LLVM_READONLY;
|
||||
|
||||
/// out-of-line slow case for countLeadingOnes.
|
||||
unsigned countLeadingOnesSlowCase() const LLVM_READONLY;
|
||||
|
||||
/// out-of-line slow case for countTrailingZeros.
|
||||
unsigned countTrailingZerosSlowCase() const LLVM_READONLY;
|
||||
|
||||
/// out-of-line slow case for countTrailingOnes
|
||||
unsigned countTrailingOnesSlowCase() const LLVM_READONLY;
|
||||
|
||||
@ -383,7 +389,7 @@ class LLVM_NODISCARD APInt {
|
||||
bool isAllOnesValue() const {
|
||||
if (isSingleWord())
|
||||
return U.VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
|
||||
return countPopulationSlowCase() == BitWidth;
|
||||
return countTrailingOnesSlowCase() == BitWidth;
|
||||
}
|
||||
|
||||
/// \brief Determine if all bits are clear
|
||||
@ -408,7 +414,9 @@ class LLVM_NODISCARD APInt {
|
||||
/// This checks to see if the value of this APInt is the maximum signed
|
||||
/// value for the APInt's bit width.
|
||||
bool isMaxSignedValue() const {
|
||||
return !isNegative() && countPopulation() == BitWidth - 1;
|
||||
if (isSingleWord())
|
||||
return U.VAL == ((WordType(1) << (BitWidth - 1)) - 1);
|
||||
return !isNegative() && countTrailingOnesSlowCase() == BitWidth - 1;
|
||||
}
|
||||
|
||||
/// \brief Determine if this is the smallest unsigned value.
|
||||
@ -422,7 +430,9 @@ class LLVM_NODISCARD APInt {
|
||||
/// This checks to see if the value of this APInt is the minimum signed
|
||||
/// value for the APInt's bit width.
|
||||
bool isMinSignedValue() const {
|
||||
return isNegative() && isPowerOf2();
|
||||
if (isSingleWord())
|
||||
return U.VAL == (WordType(1) << (BitWidth - 1));
|
||||
return isNegative() && countTrailingZerosSlowCase() == BitWidth - 1;
|
||||
}
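The single-word fast paths above are just bit tests on the canonical patterns.
For an 8-bit APInt, the maximum signed value is 0b01111111 (sign bit clear,
seven trailing ones) and the minimum signed value is 0b10000000 (only the sign
bit set), which is also what the new trailing-ones / trailing-zeros checks
recognise on the multi-word path. A quick illustration (sketch assuming
llvm/ADT/APInt.h and <cassert>):

  APInt Max(8, 0x7f);   // not negative, countTrailingOnes() == 7 == BitWidth - 1
  APInt Min(8, 0x80);   // negative,     countTrailingZeros() == 7 == BitWidth - 1
  assert(Max.isMaxSignedValue() && Min.isMinSignedValue());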
|
||||
|
||||
/// \brief Check if this APInt has an N-bits unsigned integer value.
|
||||
@ -1574,7 +1584,11 @@ class LLVM_NODISCARD APInt {
|
||||
///
|
||||
/// \returns 0 if the high order bit is not set, otherwise returns the number
|
||||
/// of 1 bits from the most significant to the least
|
||||
unsigned countLeadingOnes() const LLVM_READONLY;
|
||||
unsigned countLeadingOnes() const {
|
||||
if (isSingleWord())
|
||||
return llvm::countLeadingOnes(U.VAL << (APINT_BITS_PER_WORD - BitWidth));
|
||||
return countLeadingOnesSlowCase();
|
||||
}
|
||||
|
||||
/// Computes the number of leading bits of this APInt that are equal to its
|
||||
/// sign bit.
|
||||
@ -1590,7 +1604,11 @@ class LLVM_NODISCARD APInt {
|
||||
///
|
||||
/// \returns BitWidth if the value is zero, otherwise returns the number of
|
||||
/// zeros from the least significant bit to the first one bit.
|
||||
unsigned countTrailingZeros() const LLVM_READONLY;
|
||||
unsigned countTrailingZeros() const {
|
||||
if (isSingleWord())
|
||||
return std::min(unsigned(llvm::countTrailingZeros(U.VAL)), BitWidth);
|
||||
return countTrailingZerosSlowCase();
|
||||
}
|
||||
|
||||
/// \brief Count the number of trailing one bits.
|
||||
///
|
||||
|
@ -15,10 +15,12 @@
|
||||
#define LLVM_ADT_STRINGEXTRAS_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/SmallString.h"
|
||||
#include "llvm/ADT/StringRef.h"
|
||||
#include <cassert>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <iterator>
|
||||
#include <string>
|
||||
@ -129,6 +131,32 @@ template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
return !S.getAsInteger(Base, Num);
}

namespace detail {
template <typename N>
inline bool to_float(const Twine &T, N &Num, N (*StrTo)(const char *, char **)) {
SmallString<32> Storage;
StringRef S = T.toNullTerminatedStringRef(Storage);
char *End;
N Temp = StrTo(S.data(), &End);
if (*End != '\0')
return false;
Num = Temp;
return true;
}
}

inline bool to_float(const Twine &T, float &Num) {
return detail::to_float(T, Num, strtof);
}

inline bool to_float(const Twine &T, double &Num) {
return detail::to_float(T, Num, strtod);
}

inline bool to_float(const Twine &T, long double &Num) {
return detail::to_float(T, Num, strtold);
}
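These helpers give string-to-float conversion the same bool-returning shape as
to_integer above: the conversion succeeds only if the entire string is
consumed. A brief usage sketch:

  double Seconds;
  if (llvm::to_float("2.5", Seconds))
    ; // Seconds == 2.5
  if (!llvm::to_float("2.5s", Seconds))
    ; // trailing characters make the conversion fail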

static inline std::string utostr(uint64_t X, bool isNeg = false) {
char Buffer[21];
char *BufPtr = std::end(Buffer);
@ -147,6 +147,7 @@ class Triple {
|
||||
enum OSType {
|
||||
UnknownOS,
|
||||
|
||||
Ananas,
|
||||
CloudABI,
|
||||
Darwin,
|
||||
DragonFly,
|
||||
|
@ -93,6 +93,13 @@ class LazyValueInfo {
|
||||
Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
|
||||
Instruction *CxtI = nullptr);
|
||||
|
||||
/// Return the ConstantRange constraint that is known to hold for the
/// specified value on the specified edge. This may only be called
/// on integer-typed Values.
|
||||
ConstantRange getConstantRangeOnEdge(Value *V, BasicBlock *FromBB,
|
||||
BasicBlock *ToBB,
|
||||
Instruction *CxtI = nullptr);
|
||||
|
||||
/// Inform the analysis cache that we have threaded an edge from
|
||||
/// PredBB to OldSucc to be from PredBB to NewSucc instead.
|
||||
void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
|
||||
|
@ -39,6 +39,15 @@ bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
|
||||
const Instruction *CtxI = nullptr,
|
||||
const DominatorTree *DT = nullptr);
|
||||
|
||||
/// Returns true if V is always dereferenceable for Size bytes with alignment
/// greater than or equal to that requested. If the context instruction is
/// specified, this performs context-sensitive analysis and returns true if
/// the pointer is dereferenceable at the specified instruction.
|
||||
bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
|
||||
const APInt &Size, const DataLayout &DL,
|
||||
const Instruction *CtxI = nullptr,
|
||||
const DominatorTree *DT = nullptr);
|
||||
|
||||
/// Return true if we know that executing a load from this value cannot trap.
|
||||
///
|
||||
/// If DT and ScanFrom are specified this method performs context-sensitive
|
||||
|
@ -91,8 +91,9 @@ getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
|
||||
|
||||
/// getLoopPreheader - If there is a preheader for this loop, return it. A
|
||||
/// loop has a preheader if there is only one edge to the header of the loop
|
||||
/// from outside of the loop. If this is the case, the block branching to the
|
||||
/// header of the loop is the preheader node.
|
||||
/// from outside of the loop and it is legal to hoist instructions into the
|
||||
/// predecessor. If this is the case, the block branching to the header of the
|
||||
/// loop is the preheader node.
|
||||
///
|
||||
/// This method returns null if there is no preheader for the loop.
|
||||
///
|
||||
@ -102,6 +103,10 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
|
||||
BlockT *Out = getLoopPredecessor();
|
||||
if (!Out) return nullptr;
|
||||
|
||||
// Make sure we are allowed to hoist instructions into the predecessor.
|
||||
if (!Out->isLegalToHoistInto())
|
||||
return nullptr;
|
||||
|
||||
// Make sure there is only one exit out of the preheader.
|
||||
typedef GraphTraits<BlockT*> BlockTraits;
|
||||
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
|
||||
|
@ -784,7 +784,9 @@ class ScalarEvolution {
|
||||
}
|
||||
|
||||
/// Determine the range for a particular SCEV.
|
||||
ConstantRange getRange(const SCEV *S, RangeSignHint Hint);
|
||||
/// NOTE: This returns a reference to an entry in a cache. It must be
|
||||
/// copied if it's needed for longer.
|
||||
const ConstantRange &getRangeRef(const SCEV *S, RangeSignHint Hint);
|
||||
|
||||
/// Determines the range for the affine SCEVAddRecExpr {\p Start,+,\p Stop}.
|
||||
/// Helper for \c getRange.
|
||||
@ -1464,15 +1466,35 @@ class ScalarEvolution {
|
||||
uint32_t GetMinTrailingZeros(const SCEV *S);
|
||||
|
||||
/// Determine the unsigned range for a particular SCEV.
|
||||
///
|
||||
/// NOTE: This returns a copy of the reference returned by getRangeRef.
|
||||
ConstantRange getUnsignedRange(const SCEV *S) {
|
||||
return getRange(S, HINT_RANGE_UNSIGNED);
|
||||
return getRangeRef(S, HINT_RANGE_UNSIGNED);
|
||||
}
|
||||
|
||||
/// Determine the min of the unsigned range for a particular SCEV.
|
||||
APInt getUnsignedRangeMin(const SCEV *S) {
|
||||
return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMin();
|
||||
}
|
||||
|
||||
/// Determine the max of the unsigned range for a particular SCEV.
|
||||
APInt getUnsignedRangeMax(const SCEV *S) {
|
||||
return getRangeRef(S, HINT_RANGE_UNSIGNED).getUnsignedMax();
|
||||
}
|
||||
|
||||
/// Determine the signed range for a particular SCEV.
|
||||
///
|
||||
/// NOTE: This returns a copy of the reference returned by getRangeRef.
|
||||
ConstantRange getSignedRange(const SCEV *S) {
|
||||
return getRange(S, HINT_RANGE_SIGNED);
|
||||
return getRangeRef(S, HINT_RANGE_SIGNED);
|
||||
}
|
||||
|
||||
/// Determine the min of the signed range for a particular SCEV.
|
||||
APInt getSignedRangeMin(const SCEV *S) {
|
||||
return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMin();
|
||||
}
|
||||
|
||||
/// Determine the max of the signed range for a particular SCEV.
|
||||
APInt getSignedRangeMax(const SCEV *S) {
|
||||
return getRangeRef(S, HINT_RANGE_SIGNED).getSignedMax();
|
||||
}
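The practical effect of the new accessors is that callers that only need one
endpoint of a range no longer copy a whole ConstantRange out of the cache. A
before/after sketch, where SE is a ScalarEvolution instance and S an arbitrary
SCEV:

  // Before: materialises a temporary ConstantRange just to read its max.
  APInt UMax = SE.getUnsignedRange(S).getUnsignedMax();

  // After: reads the endpoint straight off the cached reference.
  APInt UMax2 = SE.getUnsignedRangeMax(S);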
|
||||
|
||||
/// Test if the given expression is known to be negative.
|
||||
|
@ -46,6 +46,12 @@ static const char ClGlObjMagic[] = {
|
||||
'\xac', '\x9b', '\xd6', '\xb6', '\x22', '\x26', '\x53', '\xc2',
|
||||
};
|
||||
|
||||
// The signature bytes that start a .res file.
|
||||
static const char WinResMagic[] = {
|
||||
'\x00', '\x00', '\x00', '\x00', '\x20', '\x00', '\x00', '\x00',
|
||||
'\xff', '\xff', '\x00', '\x00', '\xff', '\xff', '\x00', '\x00',
|
||||
};
|
||||
|
||||
// Sizes in bytes of various things in the COFF format.
|
||||
enum {
|
||||
Header16Size = 20,
|
||||
|
@ -484,7 +484,7 @@ struct PubIndexEntryDescriptor {
|
||||
};
|
||||
|
||||
/// Constants that define the DWARF format as 32 or 64 bit.
|
||||
enum DwarfFormat { DWARF32, DWARF64 };
|
||||
enum DwarfFormat : uint8_t { DWARF32, DWARF64 };
|
||||
|
||||
} // End of namespace dwarf
|
||||
|
||||
|
@ -78,7 +78,8 @@ enum {
|
||||
MH_DEAD_STRIPPABLE_DYLIB = 0x00400000u,
|
||||
MH_HAS_TLV_DESCRIPTORS = 0x00800000u,
|
||||
MH_NO_HEAP_EXECUTION = 0x01000000u,
|
||||
MH_APP_EXTENSION_SAFE = 0x02000000u
|
||||
MH_APP_EXTENSION_SAFE = 0x02000000u,
|
||||
MH_NLIST_OUTOFSYNC_WITH_DYLDINFO = 0x04000000u
|
||||
};
|
||||
|
||||
enum : uint32_t {
|
||||
|
@ -176,6 +176,11 @@ enum class ValType {
|
||||
// Linking metadata kinds.
|
||||
enum : unsigned {
|
||||
WASM_STACK_POINTER = 0x1,
|
||||
WASM_SYMBOL_INFO = 0x2,
|
||||
};
|
||||
|
||||
enum : unsigned {
|
||||
WASM_SYMBOL_FLAG_WEAK = 0x1,
|
||||
};
|
||||
|
||||
#define WASM_RELOC(name, value) name = value,
|
||||
|
@ -121,8 +121,8 @@ class DIEAbbrev : public FoldingSetNode {
|
||||
/// Print the abbreviation using the specified asm printer.
|
||||
void Emit(const AsmPrinter *AP) const;
|
||||
|
||||
void print(raw_ostream &O);
|
||||
void dump();
|
||||
void print(raw_ostream &O) const;
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
@ -780,7 +780,7 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
|
||||
DIEValue findAttribute(dwarf::Attribute Attribute) const;
|
||||
|
||||
void print(raw_ostream &O, unsigned IndentCount = 0) const;
|
||||
void dump();
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
@ -29,6 +29,7 @@ class MachineOperand;
|
||||
class MachineRegisterInfo;
|
||||
class RegisterBankInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterClass;
|
||||
class TargetRegisterInfo;
|
||||
|
||||
/// Container class for CodeGen predicate results.
|
||||
@ -79,6 +80,16 @@ class InstructionSelector {
|
||||
|
||||
InstructionSelector();
|
||||
|
||||
/// Constrain a register operand of an instruction \p I to a specified
|
||||
/// register class. This could involve inserting COPYs before (for uses) or
|
||||
/// after (for defs) and may replace the operand of \p I.
|
||||
/// \returns whether operand regclass constraining succeeded.
|
||||
bool constrainOperandRegToRegClass(MachineInstr &I, unsigned OpIdx,
|
||||
const TargetRegisterClass &RC,
|
||||
const TargetInstrInfo &TII,
|
||||
const TargetRegisterInfo &TRI,
|
||||
const RegisterBankInfo &RBI) const;
|
||||
|
||||
/// Mutate the newly-selected instruction \p I to constrain its (possibly
|
||||
/// generic) virtual register operands to the instruction's register class.
|
||||
/// This could involve inserting COPYs before (for uses) or after (for defs).
|
||||
|
@ -59,7 +59,7 @@ class Legalizer : public MachineFunctionPass {
|
||||
const TargetInstrInfo &TII);
|
||||
|
||||
bool combineMerges(MachineInstr &MI, MachineRegisterInfo &MRI,
|
||||
const TargetInstrInfo &TII);
|
||||
const TargetInstrInfo &TII, MachineIRBuilder &MIRBuilder);
|
||||
|
||||
bool runOnMachineFunction(MachineFunction &MF) override;
|
||||
};
|
||||
|
@ -471,10 +471,12 @@ class MachineIRBuilder {
|
||||
/// Build and insert \p Res = IMPLICIT_DEF.
|
||||
MachineInstrBuilder buildUndef(unsigned Dst);
|
||||
|
||||
/// Build and insert \p Res<def> = G_SEQUENCE \p Op0, \p Idx0...
|
||||
/// Build and insert instructions to put \p Ops together at the specified
/// \p Indices to form a larger register.
|
||||
///
|
||||
/// G_SEQUENCE inserts each element of Ops into an IMPLICIT_DEF register,
|
||||
/// where each entry starts at the bit-index specified by \p Indices.
|
||||
/// If the types of the input registers are uniform and cover the entirety of
|
||||
/// \p Res then a G_MERGE_VALUES will be produced. Otherwise an IMPLICIT_DEF
|
||||
/// followed by a sequence of G_INSERT instructions.
|
||||
///
|
||||
/// \pre setBasicBlock or setMI must have been called.
|
||||
/// \pre The final element of the sequence must not extend past the end of the
|
||||
@ -482,11 +484,8 @@ class MachineIRBuilder {
|
||||
/// \pre The bits defined by each Op (derived from index and scalar size) must
|
||||
/// not overlap.
|
||||
/// \pre \p Indices must be in ascending order of bit position.
|
||||
///
|
||||
/// \return a MachineInstrBuilder for the newly created instruction.
|
||||
MachineInstrBuilder buildSequence(unsigned Res,
|
||||
ArrayRef<unsigned> Ops,
|
||||
ArrayRef<uint64_t> Indices);
|
||||
void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
|
||||
ArrayRef<uint64_t> Indices);
|
||||
|
||||
/// Build and insert \p Res<def> = G_MERGE_VALUES \p Op0, ...
|
||||
///
|
||||
@ -513,24 +512,6 @@ class MachineIRBuilder {
|
||||
/// \return a MachineInstrBuilder for the newly created instruction.
|
||||
MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, unsigned Op);
|
||||
|
||||
void addUsesWithIndices(MachineInstrBuilder MIB) {}
|
||||
|
||||
template <typename... ArgTys>
|
||||
void addUsesWithIndices(MachineInstrBuilder MIB, unsigned Reg,
|
||||
unsigned BitIndex, ArgTys... Args) {
|
||||
MIB.addUse(Reg).addImm(BitIndex);
|
||||
addUsesWithIndices(MIB, Args...);
|
||||
}
|
||||
|
||||
template <typename... ArgTys>
|
||||
MachineInstrBuilder buildSequence(unsigned Res, unsigned Op,
|
||||
unsigned Index, ArgTys... Args) {
|
||||
MachineInstrBuilder MIB =
|
||||
buildInstr(TargetOpcode::G_SEQUENCE).addDef(Res);
|
||||
addUsesWithIndices(MIB, Op, Index, Args...);
|
||||
return MIB;
|
||||
}
|
||||
|
||||
MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
|
||||
unsigned Op, unsigned Index);
|
||||
|
||||
|
@ -29,13 +29,26 @@ class RegisterBankInfo;
|
||||
class TargetInstrInfo;
|
||||
class TargetPassConfig;
|
||||
class TargetRegisterInfo;
|
||||
class TargetRegisterClass;
|
||||
class Twine;
|
||||
class ConstantFP;
|
||||
|
||||
/// Try to constrain Reg to the specified register class. If this fails,
|
||||
/// create a new virtual register in the correct class and insert a COPY before
|
||||
/// \p InsertPt. The debug location of \p InsertPt is used for the new copy.
|
||||
///
|
||||
/// \return The virtual register constrained to the right register class.
|
||||
unsigned constrainRegToClass(MachineRegisterInfo &MRI,
|
||||
const TargetInstrInfo &TII,
|
||||
const RegisterBankInfo &RBI,
|
||||
MachineInstr &InsertPt, unsigned Reg,
|
||||
const TargetRegisterClass &RegClass);
|
||||
|
||||
/// Try to constrain Reg so that it is usable by argument OpIdx of the
|
||||
/// provided MCInstrDesc \p II. If this fails, create a new virtual
|
||||
/// register in the correct class and insert a COPY before \p InsertPt.
|
||||
/// The debug location of \p InsertPt is used for the new copy.
|
||||
/// This is equivalent to constrainRegToClass() with RegClass obtained from the
|
||||
/// MCInstrDesc. The debug location of \p InsertPt is used for the new copy.
|
||||
///
|
||||
/// \return The virtual register constrained to the right register class.
|
||||
unsigned constrainOperandRegClass(const MachineFunction &MF,
|
||||
|
@ -196,7 +196,7 @@ class LexicalScopes {
|
||||
}
|
||||
|
||||
/// dump - Print data structures to dbgs().
|
||||
void dump();
|
||||
void dump() const;
|
||||
|
||||
/// getOrCreateAbstractScope - Find or create an abstract lexical scope.
|
||||
LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);
|
||||
|
@ -376,6 +376,9 @@ class MachineBasicBlock
|
||||
/// Indicates if this is the entry block of a cleanup funclet.
|
||||
void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }
|
||||
|
||||
/// Returns true if it is legal to hoist instructions into this block.
|
||||
bool isLegalToHoistInto() const;
|
||||
|
||||
// Code Layout methods.
|
||||
|
||||
/// Move 'this' block before or after the specified block. This only moves
|
||||
|
@ -59,6 +59,11 @@ struct MachinePointerInfo {
|
||||
return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O);
|
||||
}
|
||||
|
||||
/// Return true if memory region [V, V+Offset+Size) is known to be
|
||||
/// dereferenceable.
|
||||
bool isDereferenceable(unsigned Size, LLVMContext &C,
|
||||
const DataLayout &DL) const;
|
||||
|
||||
/// Return the LLVM IR address space number that this pointer points into.
|
||||
unsigned getAddrSpace() const;
|
||||
|
||||
|
@ -77,33 +77,6 @@ class MachineModuleInfoELF : public MachineModuleInfoImpl {
|
||||
SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
|
||||
};
|
||||
|
||||
/// MachineModuleInfoWasm - This is a MachineModuleInfoImpl implementation
|
||||
/// for Wasm targets.
|
||||
class MachineModuleInfoWasm : public MachineModuleInfoImpl {
|
||||
/// WebAssembly global variables defined by CodeGen.
|
||||
std::vector<wasm::Global> Globals;
|
||||
|
||||
/// The WebAssembly global variable which is the stack pointer.
|
||||
unsigned StackPointerGlobal;
|
||||
|
||||
virtual void anchor(); // Out of line virtual method.
|
||||
public:
|
||||
MachineModuleInfoWasm(const MachineModuleInfo &)
|
||||
: StackPointerGlobal(-1U) {}
|
||||
|
||||
void addGlobal(const wasm::Global &G) { Globals.push_back(G); }
|
||||
const std::vector<wasm::Global> &getGlobals() const { return Globals; }
|
||||
|
||||
bool hasStackPointerGlobal() const {
|
||||
return StackPointerGlobal != -1U;
|
||||
}
|
||||
unsigned getStackPointerGlobal() const {
|
||||
assert(hasStackPointerGlobal() && "Stack ptr global hasn't been set");
|
||||
return StackPointerGlobal;
|
||||
}
|
||||
void setStackPointerGlobal(unsigned Global) { StackPointerGlobal = Global; }
|
||||
};
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
|
@ -203,7 +203,7 @@ class MachineSchedStrategy {
|
||||
MachineBasicBlock::iterator End,
|
||||
unsigned NumRegionInstrs) {}
|
||||
|
||||
virtual void dumpPolicy() {}
|
||||
virtual void dumpPolicy() const {}
|
||||
|
||||
/// Check if pressure tracking is needed before building the DAG and
|
||||
/// initializing this strategy. Called after initPolicy.
|
||||
@ -555,7 +555,7 @@ class ReadyQueue {
|
||||
return Queue.begin() + idx;
|
||||
}
|
||||
|
||||
void dump();
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
/// Summarize the unscheduled region.
|
||||
@ -756,7 +756,7 @@ class SchedBoundary {
|
||||
SUnit *pickOnlyChoice();
|
||||
|
||||
#ifndef NDEBUG
|
||||
void dumpScheduledState();
|
||||
void dumpScheduledState() const;
|
||||
#endif
|
||||
};
|
||||
|
||||
@ -890,7 +890,7 @@ class GenericScheduler : public GenericSchedulerBase {
|
||||
MachineBasicBlock::iterator End,
|
||||
unsigned NumRegionInstrs) override;
|
||||
|
||||
void dumpPolicy() override;
|
||||
void dumpPolicy() const override;
|
||||
|
||||
bool shouldTrackPressure() const override {
|
||||
return RegionPolicy.ShouldTrackPressure;
|
||||
|
contrib/llvm/include/llvm/CodeGen/MacroFusion.h (new file, 41 lines)
@ -0,0 +1,41 @@
//===- MacroFusion.h - Macro Fusion ------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains the definition of the DAG scheduling mutation to
/// pair instructions back to back.
//
//===----------------------------------------------------------------------===//

#include <functional>
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"

namespace llvm {

/// \brief Check if the instr pair, FirstMI and SecondMI, should be fused
/// together. Given SecondMI, when FirstMI is unspecified, then check if
/// SecondMI may be part of a fused pair at all.
typedef std::function<bool(const TargetInstrInfo &TII,
const TargetSubtargetInfo &TSI,
const MachineInstr *FirstMI,
const MachineInstr &SecondMI)> ShouldSchedulePredTy;

/// \brief Create a DAG scheduling mutation to pair instructions back to back
/// for instructions that benefit according to the target-specific
/// shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
createMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);

/// \brief Create a DAG scheduling mutation to pair branch instructions with one
/// of their predecessors back to back for instructions that benefit according
/// to the target-specific shouldScheduleAdjacent predicate function.
std::unique_ptr<ScheduleDAGMutation>
createBranchMacroFusionDAGMutation(ShouldSchedulePredTy shouldScheduleAdjacent);

} // end namespace llvm
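A target opts in by handing its own fusion predicate to one of these factories
when it sets up its scheduler. A rough sketch of how that registration might
look inside a target's scheduler construction hook; the predicate name and its
body are hypothetical:

  static bool shouldScheduleAdjacent(const TargetInstrInfo &TII,
                                     const TargetSubtargetInfo &TSI,
                                     const MachineInstr *FirstMI,
                                     const MachineInstr &SecondMI) {
    // target-specific pairing rules go here
    return false;
  }

  // ...
  DAG->addMutation(createMacroFusionDAGMutation(shouldScheduleAdjacent));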
@ -156,12 +156,24 @@ class RegScavenger {
|
||||
/// available and do the appropriate bookkeeping. SPAdj is the stack
|
||||
/// adjustment due to call frame, it's passed along to eliminateFrameIndex().
|
||||
/// Returns the scavenged register.
|
||||
/// This is deprecated as it depends on the quality of the kill flags being
|
||||
/// present; Use scavengeRegisterBackwards() instead!
|
||||
unsigned scavengeRegister(const TargetRegisterClass *RegClass,
|
||||
MachineBasicBlock::iterator I, int SPAdj);
|
||||
unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
|
||||
return scavengeRegister(RegClass, MBBI, SPAdj);
|
||||
}
|
||||
|
||||
/// Make a register of the specific register class available from the current
|
||||
/// position backwards to the place before \p To. If \p RestoreAfter is true
|
||||
/// this includes the instruction following the current position.
|
||||
/// SPAdj is the stack adjustment due to call frame, it's passed along to
|
||||
/// eliminateFrameIndex().
|
||||
/// Returns the scavenged register.
|
||||
unsigned scavengeRegisterBackwards(const TargetRegisterClass &RC,
|
||||
MachineBasicBlock::iterator To,
|
||||
bool RestoreAfter, int SPAdj);
|
||||
|
||||
/// Tell the scavenger a register is used.
|
||||
void setRegUsed(unsigned Reg, LaneBitmask LaneMask = LaneBitmask::getAll());
|
||||
|
||||
@ -202,6 +214,12 @@ class RegScavenger {
|
||||
|
||||
/// Mark live-in registers of basic block as used.
|
||||
void setLiveInsUsed(const MachineBasicBlock &MBB);
|
||||
|
||||
/// Spill a register after position \p After and reload it before position
|
||||
/// \p UseMI.
|
||||
ScavengedInfo &spill(unsigned Reg, const TargetRegisterClass &RC, int SPAdj,
|
||||
MachineBasicBlock::iterator After,
|
||||
MachineBasicBlock::iterator &UseMI);
|
||||
};
|
||||
|
||||
/// Replaces all frame index virtual registers with physical registers. Uses the
|
||||
|
@ -0,0 +1,64 @@
|
||||
//===-- llvm/CodeGen/SelectionDAGAddressAnalysis.h ------- DAG Address Analysis
|
||||
//---*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
|
||||
#define LLVM_CODEGEN_SELECTIONDAGADDRESSANALYSIS_H
|
||||
|
||||
#include "llvm/CodeGen/ISDOpcodes.h"
|
||||
#include "llvm/CodeGen/SelectionDAG.h"
|
||||
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
||||
|
||||
namespace llvm {
|
||||
/// Helper struct to parse and store a memory address as base + index + offset.
|
||||
/// We ignore sign extensions when it is safe to do so.
|
||||
/// The following two expressions are not equivalent. To differentiate we need
|
||||
/// to store whether there was a sign extension involved in the index
|
||||
/// computation.
|
||||
/// (load (i64 add (i64 copyfromreg %c)
|
||||
/// (i64 signextend (add (i8 load %index)
|
||||
/// (i8 1))))
|
||||
/// vs
|
||||
///
|
||||
/// (load (i64 add (i64 copyfromreg %c)
|
||||
/// (i64 signextend (i32 add (i32 signextend (i8 load %index))
|
||||
/// (i32 1)))))
|
||||
class BaseIndexOffset {
|
||||
private:
|
||||
SDValue Base;
|
||||
SDValue Index;
|
||||
int64_t Offset;
|
||||
bool IsIndexSignExt;
|
||||
|
||||
public:
|
||||
BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {}
|
||||
|
||||
BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset,
|
||||
bool IsIndexSignExt)
|
||||
: Base(Base), Index(Index), Offset(Offset),
|
||||
IsIndexSignExt(IsIndexSignExt) {}
|
||||
|
||||
SDValue getBase() { return Base; }
|
||||
SDValue getIndex() { return Index; }
|
||||
|
||||
bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG) {
|
||||
int64_t Off;
|
||||
return equalBaseIndex(Other, DAG, Off);
|
||||
}
|
||||
|
||||
bool equalBaseIndex(BaseIndexOffset &Other, const SelectionDAG &DAG,
|
||||
int64_t &Off);
|
||||
|
||||
/// Parses tree in Ptr for base, index, offset addresses.
|
||||
static BaseIndexOffset match(SDValue Ptr);
|
||||
};
|
||||
} // namespace llvm
|
||||
|
||||
#endif
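The intended use of this helper is to decide whether two memory operands share
a base and index and differ only by a constant byte offset, for example when
considering whether consecutive stores can be merged. A brief sketch, assuming
Ptr1 and Ptr2 are the address operands of two memory nodes in the same
SelectionDAG:

  BaseIndexOffset A = BaseIndexOffset::match(Ptr1);
  BaseIndexOffset B = BaseIndexOffset::match(Ptr2);
  int64_t Off;
  if (A.equalBaseIndex(B, DAG, Off)) {
    // same base and index; the two accesses are Off bytes apart
  }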
|
@ -2107,7 +2107,7 @@ class MaskedGatherScatterSDNode : public MemSDNode {
|
||||
public:
|
||||
friend class SelectionDAG;
|
||||
|
||||
MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
|
||||
MaskedGatherScatterSDNode(unsigned NodeTy, unsigned Order,
|
||||
const DebugLoc &dl, SDVTList VTs, EVT MemVT,
|
||||
MachineMemOperand *MMO)
|
||||
: MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {}
|
||||
|
@ -27,9 +27,12 @@ namespace codeview {
|
||||
|
||||
template <typename Kind> class CVRecord {
|
||||
public:
|
||||
CVRecord() = default;
|
||||
CVRecord() : Type(static_cast<Kind>(0)) {}
|
||||
|
||||
CVRecord(Kind K, ArrayRef<uint8_t> Data) : Type(K), RecordData(Data) {}
|
||||
|
||||
bool valid() const { return Type != static_cast<Kind>(0); }
|
||||
|
||||
uint32_t length() const { return RecordData.size(); }
|
||||
Kind kind() const { return Type; }
|
||||
ArrayRef<uint8_t> data() const { return RecordData; }
|
||||
|
@ -402,6 +402,16 @@ enum class LocalSymFlags : uint16_t {
|
||||
};
|
||||
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(LocalSymFlags)
|
||||
|
||||
/// Corresponds to the CV_PUBSYMFLAGS bitfield.
|
||||
enum class PublicSymFlags : uint32_t {
|
||||
None = 0,
|
||||
Code = 1 << 0,
|
||||
Function = 1 << 1,
|
||||
Managed = 1 << 2,
|
||||
MSIL = 1 << 3,
|
||||
};
|
||||
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(PublicSymFlags)
|
||||
|
||||
/// Corresponds to the CV_PROCFLAGS bitfield.
|
||||
enum class ProcSymFlags : uint8_t {
|
||||
None = 0,
|
||||
|
@ -51,11 +51,23 @@ class DebugSubsectionRecordBuilder {
|
||||
public:
|
||||
DebugSubsectionRecordBuilder(std::shared_ptr<DebugSubsection> Subsection,
|
||||
CodeViewContainer Container);
|
||||
|
||||
/// Use this to copy existing subsections directly from source to destination.
|
||||
/// For example, line table subsections in an object file only need to be
|
||||
/// relocated before being copied into the PDB.
|
||||
DebugSubsectionRecordBuilder(const DebugSubsectionRecord &Contents,
|
||||
CodeViewContainer Container);
|
||||
|
||||
uint32_t calculateSerializedLength();
|
||||
Error commit(BinaryStreamWriter &Writer) const;
|
||||
|
||||
private:
|
||||
/// The subsection to build. Will be null if Contents is non-empty.
|
||||
std::shared_ptr<DebugSubsection> Subsection;
|
||||
|
||||
/// The bytes of the subsection. Only non-empty if Subsection is null.
|
||||
DebugSubsectionRecord Contents;
|
||||
|
||||
CodeViewContainer Container;
|
||||
};
|
||||
|
||||
|
@ -22,6 +22,7 @@ namespace codeview {
|
||||
ArrayRef<EnumEntry<SymbolKind>> getSymbolTypeNames();
|
||||
ArrayRef<EnumEntry<TypeLeafKind>> getTypeLeafNames();
|
||||
ArrayRef<EnumEntry<uint16_t>> getRegisterNames();
|
||||
ArrayRef<EnumEntry<uint32_t>> getPublicSymFlagNames();
|
||||
ArrayRef<EnumEntry<uint8_t>> getProcSymFlagNames();
|
||||
ArrayRef<EnumEntry<uint16_t>> getLocalFlagNames();
|
||||
ArrayRef<EnumEntry<uint8_t>> getFrameCookieKindNames();
|
||||
|
@ -11,18 +11,15 @@
|
||||
#define LLVM_DEBUGINFO_CODEVIEW_LAZYRANDOMTYPECOLLECTION_H
|
||||
|
||||
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeDatabaseVisitor.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Support/Error.h"
|
||||
#include "llvm/Support/StringSaver.h"
|
||||
|
||||
namespace llvm {
|
||||
namespace codeview {
|
||||
|
||||
class TypeDatabase;
|
||||
class TypeVisitorCallbacks;
|
||||
|
||||
/// \brief Provides amortized O(1) random access to a CodeView type stream.
|
||||
/// Normally to access a type from a type stream, you must know its byte
|
||||
/// offset into the type stream, because type records are variable-lengthed.
|
||||
@ -47,6 +44,11 @@ class TypeVisitorCallbacks;
|
||||
/// of O(N/M) and an amortized time of O(1).
|
||||
class LazyRandomTypeCollection : public TypeCollection {
|
||||
typedef FixedStreamArray<TypeIndexOffset> PartialOffsetArray;
|
||||
struct CacheEntry {
|
||||
CVType Type;
|
||||
uint32_t Offset;
|
||||
StringRef Name;
|
||||
};
|
||||
|
||||
public:
|
||||
explicit LazyRandomTypeCollection(uint32_t RecordCountHint);
|
||||
@ -56,8 +58,10 @@ class LazyRandomTypeCollection : public TypeCollection {
|
||||
PartialOffsetArray PartialOffsets);
|
||||
LazyRandomTypeCollection(const CVTypeArray &Types, uint32_t RecordCountHint);
|
||||
|
||||
void reset(ArrayRef<uint8_t> Data);
|
||||
void reset(StringRef Data);
|
||||
void reset(ArrayRef<uint8_t> Data, uint32_t RecordCountHint);
|
||||
void reset(StringRef Data, uint32_t RecordCountHint);
|
||||
|
||||
uint32_t getOffsetOfType(TypeIndex Index);
|
||||
|
||||
CVType getType(TypeIndex Index) override;
|
||||
StringRef getTypeName(TypeIndex Index) override;
|
||||
@ -68,27 +72,26 @@ class LazyRandomTypeCollection : public TypeCollection {
|
||||
Optional<TypeIndex> getNext(TypeIndex Prev) override;
|
||||
|
||||
private:
|
||||
const TypeDatabase &database() const { return Database; }
|
||||
Error ensureTypeExists(TypeIndex Index);
|
||||
void ensureCapacityFor(TypeIndex Index);
|
||||
|
||||
Error visitRangeForType(TypeIndex TI);
|
||||
Error fullScanForType(TypeIndex TI);
|
||||
Error visitRange(TypeIndex Begin, uint32_t BeginOffset, TypeIndex End);
|
||||
Error visitOneRecord(TypeIndex TI, uint32_t Offset, CVType &Record);
|
||||
void visitRange(TypeIndex Begin, uint32_t BeginOffset, TypeIndex End);
|
||||
|
||||
/// Visited records get automatically added to the type database.
|
||||
TypeDatabase Database;
|
||||
/// Number of actual records.
|
||||
uint32_t Count = 0;
|
||||
|
||||
/// The largest type index which we've visited.
|
||||
TypeIndex LargestTypeIndex = TypeIndex::None();
|
||||
|
||||
BumpPtrAllocator Allocator;
|
||||
StringSaver NameStorage;
|
||||
|
||||
/// The type array to allow random access visitation of.
|
||||
CVTypeArray Types;
|
||||
|
||||
/// The database visitor which adds new records to the database.
|
||||
TypeDatabaseVisitor DatabaseVisitor;
|
||||
|
||||
/// A vector mapping type indices to type offset. For every record that has
|
||||
/// been visited, contains the absolute offset of that record in the record
|
||||
/// array.
|
||||
std::vector<uint32_t> KnownOffsets;
|
||||
std::vector<CacheEntry> Records;
|
||||
|
||||
/// An array of index offsets for the given type stream, allowing log(N)
|
||||
/// lookups of a type record by index. Similar to KnownOffsets but only
|
||||
|
@ -363,7 +363,7 @@ class PublicSym32 : public SymbolRecord {
|
||||
: SymbolRecord(SymbolRecordKind::PublicSym32),
|
||||
RecordOffset(RecordOffset) {}
|
||||
|
||||
TypeIndex Index;
|
||||
PublicSymFlags Flags;
|
||||
uint32_t Offset;
|
||||
uint16_t Segment;
|
||||
StringRef Name;
|
||||
|
@ -1,84 +0,0 @@
//===- TypeDatabase.h - A collection of CodeView type records ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASE_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASE_H

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/StringSaver.h"

namespace llvm {
namespace codeview {
class TypeDatabase : public TypeCollection {
friend class RandomAccessTypeVisitor;

public:
explicit TypeDatabase(uint32_t Capacity);

/// Records the name of a type, and reserves its type index.
TypeIndex appendType(StringRef Name, const CVType &Data);

/// Records the name of a type, and reserves its type index.
void recordType(StringRef Name, TypeIndex Index, const CVType &Data);

/// Saves the name in a StringSet and creates a stable StringRef.
StringRef saveTypeName(StringRef TypeName);

StringRef getTypeName(TypeIndex Index) const;

const CVType &getTypeRecord(TypeIndex Index) const;
CVType &getTypeRecord(TypeIndex Index);

bool contains(TypeIndex Index) const;
uint32_t size() const;
uint32_t capacity() const;
bool empty() const;

CVType getType(TypeIndex Index) override;
StringRef getTypeName(TypeIndex Index) override;
bool contains(TypeIndex Index) override;
uint32_t size() override;
uint32_t capacity() override;

Optional<TypeIndex> getFirst() override;
Optional<TypeIndex> getNext(TypeIndex Prev) override;

Optional<TypeIndex> largestTypeIndexLessThan(TypeIndex TI) const;

private:
TypeIndex getAppendIndex() const;

void grow();
void grow(TypeIndex Index);

BumpPtrAllocator Allocator;

uint32_t Count = 0;
TypeIndex LargestTypeIndex;

/// All user defined type records in .debug$T live in here. Type indices
/// greater than 0x1000 are user defined. Subtract 0x1000 from the index to
/// index into this vector.
SmallVector<StringRef, 10> CVUDTNames;
SmallVector<CVType, 10> TypeRecords;

StringSaver TypeNameStorage;

BitVector ValidRecords;
};
}
}

#endif
@ -1,62 +0,0 @@
//===-- TypeDatabaseVisitor.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASEVISITOR_H

#include "llvm/ADT/PointerUnion.h"

#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"

namespace llvm {
namespace codeview {

/// Dumper for CodeView type streams found in COFF object files and PDB files.
class TypeDatabaseVisitor : public TypeVisitorCallbacks {
public:
explicit TypeDatabaseVisitor(TypeDatabase &TypeDB) : TypeDB(&TypeDB) {}

/// Paired begin/end actions for all types. Receives all record data,
/// including the fixed-length record prefix.
Error visitTypeBegin(CVType &Record) override;
Error visitTypeBegin(CVType &Record, TypeIndex Index) override;
Error visitTypeEnd(CVType &Record) override;
Error visitMemberBegin(CVMemberRecord &Record) override;
Error visitMemberEnd(CVMemberRecord &Record) override;

#define TYPE_RECORD(EnumName, EnumVal, Name) \
Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"

private:
StringRef getTypeName(TypeIndex Index) const;
StringRef saveTypeName(StringRef Name);

bool IsInFieldList = false;

/// Name of the current type. Only valid before visitTypeEnd.
StringRef Name;
/// Current type index. Only valid before visitTypeEnd, and if we are
/// visiting a random access type database.
Optional<TypeIndex> CurrentTypeIndex;

TypeDatabase *TypeDB;
};

} // end namespace codeview
} // end namespace llvm

#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPER_H
@ -11,6 +11,7 @@
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"

@ -27,6 +28,11 @@ void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
SmallVectorImpl<TiReference> &Refs);
void discoverTypeIndices(const CVType &Type,
SmallVectorImpl<TiReference> &Refs);

/// Discover type indices in symbol records. Returns false if this is an unknown
/// record.
bool discoverTypeIndices(const CVSymbol &Symbol,
SmallVectorImpl<TiReference> &Refs);
}
}
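The new CVSymbol overload added here returns false for record kinds it does not understand, unlike the void type-record overloads. A hedged sketch of the intended call pattern (TiReference's members are not shown in this hunk and are left opaque):

// Sketch only: collect the TypeIndex references embedded in one symbol record.
#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
using namespace llvm;
using namespace llvm::codeview;

bool countSymbolTypeRefs(const CVSymbol &Sym, unsigned &NumRefs) {
  SmallVector<TiReference, 4> Refs;
  if (!discoverTypeIndices(Sym, Refs))
    return false; // unknown record kind
  NumRefs = Refs.size();
  return true;
}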
contrib/llvm/include/llvm/DebugInfo/CodeView/TypeName.h (new file, 22 lines)
@ -0,0 +1,22 @@
//===- TypeName.h --------------------------------------------- *- C++ --*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPENAME_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPENAME_H

#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"

namespace llvm {
namespace codeview {
std::string computeTypeName(TypeCollection &Types, TypeIndex Index);
}
} // namespace llvm

#endif
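The whole new header is the one free function above; a short sketch (not part of the diff) of calling it against any TypeCollection, such as the LazyRandomTypeCollection earlier in this diff:

// Sketch only: render a human-readable name for a type index.
#include "llvm/DebugInfo/CodeView/TypeName.h"
#include <string>

std::string nameOf(llvm::codeview::TypeCollection &Types,
                   llvm::codeview::TypeIndex Index) {
  return llvm::codeview::computeTypeName(Types, Index);
}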
@ -25,6 +25,7 @@ class TypeRecordMapping : public TypeVisitorCallbacks {
explicit TypeRecordMapping(BinaryStreamReader &Reader) : IO(Reader) {}
explicit TypeRecordMapping(BinaryStreamWriter &Writer) : IO(Writer) {}

using TypeVisitorCallbacks::visitTypeBegin;
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;

@ -93,6 +93,7 @@ class TypeSerializer : public TypeVisitorCallbacks {
TypeIndex insertRecord(const RemappedType &Record);
Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record);

using TypeVisitorCallbacks::visitTypeBegin;
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
Error visitMemberBegin(CVMemberRecord &Record) override;
@ -11,7 +11,9 @@
#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLECOLLECTION_H

#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/Support/StringSaver.h"

#include <vector>

namespace llvm {
namespace codeview {
@ -30,11 +32,10 @@ class TypeTableCollection : public TypeCollection {
uint32_t capacity() override;

private:
bool hasCapacityFor(TypeIndex Index) const;
void ensureTypeExists(TypeIndex Index);

BumpPtrAllocator Allocator;
StringSaver NameStorage;
std::vector<StringRef> Names;
ArrayRef<ArrayRef<uint8_t>> Records;
TypeDatabase Database;
};
}
}
@ -57,7 +57,7 @@ struct DILineInfo {
}
};

typedef SmallVector<std::pair<uint64_t, DILineInfo>, 16> DILineInfoTable;
using DILineInfoTable = SmallVector<std::pair<uint64_t, DILineInfo>, 16>;

/// DIInliningInfo - a format-neutral container for inlined code description.
class DIInliningInfo {
@ -102,7 +102,7 @@ enum class DINameKind { None, ShortName, LinkageName };
/// should be filled with data.
struct DILineInfoSpecifier {
enum class FileLineInfoKind { None, Default, AbsoluteFilePath };
typedef DINameKind FunctionNameKind;
using FunctionNameKind = DINameKind;

FileLineInfoKind FLIKind;
FunctionNameKind FNKind;
@ -174,6 +174,7 @@ class DIContext {
// No verifier? Just say things went well.
return true;
}

virtual DILineInfo getLineInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
virtual DILineInfoTable getLineInfoForAddressRange(uint64_t Address,
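getLineInfoForAddress keeps its defaulted DILineInfoSpecifier argument, so the common lookup needs no specifier at all. A hedged sketch (Ctx can be any concrete DIContext, e.g. the DWARFContext below; DILineInfo's FileName/Line members are assumed from the unchanged part of this header):

// Sketch only: map an address to file:line through the DIContext interface.
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void printLineInfo(DIContext &Ctx, uint64_t Address, raw_ostream &OS) {
  DILineInfo Info = Ctx.getLineInfoForAddress(Address);
  OS << Info.FileName << ":" << Info.Line << "\n";
}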
@ -33,6 +33,7 @@ class DWARFAbbreviationDeclaration {

dwarf::Attribute Attr;
dwarf::Form Form;

/// The following field is used for ByteSize for non-implicit_const
/// attributes and as value for implicit_const ones, indicated by
/// Form == DW_FORM_implicit_const.
@ -58,7 +59,7 @@ class DWARFAbbreviationDeclaration {
/// the ByteSize member.
Optional<int64_t> getByteSize(const DWARFUnit &U) const;
};
typedef SmallVector<AttributeSpec, 8> AttributeSpecVector;
using AttributeSpecVector = SmallVector<AttributeSpec, 8>;

DWARFAbbreviationDeclaration();

@ -67,8 +68,8 @@ class DWARFAbbreviationDeclaration {
dwarf::Tag getTag() const { return Tag; }
bool hasChildren() const { return HasChildren; }

typedef iterator_range<AttributeSpecVector::const_iterator>
attr_iterator_range;
using attr_iterator_range =
iterator_range<AttributeSpecVector::const_iterator>;

attr_iterator_range attributes() const {
return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
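attr_iterator_range is only respelled with `using`, so range-for iteration over an abbreviation's attribute specs is unchanged. A small sketch (not part of the diff) using the AttributeSpec fields shown at the top of this hunk:

// Sketch only: count how many attribute specs in an abbreviation use a given form.
#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
using namespace llvm;

unsigned countSpecsWithForm(const DWARFAbbreviationDeclaration &Abbrev,
                            dwarf::Form F) {
  unsigned N = 0;
  for (const auto &Spec : Abbrev.attributes())
    if (Spec.Form == F)
      ++N;
  return N;
}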
@ -32,8 +32,9 @@ class DWARFAcceleratorTable {
};

struct HeaderData {
typedef uint16_t AtomType;
typedef dwarf::Form Form;
using AtomType = uint16_t;
using Form = dwarf::Form;

uint32_t DIEOffsetBase;
SmallVector<std::pair<AtomType, Form>, 3> Atoms;
};
@ -10,7 +10,6 @@
#ifndef LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H
#define LLVM_DEBUGINFO_DWARF_DWARFCONTEXT_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
@ -25,21 +24,24 @@
#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugMacro.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFGdbIndex.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/DebugInfo/DWARF/DWARFTypeUnit.h"
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Host.h"
#include <cstdint>
#include <deque>
#include <map>
#include <memory>
#include <utility>

namespace llvm {

class DataExtractor;
class MemoryBuffer;
class raw_ostream;

@ -73,7 +75,7 @@ class DWARFContext : public DIContext {
std::unique_ptr<DWARFDebugLocDWO> LocDWO;

/// The maximum DWARF version of all units.
unsigned MaxVersion;
unsigned MaxVersion = 0;

struct DWOFile {
object::OwningBinary<object::ObjectFile> File;
@ -100,7 +102,7 @@ class DWARFContext : public DIContext {
void parseDWOTypeUnits();

public:
DWARFContext() : DIContext(CK_DWARF), MaxVersion(0) {}
DWARFContext() : DIContext(CK_DWARF) {}
DWARFContext(DWARFContext &) = delete;
DWARFContext &operator=(DWARFContext &) = delete;

@ -112,9 +114,9 @@ class DWARFContext : public DIContext {

bool verify(raw_ostream &OS, DIDumpType DumpType = DIDT_All) override;

typedef DWARFUnitSection<DWARFCompileUnit>::iterator_range cu_iterator_range;
typedef DWARFUnitSection<DWARFTypeUnit>::iterator_range tu_iterator_range;
typedef iterator_range<decltype(TUs)::iterator> tu_section_iterator_range;
using cu_iterator_range = DWARFUnitSection<DWARFCompileUnit>::iterator_range;
using tu_iterator_range = DWARFUnitSection<DWARFTypeUnit>::iterator_range;
using tu_section_iterator_range = iterator_range<decltype(TUs)::iterator>;

/// Get compile units in this context.
cu_iterator_range compile_units() {
@ -230,8 +232,10 @@ class DWARFContext : public DIContext {
virtual bool isLittleEndian() const = 0;
virtual uint8_t getAddressSize() const = 0;
virtual const DWARFSection &getInfoSection() = 0;
typedef MapVector<object::SectionRef, DWARFSection,
std::map<object::SectionRef, unsigned>> TypeSectionMap;

using TypeSectionMap = MapVector<object::SectionRef, DWARFSection,
std::map<object::SectionRef, unsigned>>;

virtual const TypeSectionMap &getTypesSections() = 0;
virtual StringRef getAbbrevSection() = 0;
virtual const DWARFSection &getLocSection() = 0;
@ -18,6 +18,8 @@

namespace llvm {

class raw_ostream;

class DWARFAbbreviationDeclarationSet {
uint32_t Offset;
/// Code of the first abbreviation, if all abbreviations in the set have
@ -25,8 +27,8 @@ class DWARFAbbreviationDeclarationSet {
uint32_t FirstAbbrCode;
std::vector<DWARFAbbreviationDeclaration> Decls;

typedef std::vector<DWARFAbbreviationDeclaration>::const_iterator
const_iterator;
using const_iterator =
std::vector<DWARFAbbreviationDeclaration>::const_iterator;

public:
DWARFAbbreviationDeclarationSet();
@ -51,8 +53,8 @@ class DWARFAbbreviationDeclarationSet {
};

class DWARFDebugAbbrev {
typedef std::map<uint64_t, DWARFAbbreviationDeclarationSet>
DWARFAbbreviationDeclarationSetMap;
using DWARFAbbreviationDeclarationSetMap =
std::map<uint64_t, DWARFAbbreviationDeclarationSet>;

DWARFAbbreviationDeclarationSetMap AbbrDeclSets;
mutable DWARFAbbreviationDeclarationSetMap::const_iterator PrevAbbrOffsetPos;
@ -46,8 +46,8 @@ class DWARFDebugArangeSet {
};

private:
typedef std::vector<Descriptor> DescriptorColl;
typedef iterator_range<DescriptorColl::const_iterator> desc_iterator_range;
using DescriptorColl = std::vector<Descriptor>;
using desc_iterator_range = iterator_range<DescriptorColl::const_iterator>;

uint32_t Offset;
Header HeaderData;

@ -76,8 +76,8 @@ class DWARFDebugAranges {
}
};

typedef std::vector<Range> RangeColl;
typedef RangeColl::const_iterator RangeCollIterator;
using RangeColl = std::vector<Range>;
using RangeCollIterator = RangeColl::const_iterator;

std::vector<RangeEndpoint> Endpoints;
RangeColl Aranges;
@ -10,7 +10,9 @@
#ifndef LLVM_DEBUGINFO_DWARFDEBUGLINE_H
#define LLVM_DEBUGINFO_DWARFDEBUGLINE_H

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/DIContext.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/Support/DataExtractor.h"
#include <cstdint>
@ -42,10 +44,10 @@ class DWARFDebugLine {
/// The size in bytes of the statement information for this compilation unit
/// (not including the total_length field itself).
uint64_t TotalLength;
/// Version identifier for the statement information format.
uint16_t Version;
/// In v5, size in bytes of an address (or segment offset).
uint8_t AddressSize;
/// Version, address size (starting in v5), and DWARF32/64 format; these
/// parameters affect interpretation of forms (used in the directory and
/// file tables starting with v5).
DWARFFormParams FormParams;
/// In v5, size in bytes of a segment selector.
uint8_t SegSelectorSize;
/// The number of bytes following the prologue_length field to the beginning
@ -70,15 +72,18 @@ class DWARFDebugLine {
std::vector<StringRef> IncludeDirectories;
std::vector<FileNameEntry> FileNames;

bool IsDWARF64;
const DWARFFormParams getFormParams() const { return FormParams; }
uint16_t getVersion() const { return FormParams.Version; }
uint8_t getAddressSize() const { return FormParams.AddrSize; }
bool isDWARF64() const { return FormParams.Format == dwarf::DWARF64; }

uint32_t sizeofTotalLength() const { return IsDWARF64 ? 12 : 4; }
uint32_t sizeofTotalLength() const { return isDWARF64() ? 12 : 4; }

uint32_t sizeofPrologueLength() const { return IsDWARF64 ? 8 : 4; }
uint32_t sizeofPrologueLength() const { return isDWARF64() ? 8 : 4; }

/// Length of the prologue in bytes.
uint32_t getLength() const {
return PrologueLength + sizeofTotalLength() + sizeof(Version) +
return PrologueLength + sizeofTotalLength() + sizeof(getVersion()) +
sizeofPrologueLength();
}

@ -104,7 +109,9 @@ class DWARFDebugLine {
void postAppend();
void reset(bool DefaultIsStmt);
void dump(raw_ostream &OS) const;

static void dumpTableHeader(raw_ostream &OS);

static bool orderByAddress(const Row &LHS, const Row &RHS) {
return LHS.Address < RHS.Address;
}
@ -216,11 +223,12 @@ class DWARFDebugLine {
bool parse(DataExtractor DebugLineData, const RelocAddrMap *RMap,
uint32_t *OffsetPtr);

using RowVector = std::vector<Row>;
using RowIter = RowVector::const_iterator;
using SequenceVector = std::vector<Sequence>;
using SequenceIter = SequenceVector::const_iterator;

struct Prologue Prologue;
typedef std::vector<Row> RowVector;
typedef RowVector::const_iterator RowIter;
typedef std::vector<Sequence> SequenceVector;
typedef SequenceVector::const_iterator SequenceIter;
RowVector Rows;
SequenceVector Sequences;

@ -244,14 +252,14 @@ class DWARFDebugLine {
struct LineTable *LineTable;
/// The row number that starts at zero for the prologue, and increases for
/// each row added to the matrix.
unsigned RowNumber;
unsigned RowNumber = 0;
struct Row Row;
struct Sequence Sequence;
};

typedef std::map<uint32_t, LineTable> LineTableMapTy;
typedef LineTableMapTy::iterator LineTableIter;
typedef LineTableMapTy::const_iterator LineTableConstIter;
using LineTableMapTy = std::map<uint32_t, LineTable>;
using LineTableIter = LineTableMapTy::iterator;
using LineTableConstIter = LineTableMapTy::const_iterator;

const RelocAddrMap *RelocMap;
LineTableMapTy LineTableMap;
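With the prologue now holding DWARFFormParams, the DWARF32/DWARF64 distinction is derived instead of cached in IsDWARF64; concretely, total_length occupies 4 bytes in DWARF32 and 12 in DWARF64, and prologue_length 4 or 8. A small sketch of that arithmetic (not part of the diff):

// Sketch only: header-field sizes implied by the DWARF32/DWARF64 format,
// mirroring sizeofTotalLength()/sizeofPrologueLength() above.
#include <cstdint>

uint32_t lineTableHeaderOverhead(bool IsDWARF64) {
  uint32_t SizeOfTotalLength = IsDWARF64 ? 12 : 4; // 0xffffffff escape + 8-byte length
  uint32_t SizeOfPrologueLength = IsDWARF64 ? 8 : 4;
  return SizeOfTotalLength + sizeof(uint16_t) /*version*/ + SizeOfPrologueLength;
}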
@ -39,7 +39,7 @@ class DWARFDebugLoc {
SmallVector<Entry, 2> Entries;
};

typedef SmallVector<LocationList, 4> LocationLists;
using LocationLists = SmallVector<LocationList, 4>;

/// A list of all the variables in the debug_loc section, each one describing
/// the locations in which the variable is stored.
@ -71,7 +71,7 @@ class DWARFDebugLocDWO {
SmallVector<Entry, 2> Entries;
};

typedef SmallVector<LocationList, 4> LocationLists;
using LocationLists = SmallVector<LocationList, 4>;

LocationLists Locations;

@ -40,7 +40,7 @@ class DWARFDebugMacro {
};
};

typedef SmallVector<Entry, 4> MacroList;
using MacroList = SmallVector<Entry, 4>;

/// A list of all the macro entries in the debug_macinfo section.
MacroList Macros;
@ -12,10 +12,8 @@

#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/Support/DataExtractor.h"

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {
@ -29,7 +27,7 @@ struct DWARFAddressRange {
};

/// DWARFAddressRangesVector - represents a set of absolute address ranges.
typedef std::vector<DWARFAddressRange> DWARFAddressRangesVector;
using DWARFAddressRangesVector = std::vector<DWARFAddressRange>;

class DWARFDebugRangeList {
public:
@ -22,6 +22,35 @@ namespace llvm {
class DWARFUnit;
class raw_ostream;

/// A helper struct for DWARFFormValue methods, providing information that
/// allows it to know the byte size of DW_FORM values that vary in size
/// depending on the DWARF version, address byte size, or DWARF32/DWARF64.
struct DWARFFormParams {
uint16_t Version;
uint8_t AddrSize;
dwarf::DwarfFormat Format;

/// The definition of the size of form DW_FORM_ref_addr depends on the
/// version. In DWARF v2 it's the size of an address; after that, it's the
/// size of a reference.
uint8_t getRefAddrByteSize() const {
if (Version == 2)
return AddrSize;
return getDwarfOffsetByteSize();
}

/// The size of a reference is determined by the DWARF 32/64-bit format.
uint8_t getDwarfOffsetByteSize() const {
switch (Format) {
case dwarf::DwarfFormat::DWARF32:
return 4;
case dwarf::DwarfFormat::DWARF64:
return 8;
}
llvm_unreachable("Invalid Format value");
}
};

class DWARFFormValue {
public:
enum FormClass {
@ -104,79 +133,43 @@ class DWARFFormValue {

/// Get the fixed byte size for a given form.
///
/// If the form always has a fixed valid byte size that doesn't depend on a
/// DWARFUnit, then an Optional with a value will be returned. If the form
/// can vary in size depending on the DWARFUnit (DWARF version, address byte
/// size, or DWARF 32/64) and the DWARFUnit is valid, then an Optional with a
/// valid value is returned. If the form is always encoded using a variable
/// length storage format (ULEB or SLEB numbers or blocks) or the size
/// depends on a DWARFUnit and the DWARFUnit is NULL, then None will be
/// returned.
/// \param Form The DWARF form to get the fixed byte size for
/// \param U The DWARFUnit that can be used to help determine the byte size.
/// If the form has a fixed byte size, then an Optional with a value will be
/// returned. If the form is always encoded using a variable length storage
/// format (ULEB or SLEB numbers or blocks) then None will be returned.
///
/// \returns Optional<uint8_t> value with the fixed byte size or None if
/// \p Form doesn't have a fixed byte size or a DWARFUnit wasn't supplied
/// and was needed to calculate the byte size.
static Optional<uint8_t> getFixedByteSize(dwarf::Form Form,
const DWARFUnit *U = nullptr);

/// Get the fixed byte size for a given form.
///
/// If the form has a fixed byte size given a valid DWARF version and address
/// byte size, then an Optional with a valid value is returned. If the form
/// is always encoded using a variable length storage format (ULEB or SLEB
/// numbers or blocks) then None will be returned.
///
/// \param Form DWARF form to get the fixed byte size for
/// \param Version DWARF version number.
/// \param AddrSize size of an address in bytes.
/// \param Format enum value from llvm::dwarf::DwarfFormat.
/// \param Form DWARF form to get the fixed byte size for.
/// \param FormParams DWARF parameters to help interpret forms.
/// \returns Optional<uint8_t> value with the fixed byte size or None if
/// \p Form doesn't have a fixed byte size.
static Optional<uint8_t> getFixedByteSize(dwarf::Form Form, uint16_t Version,
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
static Optional<uint8_t> getFixedByteSize(dwarf::Form Form,
const DWARFFormParams FormParams);

/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
/// Skip a form's value in \p DebugInfoData at the offset specified by
/// \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
/// Skips the bytes for the current form and updates the offset.
///
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \param DebugInfoData The data where we want to skip the value.
/// \param OffsetPtr A reference to the offset that will be updated.
/// \param Params DWARF parameters to help interpret forms.
/// \returns true on success, false if the form was not skipped.
bool skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
const DWARFUnit *U) const;
const DWARFFormParams Params) const {
return DWARFFormValue::skipValue(Form, DebugInfoData, OffsetPtr, Params);
}

/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
/// Skip a form's value in \p DebugInfoData at the offset specified by
/// \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
/// Skips the bytes for the specified form and updates the offset.
///
/// \param Form the DW_FORM enumeration that indicates the form to skip.
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \param Form The DW_FORM enumeration that indicates the form to skip.
/// \param DebugInfoData The data where we want to skip the value.
/// \param OffsetPtr A reference to the offset that will be updated.
/// \param FormParams DWARF parameters to help interpret forms.
/// \returns true on success, false if the form was not skipped.
static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
uint32_t *OffsetPtr, const DWARFUnit *U);

/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
/// \param Form the DW_FORM enumeration that indicates the form to skip.
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param Version DWARF version number.
/// \param AddrSize size of an address in bytes.
/// \param Format enum value from llvm::dwarf::DwarfFormat.
/// \returns true on success, false if the form was not skipped.
static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
uint32_t *OffsetPtr, uint16_t Version, uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
uint32_t *OffsetPtr, const DWARFFormParams FormParams);

private:
void dumpString(raw_ostream &OS) const;
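The new DWARFFormParams struct replaces the DWARFUnit pointer in the size queries above; all of them reduce to three values. A worked sketch (not part of the diff) using only code visible in this hunk (DWARF v2 vs v4, 8-byte addresses, 32-bit format):

// Sketch only: DW_FORM_ref_addr is address-sized in DWARF v2, offset-sized later.
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
using namespace llvm;

void formParamsExample() {
  DWARFFormParams V2 = {2, 8, dwarf::DWARF32};
  DWARFFormParams V4 = {4, 8, dwarf::DWARF32};
  (void)V2.getRefAddrByteSize();     // 8: the address size in version 2
  (void)V4.getRefAddrByteSize();     // 4: a DWARF32 offset in version 4
  (void)V4.getDwarfOffsetByteSize(); // 4 for DWARF32, 8 for DWARF64
}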
@ -12,7 +12,6 @@

#include "llvm/ADT/DenseMap.h"
#include <cstdint>
#include <utility>

namespace llvm {

@ -28,7 +27,7 @@ struct RelocAddrEntry {
/// dwarf where we expect relocated values. This adds a bit of complexity to the
/// dwarf parsing/extraction at the benefit of not allocating memory for the
/// entire size of the debug info sections.
typedef DenseMap<uint64_t, RelocAddrEntry> RelocAddrMap;
using RelocAddrMap = DenseMap<uint64_t, RelocAddrEntry>;

} // end namespace llvm
@ -19,11 +19,10 @@
#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Object/Binary.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/DataExtractor.h"
#include <algorithm>
#include <cassert>
@ -31,6 +30,7 @@
#include <cstdint>
#include <map>
#include <memory>
#include <utility>
#include <vector>

namespace llvm {
@ -72,9 +72,9 @@ class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>,
bool Parsed = false;

public:
typedef SmallVectorImpl<std::unique_ptr<UnitType>> UnitVector;
typedef typename UnitVector::iterator iterator;
typedef llvm::iterator_range<typename UnitVector::iterator> iterator_range;
using UnitVector = SmallVectorImpl<std::unique_ptr<UnitType>>;
using iterator = typename UnitVector::iterator;
using iterator_range = llvm::iterator_range<typename UnitVector::iterator>;

UnitType *getUnitForOffset(uint32_t Offset) const override {
auto *CU = std::upper_bound(
@ -128,12 +128,13 @@ class DWARFUnit {
bool isDWO;
const DWARFUnitSectionBase &UnitSection;

// Version, address size, and DWARF format.
DWARFFormParams FormParams;

uint32_t Offset;
uint32_t Length;
const DWARFAbbreviationDeclarationSet *Abbrevs;
uint16_t Version;
uint8_t UnitType;
uint8_t AddrSize;
uint64_t BaseAddr;
/// The compile unit debug information entry items.
std::vector<DWARFDebugInfoEntry> DieArray;
@ -142,8 +143,9 @@ class DWARFUnit {
/// IntervalMap does not support range removal, as a result, we use the
/// std::map::upper_bound for address range lookup.
std::map<uint64_t, std::pair<uint64_t, DWARFDie>> AddrDieMap;
typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
die_iterator_range;

using die_iterator_range =
iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>;

std::shared_ptr<DWARFUnit> DWO;

@ -159,7 +161,7 @@ class DWARFUnit {
virtual bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr);

/// Size in bytes of the unit header.
virtual uint32_t getHeaderSize() const { return Version <= 4 ? 11 : 12; }
virtual uint32_t getHeaderSize() const { return getVersion() <= 4 ? 11 : 12; }

public:
DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
@ -197,7 +199,8 @@ class DWARFUnit {
uint64_t getStringOffsetSectionRelocation(uint32_t Index) const;

DataExtractor getDebugInfoExtractor() const {
return DataExtractor(InfoSection.Data, isLittleEndian, AddrSize);
return DataExtractor(InfoSection.Data, isLittleEndian,
getAddressByteSize());
}

DataExtractor getStringExtractor() const {
@ -220,10 +223,14 @@ class DWARFUnit {
uint32_t getOffset() const { return Offset; }
uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
uint32_t getLength() const { return Length; }
uint16_t getVersion() const { return Version; }

dwarf::DwarfFormat getFormat() const {
return dwarf::DwarfFormat::DWARF32; // FIXME: Support DWARF64.
const DWARFFormParams &getFormParams() const { return FormParams; }
uint16_t getVersion() const { return FormParams.Version; }
dwarf::DwarfFormat getFormat() const { return FormParams.Format; }
uint8_t getAddressByteSize() const { return FormParams.AddrSize; }
uint8_t getRefAddrByteSize() const { return FormParams.getRefAddrByteSize(); }
uint8_t getDwarfOffsetByteSize() const {
return FormParams.getDwarfOffsetByteSize();
}

const DWARFAbbreviationDeclarationSet *getAbbreviations() const {
@ -231,19 +238,6 @@ class DWARFUnit {
}

uint8_t getUnitType() const { return UnitType; }
uint8_t getAddressByteSize() const { return AddrSize; }

uint8_t getRefAddrByteSize() const {
if (Version == 2)
return AddrSize;
return getDwarfOffsetByteSize();
}

uint8_t getDwarfOffsetByteSize() const {
if (getFormat() == dwarf::DwarfFormat::DWARF64)
return 8;
return 4;
}

uint64_t getBaseAddress() const { return BaseAddr; }
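After this hunk the ad-hoc Version/AddrSize members are gone and every size accessor forwards to the single FormParams member. A short sketch of the accessor surface as it reads post-change (not part of the diff; assumes an already-extracted unit):

// Sketch only: the unit-level form parameters now come from one place.
#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void describeUnit(const DWARFUnit &U, raw_ostream &OS) {
  const DWARFFormParams &FP = U.getFormParams();
  OS << "version " << U.getVersion()                         // FP.Version
     << ", addr size " << unsigned(U.getAddressByteSize())   // FP.AddrSize
     << ", offset size " << unsigned(U.getDwarfOffsetByteSize())
     << ", ref_addr size " << unsigned(FP.getRefAddrByteSize()) << "\n";
}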
@ -12,18 +12,16 @@

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"

#include "llvm/DebugInfo/MSF/MSFCommon.h"

#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"

#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {
namespace msf {

class MSFBuilder {
public:
/// \brief Create a new `MSFBuilder`.
@ -122,7 +120,7 @@ class MSFBuilder {
Error allocateBlocks(uint32_t NumBlocks, MutableArrayRef<uint32_t> Blocks);
uint32_t computeDirectoryByteSize() const;

typedef std::vector<uint32_t> BlockList;
using BlockList = std::vector<uint32_t>;

BumpPtrAllocator &Allocator;

@ -136,7 +134,8 @@ class MSFBuilder {
std::vector<uint32_t> DirectoryBlocks;
std::vector<std::pair<uint32_t, BlockList>> StreamData;
};
} // namespace msf
} // namespace llvm

} // end namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MSFBUILDER_H
@ -12,15 +12,15 @@

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"

#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"

#include <cstdint>
#include <vector>

namespace llvm {
namespace msf {

static const char Magic[] = {'M', 'i', 'c', 'r', 'o', 's', 'o', 'f',
't', ' ', 'C', '/', 'C', '+', '+', ' ',
'M', 'S', 'F', ' ', '7', '.', '0', '0',
@ -50,8 +50,9 @@ struct SuperBlock {
};

struct MSFLayout {
MSFLayout() : SB(nullptr) {}
const SuperBlock *SB;
MSFLayout() = default;

const SuperBlock *SB = nullptr;
BitVector FreePageMap;
ArrayRef<support::ulittle32_t> DirectoryBlocks;
ArrayRef<support::ulittle32_t> StreamSizes;
@ -90,15 +91,16 @@ inline uint32_t getFpmIntervalLength(const MSFLayout &L) {

inline uint32_t getNumFpmIntervals(const MSFLayout &L) {
uint32_t Length = getFpmIntervalLength(L);
return llvm::alignTo(L.SB->NumBlocks, Length) / Length;
return alignTo(L.SB->NumBlocks, Length) / Length;
}

inline uint32_t getFullFpmByteSize(const MSFLayout &L) {
return llvm::alignTo(L.SB->NumBlocks, 8) / 8;
return alignTo(L.SB->NumBlocks, 8) / 8;
}

Error validateSuperBlock(const SuperBlock &SB);
} // namespace msf
} // namespace llvm

} // end namespace msf
} // end namespace llvm

#endif // LLVM_DEBUGINFO_MSF_MSFCOMMON_H
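Only the redundant llvm:: qualifiers change here, but the arithmetic is worth spelling out: the free page map carries one bit per block. A hedged sketch of the byte-size calculation mirrored by getFullFpmByteSize above (not part of the diff):

// Sketch only: an MSF/PDB file with N blocks needs ceil(N / 8) bytes of
// free-page map in total.
#include "llvm/Support/MathExtras.h"
#include <cstdint>

uint32_t fullFpmBytesFor(uint32_t NumBlocks) {
  // e.g. NumBlocks == 10000  ->  alignTo(10000, 8) / 8 == 1250 bytes
  return uint32_t(llvm::alignTo(NumBlocks, 8) / 8);
}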
@ -1,5 +1,4 @@
//===- MappedBlockStream.h - Discontiguous stream data in an MSF -*- C++
//-*-===//
//==- MappedBlockStream.h - Discontiguous stream data in an MSF --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@ -13,7 +12,6 @@

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/DebugInfo/MSF/MSFStreamLayout.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/BinaryStream.h"
@ -21,6 +19,7 @@
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <memory>
#include <vector>

namespace llvm {
@ -40,6 +39,7 @@ struct MSFLayout;
/// of bytes.
class MappedBlockStream : public BinaryStream {
friend class WritableMappedBlockStream;

public:
static std::unique_ptr<MappedBlockStream>
createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
@ -57,8 +57,8 @@ class MappedBlockStream : public BinaryStream {
createDirectoryStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
BumpPtrAllocator &Allocator);

llvm::support::endianness getEndian() const override {
return llvm::support::little;
support::endianness getEndian() const override {
return support::little;
}

Error readBytes(uint32_t Offset, uint32_t Size,
@ -68,7 +68,7 @@ class MappedBlockStream : public BinaryStream {

uint32_t getLength() override;

llvm::BumpPtrAllocator &getAllocator() { return Allocator; }
BumpPtrAllocator &getAllocator() { return Allocator; }

void invalidateCache();

@ -92,7 +92,7 @@ class MappedBlockStream : public BinaryStream {
const MSFStreamLayout StreamLayout;
BinaryStreamRef MsfData;

typedef MutableArrayRef<uint8_t> CacheEntry;
using CacheEntry = MutableArrayRef<uint8_t>;

// We just store the allocator by reference. We use this to allocate
// contiguous memory for things like arrays or strings that cross a block
@ -124,8 +124,8 @@ class WritableMappedBlockStream : public WritableBinaryStream {
createFpmStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
BumpPtrAllocator &Allocator);

llvm::support::endianness getEndian() const override {
return llvm::support::little;
support::endianness getEndian() const override {
return support::little;
}

Error readBytes(uint32_t Offset, uint32_t Size,
@ -141,6 +141,7 @@ class WritableMappedBlockStream : public WritableBinaryStream {
const MSFStreamLayout &getStreamLayout() const {
return ReadInterface.getStreamLayout();
}

uint32_t getBlockSize() const { return ReadInterface.getBlockSize(); }
uint32_t getNumBlocks() const { return ReadInterface.getNumBlocks(); }
uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
@ -153,7 +154,6 @@ class WritableMappedBlockStream : public WritableBinaryStream {

private:
MappedBlockStream ReadInterface;

WritableBinaryStreamRef WriteInterface;
};
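For context, the getEndian/readBytes members touched above implement the generic BinaryStream interface. A hedged sketch of reading the MSF directory through it; the readBytes signature (Offset, Size, ArrayRef<uint8_t> &Buffer) and the createDirectoryStream return type are assumed from BinaryStream/createStream and are not fully shown in these hunks:

// Sketch only: materialize the MSF directory stream and peek at its first bytes.
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::msf;

Error peekDirectory(const MSFLayout &Layout, BinaryStreamRef MsfData,
                    BumpPtrAllocator &Allocator) {
  std::unique_ptr<MappedBlockStream> Dir =
      MappedBlockStream::createDirectoryStream(Layout, MsfData, Allocator);
  ArrayRef<uint8_t> Bytes;
  if (Error E = Dir->readBytes(0, std::min<uint32_t>(16, Dir->getLength()), Bytes))
    return E;
  return Error::success();
}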
Some files were not shown because too many files have changed in this diff.