Vendor import of compiler-rt trunk r306325:
https://llvm.org/svn/llvm-project/compiler-rt/trunk@306325
parent 4658ff5fee
commit 10fcf738d7
@@ -7,14 +7,14 @@
# An important constraint of the build is that it only produces libraries
# based on the ability of the host toolchain to target various platforms.

cmake_minimum_required(VERSION 3.4.3)

# Check if compiler-rt is built as a standalone project.
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR OR COMPILER_RT_STANDALONE_BUILD)
  project(CompilerRT C CXX ASM)
  set(COMPILER_RT_STANDALONE_BUILD TRUE)
endif()

cmake_minimum_required(VERSION 3.4.3)

# Add path for custom compiler-rt modules.
list(INSERT CMAKE_MODULE_PATH 0
  "${CMAKE_CURRENT_SOURCE_DIR}/cmake"
@@ -179,7 +179,7 @@ set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64}
set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64})
set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64})
set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64})
set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)

if(APPLE)
@@ -60,7 +60,8 @@ extern int __xray_remove_handler();
/// start logging their subsequent affected function calls (if patched).
///
/// Returns 1 on success, 0 on error.
extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));
extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType,
                                                 uint64_t));

/// Disables the XRay handler used to log first arguments of function calls.
/// Returns 1 on success, 0 on error.
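For reference, a minimal usage sketch of the revised declaration above (not part of this import); the handler name and body are hypothetical, and it assumes the usual __xray_patch() entry point from the same header:

    #include <cstdint>
    #include "xray/xray_interface.h"  // declares __xray_set_handler_arg1()

    // Hypothetical handler: receives the instrumented function's id, the
    // entry/exit event type, and the first (integer) argument of the call.
    static void LogFirstArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
      // Record FuncId/Type/Arg1 into a preallocated buffer; keep this cheap.
    }

    void InstallArg1Logging() {
      // __xray_set_handler_arg1() returns 1 on success, 0 on error,
      // per the comments above.
      __xray_set_handler_arg1(LogFirstArg);
      __xray_patch();  // patch sleds so instrumented functions start logging
    }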
@@ -160,7 +160,7 @@ struct QuarantineCallback {
  }

  void *Allocate(uptr size) {
    return get_allocator().Allocate(cache_, size, 1, false);
    return get_allocator().Allocate(cache_, size, 1);
  }

  void Deallocate(void *p) {
@@ -266,7 +266,8 @@ struct Allocator {
  }

  void Initialize(const AllocatorOptions &options) {
    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.Init(options.release_to_os_interval_ms);
    SharedInitCode(options);
  }
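The recurring pattern in this import replaces per-allocator may_return_null state with a process-wide cached flag plus failure-handler policies. A condensed sketch of that shape, simplified from the sanitizer_common changes further below (not a verbatim excerpt):

    // Cached copy of the allocator_may_return_null flag; it can be flipped at
    // runtime (e.g. by ASan activation) without re-reading the flags.
    static atomic_uint8_t allocator_may_return_null = {0};

    bool AllocatorMayReturnNull() {
      return atomic_load(&allocator_may_return_null, memory_order_relaxed);
    }

    void SetAllocatorMayReturnNull(bool may_return_null) {
      atomic_store(&allocator_may_return_null, may_return_null,
                   memory_order_relaxed);
    }

    // Policy used by the combined/secondary allocators: return null when the
    // flag allows it, otherwise report and die.
    struct ReturnNullOrDieOnFailure {
      static void *OnBadRequest() {
        if (AllocatorMayReturnNull()) return nullptr;
        ReportAllocatorCannotReturnNull();  // NORETURN
      }
    };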
@@ -302,7 +303,7 @@ struct Allocator {
  }

  void ReInitialize(const AllocatorOptions &options) {
    allocator.SetMayReturnNull(options.may_return_null);
    SetAllocatorMayReturnNull(options.may_return_null);
    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
    SharedInitCode(options);
@@ -323,7 +324,7 @@ struct Allocator {
    options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
    options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
    options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
    options->may_return_null = allocator.MayReturnNull();
    options->may_return_null = AllocatorMayReturnNull();
    options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@ struct Allocator {
    if (UNLIKELY(!asan_inited))
      AsanInitFromRtl();
    if (RssLimitExceeded())
      return allocator.ReturnNullOrDieOnOOM();
      return AsanAllocator::FailureHandler::OnOOM();
    Flags &fl = *flags();
    CHECK(stack);
    const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,23 +408,21 @@ struct Allocator {
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
      return allocator.ReturnNullOrDieOnBadRequest();
      return AsanAllocator::FailureHandler::OnBadRequest();
    }

    AsanThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated =
          allocator.Allocate(cache, needed_size, 8, false);
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated =
          allocator.Allocate(cache, needed_size, 8, false);
      allocated = allocator.Allocate(cache, needed_size, 8);
    }

    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
    if (!allocated)
      return nullptr;

    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -634,7 +633,7 @@ struct Allocator {

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
      return allocator.ReturnNullOrDieOnBadRequest();
      return AsanAllocator::FailureHandler::OnBadRequest();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
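For clarity, the overflow test guarding nmemb * size above amounts to the following sanitizer_common helper (a sketch matching the generic implementation, not this file's source):

    // Returns true if n * size would overflow a uptr, in which case calloc
    // must fail instead of silently allocating a too-small block.
    bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
      if (!size) return false;
      uptr max = (uptr)-1L;
      return (max / size) < n;
    }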
@@ -204,6 +204,14 @@ class ScopedInErrorReport {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
@@ -410,15 +410,15 @@ if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
    install "$TMPDIR/asanwrapper" /system/bin 755
    install "$TMPDIR/asanwrapper64" /system/bin 755

    adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
    adb_shell ln -s $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK
    adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
    adb_shell ln -sf $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK
  else
    install "$TMPDIR/$ASAN_RT" /system/lib 644
    install "$TMPDIR/app_process32" /system/bin 755 $CTX
    install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX
    install "$TMPDIR/asanwrapper" /system/bin 755 $CTX

    adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
    adb_shell ln -sf $ASAN_RT /system/lib/$ASAN_RT_SYMLINK

    adb_shell rm /system/bin/app_process
    adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
@@ -1,3 +1,12 @@
___asan_default_options
___asan_default_suppressions
___asan_on_error
___asan_set_shadow_00
___asan_set_shadow_f1
___asan_set_shadow_f2
___asan_set_shadow_f3
___asan_set_shadow_f4
___asan_set_shadow_f5
___asan_set_shadow_f6
___asan_set_shadow_f7
___asan_set_shadow_f8
@@ -477,7 +477,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
  switch (*(u8*)address) {
    case 0xA1:  // A1 XX XX XX XX XX XX XX XX :
                //   movabs eax, dword ptr ds:[XXXXXXXX]
      return 8;
      return 9;
  }

  switch (*(u16*)address) {
@@ -495,6 +495,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
    case 0x5741:  // push r15
    case 0x9066:  // Two-byte NOP
      return 2;

    case 0x058B:  // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
      if (rel_offset)
        *rel_offset = 2;
      return 6;
  }

  switch (0x00FFFFFF & *(u32*)address) {
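The new 0x058B case above reports a RIP-relative operand through rel_offset. A hedged sketch of how a hotpatching trampoline can use that information when relocating the instruction (illustrative only, not the interception_win.cc implementation):

    // Copy one instruction from 'src' to 'dst', fixing up a RIP-relative
    // displacement if GetInstructionSize() reported one via rel_offset.
    static void CopyInstruction(uptr dst, uptr src) {
      size_t rel_offset = 0;
      size_t size = GetInstructionSize(src, &rel_offset);
      memcpy((void *)dst, (void *)src, size);
      if (rel_offset) {
        // The 32-bit displacement is relative to the end of the instruction,
        // so moving the instruction by (src - dst) keeps the target unchanged.
        s32 *disp = (s32 *)(dst + rel_offset);
        *disp += (s32)(src - dst);
      }
    }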
@@ -170,6 +170,13 @@ const u8 kPatchableCode5[] = {
    0x54,                           // push esp
};

#if SANITIZER_WINDOWS64
u8 kLoadGlobalCode[] = {
    0x8B, 0x05, 0x00, 0x00, 0x00, 0x00,  // mov eax [rip + global]
    0xC3,                                // ret
};
#endif

const u8 kUnpatchableCode1[] = {
    0xC3,                           // ret
};
@@ -502,6 +509,10 @@ TEST(Interception, PatchableFunction) {
  EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));
  EXPECT_TRUE(TestFunctionPatching(kPatchableCode5, override));

#if SANITIZER_WINDOWS64
  EXPECT_TRUE(TestFunctionPatching(kLoadGlobalCode, override));
#endif

  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
  EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode3, override));
@@ -13,6 +13,7 @@ set(LSAN_SOURCES
  lsan_allocator.cc
  lsan_linux.cc
  lsan_interceptors.cc
  lsan_mac.cc
  lsan_malloc_mac.cc
  lsan_preinit.cc
  lsan_thread.cc)
@@ -38,6 +38,8 @@
  GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
                  common_flags()->fast_unwind_on_malloc)

#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)

namespace __lsan {

void InitializeInterceptors();
@@ -38,8 +38,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_may_return_null,
      common_flags()->allocator_release_to_os_interval_ms);
}
@@ -76,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return nullptr;
  }
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
@@ -79,8 +79,7 @@ void EnableInThisThread() {

u32 GetCurrentThread() {
  thread_local_data_t *data = get_tls_val(false);
  CHECK(data);
  return data->current_thread_id;
  return data ? data->current_thread_id : kInvalidTid;
}

void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
lib/lsan/lsan_mac.cc (new file, 192 lines)
@@ -0,0 +1,192 @@
|
||||
//===-- lsan_mac.cc -------------------------------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of LeakSanitizer, a memory leak checker.
|
||||
//
|
||||
// Mac-specific details.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_platform.h"
|
||||
#if SANITIZER_MAC
|
||||
|
||||
#include "interception/interception.h"
|
||||
#include "lsan.h"
|
||||
#include "lsan_allocator.h"
|
||||
#include "lsan_thread.h"
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
namespace __lsan {
|
||||
// Support for the following functions from libdispatch on Mac OS:
|
||||
// dispatch_async_f()
|
||||
// dispatch_async()
|
||||
// dispatch_sync_f()
|
||||
// dispatch_sync()
|
||||
// dispatch_after_f()
|
||||
// dispatch_after()
|
||||
// dispatch_group_async_f()
|
||||
// dispatch_group_async()
|
||||
// TODO(glider): libdispatch API contains other functions that we don't support
|
||||
// yet.
|
||||
//
|
||||
// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
|
||||
// they can cause jobs to run on a thread different from the current one.
|
||||
// TODO(glider): if so, we need a test for this (otherwise we should remove
|
||||
// them).
|
||||
//
|
||||
// The following functions use dispatch_barrier_async_f() (which isn't a library
|
||||
// function but is exported) and are thus supported:
|
||||
// dispatch_source_set_cancel_handler_f()
|
||||
// dispatch_source_set_cancel_handler()
|
||||
// dispatch_source_set_event_handler_f()
|
||||
// dispatch_source_set_event_handler()
|
||||
//
|
||||
// The reference manual for Grand Central Dispatch is available at
|
||||
// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
|
||||
// The implementation details are at
|
||||
// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
|
||||
|
||||
typedef void *dispatch_group_t;
|
||||
typedef void *dispatch_queue_t;
|
||||
typedef void *dispatch_source_t;
|
||||
typedef u64 dispatch_time_t;
|
||||
typedef void (*dispatch_function_t)(void *block);
|
||||
typedef void *(*worker_t)(void *block);
|
||||
|
||||
// A wrapper for the ObjC blocks used to support libdispatch.
|
||||
typedef struct {
|
||||
void *block;
|
||||
dispatch_function_t func;
|
||||
u32 parent_tid;
|
||||
} lsan_block_context_t;
|
||||
|
||||
ALWAYS_INLINE
|
||||
void lsan_register_worker_thread(int parent_tid) {
|
||||
if (GetCurrentThread() == kInvalidTid) {
|
||||
u32 tid = ThreadCreate(parent_tid, 0, true);
|
||||
ThreadStart(tid, GetTid());
|
||||
SetCurrentThread(tid);
|
||||
}
|
||||
}
|
||||
|
||||
// For use by only those functions that allocated the context via
|
||||
// alloc_lsan_context().
|
||||
extern "C" void lsan_dispatch_call_block_and_release(void *block) {
|
||||
lsan_block_context_t *context = (lsan_block_context_t *)block;
|
||||
VReport(2,
|
||||
"lsan_dispatch_call_block_and_release(): "
|
||||
"context: %p, pthread_self: %p\n",
|
||||
block, pthread_self());
|
||||
lsan_register_worker_thread(context->parent_tid);
|
||||
// Call the original dispatcher for the block.
|
||||
context->func(context->block);
|
||||
lsan_free(context);
|
||||
}
|
||||
|
||||
} // namespace __lsan
|
||||
|
||||
using namespace __lsan; // NOLINT
|
||||
|
||||
// Wrap |ctxt| and |func| into an lsan_block_context_t.
|
||||
// The caller retains control of the allocated context.
|
||||
extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
|
||||
dispatch_function_t func) {
|
||||
GET_STACK_TRACE_THREAD;
|
||||
lsan_block_context_t *lsan_ctxt =
|
||||
(lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
|
||||
lsan_ctxt->block = ctxt;
|
||||
lsan_ctxt->func = func;
|
||||
lsan_ctxt->parent_tid = GetCurrentThread();
|
||||
return lsan_ctxt;
|
||||
}
|
||||
|
||||
// Define interceptor for dispatch_*_f function with the three most common
|
||||
// parameters: dispatch_queue_t, context, dispatch_function_t.
|
||||
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
|
||||
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
|
||||
dispatch_function_t func) { \
|
||||
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
|
||||
return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
|
||||
lsan_dispatch_call_block_and_release); \
|
||||
}
|
||||
|
||||
INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
|
||||
INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
|
||||
INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
|
||||
|
||||
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
|
||||
void *ctxt, dispatch_function_t func) {
|
||||
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
|
||||
return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
|
||||
lsan_dispatch_call_block_and_release);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
|
||||
dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
|
||||
lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
|
||||
REAL(dispatch_group_async_f)
|
||||
(group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
|
||||
}
|
||||
|
||||
#if !defined(MISSING_BLOCKS_SUPPORT)
|
||||
extern "C" {
|
||||
void dispatch_async(dispatch_queue_t dq, void (^work)(void));
|
||||
void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
|
||||
void (^work)(void));
|
||||
void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
|
||||
void (^work)(void));
|
||||
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
|
||||
void (^work)(void));
|
||||
void dispatch_source_set_event_handler(dispatch_source_t ds,
|
||||
void (^work)(void));
|
||||
}
|
||||
|
||||
#define GET_LSAN_BLOCK(work) \
|
||||
void (^lsan_block)(void); \
|
||||
int parent_tid = GetCurrentThread(); \
|
||||
lsan_block = ^(void) { \
|
||||
lsan_register_worker_thread(parent_tid); \
|
||||
work(); \
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
|
||||
GET_LSAN_BLOCK(work);
|
||||
REAL(dispatch_async)(dq, lsan_block);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
|
||||
dispatch_queue_t dq, void (^work)(void)) {
|
||||
GET_LSAN_BLOCK(work);
|
||||
REAL(dispatch_group_async)(dg, dq, lsan_block);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
|
||||
void (^work)(void)) {
|
||||
GET_LSAN_BLOCK(work);
|
||||
REAL(dispatch_after)(when, queue, lsan_block);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
|
||||
void (^work)(void)) {
|
||||
if (!work) {
|
||||
REAL(dispatch_source_set_cancel_handler)(ds, work);
|
||||
return;
|
||||
}
|
||||
GET_LSAN_BLOCK(work);
|
||||
REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
|
||||
}
|
||||
|
||||
INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
|
||||
void (^work)(void)) {
|
||||
GET_LSAN_BLOCK(work);
|
||||
REAL(dispatch_source_set_event_handler)(ds, lsan_block);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_MAC
|
@ -77,7 +77,7 @@ u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
|
||||
/* arg */ nullptr);
|
||||
}
|
||||
|
||||
void ThreadStart(u32 tid, tid_t os_id) {
|
||||
void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
|
||||
OnStartedArgs args;
|
||||
uptr stack_size = 0;
|
||||
uptr tls_size = 0;
|
||||
@ -87,7 +87,7 @@ void ThreadStart(u32 tid, tid_t os_id) {
|
||||
args.tls_end = args.tls_begin + tls_size;
|
||||
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
|
||||
args.dtls = DTLS_Get();
|
||||
thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
|
||||
thread_registry->StartThread(tid, os_id, workerthread, &args);
|
||||
}
|
||||
|
||||
void ThreadFinish() {
|
||||
|
@ -45,7 +45,7 @@ class ThreadContext : public ThreadContextBase {
|
||||
|
||||
void InitializeThreadRegistry();
|
||||
|
||||
void ThreadStart(u32 tid, tid_t os_id);
|
||||
void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
|
||||
void ThreadFinish();
|
||||
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
|
||||
void ThreadJoin(u32 tid);
|
||||
|
@ -119,9 +119,8 @@ static AllocatorCache fallback_allocator_cache;
|
||||
static SpinMutex fallback_mutex;
|
||||
|
||||
void MsanAllocatorInit() {
|
||||
allocator.Init(
|
||||
common_flags()->allocator_may_return_null,
|
||||
common_flags()->allocator_release_to_os_interval_ms);
|
||||
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
|
||||
allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
|
||||
}
|
||||
|
||||
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
|
||||
@ -139,17 +138,17 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
|
||||
if (size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
|
||||
(void *)size);
|
||||
return allocator.ReturnNullOrDieOnBadRequest();
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
}
|
||||
MsanThread *t = GetCurrentThread();
|
||||
void *allocated;
|
||||
if (t) {
|
||||
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
|
||||
allocated = allocator.Allocate(cache, size, alignment, false);
|
||||
allocated = allocator.Allocate(cache, size, alignment);
|
||||
} else {
|
||||
SpinMutexLock l(&fallback_mutex);
|
||||
AllocatorCache *cache = &fallback_allocator_cache;
|
||||
allocated = allocator.Allocate(cache, size, alignment, false);
|
||||
allocated = allocator.Allocate(cache, size, alignment);
|
||||
}
|
||||
Metadata *meta =
|
||||
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
|
||||
@ -197,7 +196,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
|
||||
|
||||
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
|
||||
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
|
||||
return allocator.ReturnNullOrDieOnBadRequest();
|
||||
return Allocator::FailureHandler::OnBadRequest();
|
||||
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
|
||||
}
|
||||
|
||||
|
@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
|
||||
SpinMutexLock l(&internal_alloc_init_mu);
|
||||
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
|
||||
0) {
|
||||
internal_allocator_instance->Init(
|
||||
/* may_return_null */ false, kReleaseToOSIntervalNever);
|
||||
internal_allocator_instance->Init(kReleaseToOSIntervalNever);
|
||||
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
|
||||
}
|
||||
}
|
||||
@ -108,9 +107,9 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
|
||||
if (cache == 0) {
|
||||
SpinMutexLock l(&internal_allocator_cache_mu);
|
||||
return internal_allocator()->Allocate(&internal_allocator_cache, size,
|
||||
alignment, false);
|
||||
alignment);
|
||||
}
|
||||
return internal_allocator()->Allocate(cache, size, alignment, false);
|
||||
return internal_allocator()->Allocate(cache, size, alignment);
|
||||
}
|
||||
|
||||
static void *RawInternalRealloc(void *ptr, uptr size,
|
||||
@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
|
||||
|
||||
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
|
||||
if (CallocShouldReturnNullDueToOverflow(count, size))
|
||||
return internal_allocator()->ReturnNullOrDieOnBadRequest();
|
||||
return InternalAllocator::FailureHandler::OnBadRequest();
|
||||
void *p = InternalAlloc(count * size, cache);
|
||||
if (p) internal_memset(p, 0, count * size);
|
||||
return p;
|
||||
@ -209,12 +208,15 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
|
||||
return (max / size) < n;
|
||||
}
|
||||
|
||||
static atomic_uint8_t reporting_out_of_memory = {0};
|
||||
static atomic_uint8_t allocator_out_of_memory = {0};
|
||||
static atomic_uint8_t allocator_may_return_null = {0};
|
||||
|
||||
bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
|
||||
bool IsAllocatorOutOfMemory() {
|
||||
return atomic_load_relaxed(&allocator_out_of_memory);
|
||||
}
|
||||
|
||||
void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
|
||||
if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
|
||||
// Prints error message and kills the program.
|
||||
void NORETURN ReportAllocatorCannotReturnNull() {
|
||||
Report("%s's allocator is terminating the process instead of returning 0\n",
|
||||
SanitizerToolName);
|
||||
Report("If you don't like this behavior set allocator_may_return_null=1\n");
|
||||
@ -222,4 +224,35 @@ void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
|
||||
Die();
|
||||
}
|
||||
|
||||
bool AllocatorMayReturnNull() {
|
||||
return atomic_load(&allocator_may_return_null, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void SetAllocatorMayReturnNull(bool may_return_null) {
|
||||
atomic_store(&allocator_may_return_null, may_return_null,
|
||||
memory_order_relaxed);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnFailure::OnBadRequest() {
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnFailure::OnOOM() {
|
||||
atomic_store_relaxed(&allocator_out_of_memory, 1);
|
||||
if (AllocatorMayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void *DieOnFailure::OnBadRequest() {
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
void *DieOnFailure::OnOOM() {
|
||||
atomic_store_relaxed(&allocator_out_of_memory, 1);
|
||||
ReportAllocatorCannotReturnNull();
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
@ -24,12 +24,28 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// Returns true if ReportAllocatorCannotReturnNull(true) was called.
|
||||
// Can be use to avoid memory hungry operations.
|
||||
bool IsReportingOOM();
|
||||
// Since flags are immutable and allocator behavior can be changed at runtime
|
||||
// (unit tests or ASan on Android are some examples), allocator_may_return_null
|
||||
// flag value is cached here and can be altered later.
|
||||
bool AllocatorMayReturnNull();
|
||||
void SetAllocatorMayReturnNull(bool may_return_null);
|
||||
|
||||
// Prints error message and kills the program.
|
||||
void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
|
||||
// Allocator failure handling policies:
|
||||
// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
|
||||
// dies otherwise.
|
||||
struct ReturnNullOrDieOnFailure {
|
||||
static void *OnBadRequest();
|
||||
static void *OnOOM();
|
||||
};
|
||||
// Always dies on the failure.
|
||||
struct DieOnFailure {
|
||||
static void *OnBadRequest();
|
||||
static void *OnOOM();
|
||||
};
|
||||
|
||||
// Returns true if allocator detected OOM condition. Can be used to avoid memory
|
||||
// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
|
||||
bool IsAllocatorOutOfMemory();
|
||||
|
||||
// Allocators call these callbacks on mmap/munmap.
|
||||
struct NoOpMapUnmapCallback {
|
||||
|
@ -24,31 +24,26 @@ template <class PrimaryAllocator, class AllocatorCache,
|
||||
class SecondaryAllocator> // NOLINT
|
||||
class CombinedAllocator {
|
||||
public:
|
||||
void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
|
||||
typedef typename SecondaryAllocator::FailureHandler FailureHandler;
|
||||
|
||||
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
|
||||
primary_.Init(release_to_os_interval_ms);
|
||||
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void InitLinkerInitialized(
|
||||
bool may_return_null, s32 release_to_os_interval_ms) {
|
||||
secondary_.InitLinkerInitialized(may_return_null);
|
||||
secondary_.InitLinkerInitialized();
|
||||
stats_.InitLinkerInitialized();
|
||||
InitCommon(may_return_null, release_to_os_interval_ms);
|
||||
}
|
||||
|
||||
void Init(bool may_return_null, s32 release_to_os_interval_ms) {
|
||||
secondary_.Init(may_return_null);
|
||||
void Init(s32 release_to_os_interval_ms) {
|
||||
primary_.Init(release_to_os_interval_ms);
|
||||
secondary_.Init();
|
||||
stats_.Init();
|
||||
InitCommon(may_return_null, release_to_os_interval_ms);
|
||||
}
|
||||
|
||||
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
|
||||
bool cleared = false) {
|
||||
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
|
||||
// Returning 0 on malloc(0) may break a lot of code.
|
||||
if (size == 0)
|
||||
size = 1;
|
||||
if (size + alignment < size)
|
||||
return ReturnNullOrDieOnBadRequest();
|
||||
return FailureHandler::OnBadRequest();
|
||||
uptr original_size = size;
|
||||
// If alignment requirements are to be fulfilled by the frontend allocator
|
||||
// rather than by the primary or secondary, passing an alignment lower than
|
||||
@ -56,49 +51,24 @@ class CombinedAllocator {
|
||||
// alignment check.
|
||||
if (alignment > 8)
|
||||
size = RoundUpTo(size, alignment);
|
||||
void *res;
|
||||
bool from_primary = primary_.CanAllocate(size, alignment);
|
||||
// The primary allocator should return a 2^x aligned allocation when
|
||||
// requested 2^x bytes, hence using the rounded up 'size' when being
|
||||
// serviced by the primary (this is no longer true when the primary is
|
||||
// using a non-fixed base address). The secondary takes care of the
|
||||
// alignment without such requirement, and allocating 'size' would use
|
||||
// extraneous memory, so we employ 'original_size'.
|
||||
if (from_primary)
|
||||
void *res;
|
||||
if (primary_.CanAllocate(size, alignment))
|
||||
res = cache->Allocate(&primary_, primary_.ClassID(size));
|
||||
else
|
||||
res = secondary_.Allocate(&stats_, original_size, alignment);
|
||||
if (!res)
|
||||
return FailureHandler::OnOOM();
|
||||
if (alignment > 8)
|
||||
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
|
||||
// When serviced by the secondary, the chunk comes from a mmap allocation
|
||||
// and will be zero'd out anyway. We only need to clear our the chunk if
|
||||
// it was serviced by the primary, hence using the rounded up 'size'.
|
||||
if (cleared && res && from_primary)
|
||||
internal_bzero_aligned16(res, RoundUpTo(size, 16));
|
||||
return res;
|
||||
}
|
||||
|
||||
bool MayReturnNull() const {
|
||||
return atomic_load(&may_return_null_, memory_order_acquire);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnBadRequest() {
|
||||
if (MayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull(false);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnOOM() {
|
||||
if (MayReturnNull())
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull(true);
|
||||
}
|
||||
|
||||
void SetMayReturnNull(bool may_return_null) {
|
||||
secondary_.SetMayReturnNull(may_return_null);
|
||||
atomic_store(&may_return_null_, may_return_null, memory_order_release);
|
||||
}
|
||||
|
||||
s32 ReleaseToOSIntervalMs() const {
|
||||
return primary_.ReleaseToOSIntervalMs();
|
||||
}
|
||||
@ -219,6 +189,5 @@ class CombinedAllocator {
|
||||
PrimaryAllocator primary_;
|
||||
SecondaryAllocator secondary_;
|
||||
AllocatorGlobalStats stats_;
|
||||
atomic_uint8_t may_return_null_;
|
||||
};
|
||||
|
||||
|
@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
|
||||
InternalAllocatorCache;
|
||||
|
||||
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
|
||||
LargeMmapAllocator<> > InternalAllocator;
|
||||
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
|
||||
> InternalAllocator;
|
||||
|
||||
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
|
||||
uptr alignment = 0);
|
||||
|
@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
|
||||
CHECK_NE(class_id, 0UL);
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
PerClass *c = &per_class_[class_id];
|
||||
if (UNLIKELY(c->count == 0))
|
||||
Refill(allocator, class_id);
|
||||
if (UNLIKELY(c->count == 0)) {
|
||||
if (UNLIKELY(!Refill(allocator, class_id)))
|
||||
return nullptr;
|
||||
}
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
void *res = c->batch[--c->count];
|
||||
PREFETCH(c->batch[c->count - 1]);
|
||||
@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
|
||||
Deallocate(allocator, batch_class_id, b);
|
||||
}
|
||||
|
||||
NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
|
||||
NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
|
||||
InitCache();
|
||||
PerClass *c = &per_class_[class_id];
|
||||
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
|
||||
if (UNLIKELY(!b))
|
||||
return false;
|
||||
CHECK_GT(b->Count(), 0);
|
||||
b->CopyToArray(c->batch);
|
||||
c->count = b->Count();
|
||||
DestroyBatch(class_id, allocator, b);
|
||||
return true;
|
||||
}
|
||||
|
||||
NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
|
||||
@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
|
||||
uptr first_idx_to_drain = c->count - cnt;
|
||||
TransferBatch *b = CreateBatch(
|
||||
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
|
||||
// Failure to allocate a batch while releasing memory is non recoverable.
|
||||
// TODO(alekseys): Figure out how to do it without allocating a new batch.
|
||||
if (UNLIKELY(!b))
|
||||
DieOnFailure::OnOOM();
|
||||
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
|
||||
&c->batch[first_idx_to_drain], cnt);
|
||||
c->count -= cnt;
|
||||
|
@ -24,7 +24,8 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
|
||||
// be returned by MmapOrDie().
|
||||
//
|
||||
// Region:
|
||||
// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
|
||||
// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
|
||||
// kRegionSize).
|
||||
// Since the regions are aligned by kRegionSize, there are exactly
|
||||
// kNumPossibleRegions possible regions in the address space and so we keep
|
||||
// a ByteMap possible_regions to store the size classes of each Region.
|
||||
@ -149,8 +150,9 @@ class SizeClassAllocator32 {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
SizeClassInfo *sci = GetSizeClassInfo(class_id);
|
||||
SpinMutexLock l(&sci->mutex);
|
||||
if (sci->free_list.empty())
|
||||
PopulateFreeList(stat, c, sci, class_id);
|
||||
if (sci->free_list.empty() &&
|
||||
UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
|
||||
return nullptr;
|
||||
CHECK(!sci->free_list.empty());
|
||||
TransferBatch *b = sci->free_list.front();
|
||||
sci->free_list.pop_front();
|
||||
@ -277,8 +279,10 @@ class SizeClassAllocator32 {
|
||||
|
||||
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
|
||||
"SizeClassAllocator32"));
|
||||
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
|
||||
kRegionSize, kRegionSize, "SizeClassAllocator32"));
|
||||
if (UNLIKELY(!res))
|
||||
return 0;
|
||||
MapUnmapCallback().OnMap(res, kRegionSize);
|
||||
stat->Add(AllocatorStatMapped, kRegionSize);
|
||||
CHECK_EQ(0U, (res & (kRegionSize - 1)));
|
||||
@ -291,16 +295,20 @@ class SizeClassAllocator32 {
|
||||
return &size_class_info_array[class_id];
|
||||
}
|
||||
|
||||
void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
|
||||
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
|
||||
SizeClassInfo *sci, uptr class_id) {
|
||||
uptr size = ClassIdToSize(class_id);
|
||||
uptr reg = AllocateRegion(stat, class_id);
|
||||
if (UNLIKELY(!reg))
|
||||
return false;
|
||||
uptr n_chunks = kRegionSize / (size + kMetadataSize);
|
||||
uptr max_count = TransferBatch::MaxCached(class_id);
|
||||
TransferBatch *b = nullptr;
|
||||
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
|
||||
if (!b) {
|
||||
b = c->CreateBatch(class_id, this, (TransferBatch*)i);
|
||||
if (!b)
|
||||
return false;
|
||||
b->Clear();
|
||||
}
|
||||
b->Add((void*)i);
|
||||
@ -314,6 +322,7 @@ class SizeClassAllocator32 {
|
||||
CHECK_GT(b->Count(), 0);
|
||||
sci->free_list.push_back(b);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
ByteMap possible_regions;
|
||||
|
@ -17,17 +17,19 @@
|
||||
// This class can (de)allocate only large chunks of memory using mmap/unmap.
|
||||
// The main purpose of this allocator is to cover large and rare allocation
|
||||
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
|
||||
template <class MapUnmapCallback = NoOpMapUnmapCallback>
|
||||
template <class MapUnmapCallback = NoOpMapUnmapCallback,
|
||||
class FailureHandlerT = ReturnNullOrDieOnFailure>
|
||||
class LargeMmapAllocator {
|
||||
public:
|
||||
void InitLinkerInitialized(bool may_return_null) {
|
||||
typedef FailureHandlerT FailureHandler;
|
||||
|
||||
void InitLinkerInitialized() {
|
||||
page_size_ = GetPageSizeCached();
|
||||
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
|
||||
}
|
||||
|
||||
void Init(bool may_return_null) {
|
||||
void Init() {
|
||||
internal_memset(this, 0, sizeof(*this));
|
||||
InitLinkerInitialized(may_return_null);
|
||||
InitLinkerInitialized();
|
||||
}
|
||||
|
||||
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
|
||||
@ -37,11 +39,11 @@ class LargeMmapAllocator {
|
||||
map_size += alignment;
|
||||
// Overflow.
|
||||
if (map_size < size)
|
||||
return ReturnNullOrDieOnBadRequest();
|
||||
return FailureHandler::OnBadRequest();
|
||||
uptr map_beg = reinterpret_cast<uptr>(
|
||||
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
|
||||
if (!map_beg)
|
||||
return ReturnNullOrDieOnOOM();
|
||||
return FailureHandler::OnOOM();
|
||||
CHECK(IsAligned(map_beg, page_size_));
|
||||
MapUnmapCallback().OnMap(map_beg, map_size);
|
||||
uptr map_end = map_beg + map_size;
|
||||
@ -75,24 +77,6 @@ class LargeMmapAllocator {
|
||||
return reinterpret_cast<void*>(res);
|
||||
}
|
||||
|
||||
bool MayReturnNull() const {
|
||||
return atomic_load(&may_return_null_, memory_order_acquire);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnBadRequest() {
|
||||
if (MayReturnNull()) return nullptr;
|
||||
ReportAllocatorCannotReturnNull(false);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnOOM() {
|
||||
if (MayReturnNull()) return nullptr;
|
||||
ReportAllocatorCannotReturnNull(true);
|
||||
}
|
||||
|
||||
void SetMayReturnNull(bool may_return_null) {
|
||||
atomic_store(&may_return_null_, may_return_null, memory_order_release);
|
||||
}
|
||||
|
||||
void Deallocate(AllocatorStats *stat, void *p) {
|
||||
Header *h = GetHeader(p);
|
||||
{
|
||||
@ -278,7 +262,6 @@ class LargeMmapAllocator {
|
||||
struct Stats {
|
||||
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
|
||||
} stats;
|
||||
atomic_uint8_t may_return_null_;
|
||||
SpinMutex mutex_;
|
||||
};
|
||||
|
||||
|
@ -71,16 +71,25 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
|
||||
return v;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
INLINE bool atomic_compare_exchange_strong(volatile T *a,
|
||||
typename T::Type *cmp,
|
||||
template <typename T>
|
||||
INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
|
||||
typename T::Type xchg,
|
||||
memory_order mo) {
|
||||
typedef typename T::Type Type;
|
||||
Type cmpv = *cmp;
|
||||
Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
|
||||
if (prev == cmpv)
|
||||
return true;
|
||||
Type prev;
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
if (sizeof(*a) == 8) {
|
||||
Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
|
||||
prev = __mips_sync_val_compare_and_swap<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
|
||||
} else {
|
||||
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
|
||||
}
|
||||
#else
|
||||
prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
|
||||
#endif
|
||||
if (prev == cmpv) return true;
|
||||
*cmp = prev;
|
||||
return false;
|
||||
}
|
||||
|
@ -17,6 +17,56 @@
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// MIPS32 does not support atomic > 4 bytes. To address this lack of
|
||||
// functionality, the sanitizer library provides helper methods which use an
|
||||
// internal spin lock mechanism to emulate atomic oprations when the size is
|
||||
// 8 bytes.
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
static void __spin_lock(volatile int *lock) {
|
||||
while (__sync_lock_test_and_set(lock, 1))
|
||||
while (*lock) {
|
||||
}
|
||||
}
|
||||
|
||||
static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
|
||||
|
||||
|
||||
// Make sure the lock is on its own cache line to prevent false sharing.
|
||||
// Put it inside a struct that is aligned and padded to the typical MIPS
|
||||
// cacheline which is 32 bytes.
|
||||
static struct {
|
||||
int lock;
|
||||
char pad[32 - sizeof(int)];
|
||||
} __attribute__((aligned(32))) lock = {0};
|
||||
|
||||
template <class T>
|
||||
T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
|
||||
T ret;
|
||||
|
||||
__spin_lock(&lock.lock);
|
||||
|
||||
ret = *ptr;
|
||||
*ptr = ret + val;
|
||||
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
|
||||
T ret;
|
||||
__spin_lock(&lock.lock);
|
||||
|
||||
ret = *ptr;
|
||||
if (ret == oldval) *ptr = newval;
|
||||
|
||||
__spin_unlock(&lock.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
INLINE void proc_yield(int cnt) {
|
||||
__asm__ __volatile__("" ::: "memory");
|
||||
}
|
||||
@ -53,8 +103,15 @@ INLINE typename T::Type atomic_load(
|
||||
// 64-bit load on 32-bit platform.
|
||||
// Gross, but simple and reliable.
|
||||
// Assume that it is not in read-only memory.
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
typename T::Type volatile *val_ptr =
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use);
|
||||
v = __mips_sync_fetch_and_add<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), 0);
|
||||
#else
|
||||
v = __sync_fetch_and_add(
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
|
||||
#endif
|
||||
}
|
||||
return v;
|
||||
}
|
||||
@ -84,7 +141,14 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
|
||||
typename T::Type cmp = a->val_dont_use;
|
||||
typename T::Type cur;
|
||||
for (;;) {
|
||||
#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
|
||||
typename T::Type volatile *val_ptr =
|
||||
const_cast<typename T::Type volatile *>(&a->val_dont_use);
|
||||
cur = __mips_sync_val_compare_and_swap<u64>(
|
||||
reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
|
||||
#else
|
||||
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
|
||||
#endif
|
||||
if (cmp == v)
|
||||
break;
|
||||
cmp = cur;
|
||||
|
@ -95,7 +95,9 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size);
|
||||
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
|
||||
void *MmapNoAccess(uptr size);
|
||||
// Map aligned chunk of address space; size and alignment are powers of two.
|
||||
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
|
||||
// Dies on all but out of memory errors, in the latter case returns nullptr.
|
||||
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
const char *mem_type);
|
||||
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
|
||||
// unaccessible memory.
|
||||
bool MprotectNoAccess(uptr addr, uptr size);
|
||||
@ -808,8 +810,11 @@ INLINE void LogMessageOnPrintf(const char *str) {}
|
||||
#if SANITIZER_LINUX
|
||||
// Initialize Android logging. Any writes before this are silently lost.
|
||||
void AndroidLogInit();
|
||||
void SetAbortMessage(const char *);
|
||||
#else
|
||||
INLINE void AndroidLogInit() {}
|
||||
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
|
||||
INLINE void SetAbortMessage(const char *) {}
|
||||
#endif
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
@ -919,6 +924,10 @@ const s32 kReleaseToOSIntervalNever = -1;
|
||||
|
||||
void CheckNoDeepBind(const char *filename, int flag);
|
||||
|
||||
// Returns the requested amount of random data (up to 256 bytes) that can then
|
||||
// be used to seed a PRNG.
|
||||
bool GetRandom(void *buffer, uptr length);
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
inline void *operator new(__sanitizer::operator_new_size_type size,
|
||||
|
@ -1604,6 +1604,32 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool GetRandom(void *buffer, uptr length) {
|
||||
if (!buffer || !length || length > 256)
|
||||
return false;
|
||||
#if defined(__NR_getrandom)
|
||||
static atomic_uint8_t skip_getrandom_syscall;
|
||||
if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
|
||||
// Up to 256 bytes, getrandom will not be interrupted.
|
||||
uptr res = internal_syscall(SYSCALL(getrandom), buffer, length, 0);
|
||||
int rverrno = 0;
|
||||
if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
|
||||
atomic_store_relaxed(&skip_getrandom_syscall, 1);
|
||||
else if (res == length)
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
uptr fd = internal_open("/dev/urandom", O_RDONLY);
|
||||
if (internal_iserror(fd))
|
||||
return false;
|
||||
// internal_read deals with EINTR.
|
||||
uptr res = internal_read(fd, buffer, length);
|
||||
if (internal_iserror(res))
|
||||
return false;
|
||||
internal_close(fd);
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
|
||||
|
@ -551,6 +551,13 @@ void LogMessageOnPrintf(const char *str) {
|
||||
WriteToSyslog(str);
|
||||
}
|
||||
|
||||
#if SANITIZER_ANDROID && __ANDROID_API__ >= 21
|
||||
extern "C" void android_set_abort_message(const char *msg);
|
||||
void SetAbortMessage(const char *str) { android_set_abort_message(str); }
|
||||
#else
|
||||
void SetAbortMessage(const char *str) {}
|
||||
#endif
|
||||
|
||||
#endif // SANITIZER_LINUX
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
@ -923,6 +923,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
|
||||
// Do nothing.
|
||||
}
|
||||
|
||||
// FIXME: implement on this platform.
|
||||
bool GetRandom(void *buffer, uptr length) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_MAC
|
||||
|
@ -164,11 +164,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
|
||||
// We want to map a chunk of address space aligned to 'alignment'.
|
||||
// We do it by maping a bit more and then unmaping redundant pieces.
|
||||
// We probably can do it with fewer syscalls in some OS-dependent way.
|
||||
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
|
||||
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
const char *mem_type) {
|
||||
CHECK(IsPowerOfTwo(size));
|
||||
CHECK(IsPowerOfTwo(alignment));
|
||||
uptr map_size = size + alignment;
|
||||
uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
|
||||
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
|
||||
if (!map_res)
|
||||
return nullptr;
|
||||
uptr map_end = map_res + map_size;
|
||||
uptr res = map_res;
|
||||
if (res & (alignment - 1)) // Not aligned.
|
||||
|
@ -189,25 +189,7 @@ void UnsetAlternateSignalStack() {
|
||||
|
||||
static void MaybeInstallSigaction(int signum,
|
||||
SignalHandlerType handler) {
|
||||
switch (GetHandleSignalMode(signum)) {
|
||||
case kHandleSignalNo:
|
||||
return;
|
||||
case kHandleSignalYes: {
|
||||
struct sigaction sigact;
|
||||
internal_memset(&sigact, 0, sizeof(sigact));
|
||||
CHECK_EQ(0, internal_sigaction(signum, nullptr, &sigact));
|
||||
if (sigact.sa_flags & SA_SIGINFO) {
|
||||
if (sigact.sa_sigaction) return;
|
||||
} else {
|
||||
if (sigact.sa_handler != SIG_DFL && sigact.sa_handler != SIG_IGN &&
|
||||
sigact.sa_handler != SIG_ERR)
|
||||
return;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case kHandleSignalExclusive:
|
||||
break;
|
||||
}
|
||||
if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
|
||||
|
||||
struct sigaction sigact;
|
||||
internal_memset(&sigact, 0, sizeof(sigact));
|
||||
|
@ -495,7 +495,7 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
|
||||
VReport(2, "Symbolizer is disabled.\n");
|
||||
return;
|
||||
}
|
||||
if (IsReportingOOM()) {
|
||||
if (IsAllocatorOutOfMemory()) {
|
||||
VReport(2, "Cannot use internal symbolizer: out of memory\n");
|
||||
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
|
||||
VReport(2, "Using internal symbolizer.\n");
|
||||
|
@ -131,18 +131,24 @@ void UnmapOrDie(void *addr, uptr size) {
|
||||
}
|
||||
}
|
||||
|
||||
static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
|
||||
const char *mmap_type) {
|
||||
error_t last_error = GetLastError();
|
||||
if (last_error == ERROR_NOT_ENOUGH_MEMORY)
|
||||
return nullptr;
|
||||
ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
|
||||
}
|
||||
|
||||
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
|
||||
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
|
||||
if (rv == 0) {
|
||||
error_t last_error = GetLastError();
|
||||
if (last_error != ERROR_NOT_ENOUGH_MEMORY)
|
||||
ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
|
||||
}
|
||||
if (rv == 0)
|
||||
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
|
||||
return rv;
|
||||
}
|
||||
|
||||
// We want to map a chunk of address space aligned to 'alignment'.
|
||||
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
|
||||
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
const char *mem_type) {
|
||||
CHECK(IsPowerOfTwo(size));
|
||||
CHECK(IsPowerOfTwo(alignment));
|
||||
|
||||
@ -152,7 +158,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
|
||||
uptr mapped_addr =
|
||||
(uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
|
||||
if (!mapped_addr)
|
||||
ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
|
||||
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
|
||||
|
||||
// If we got it right on the first try, return. Otherwise, unmap it and go to
|
||||
// the slow path.
|
||||
@ -172,8 +178,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
|
||||
mapped_addr =
|
||||
(uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
|
||||
if (!mapped_addr)
|
||||
ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
|
||||
GetLastError());
|
||||
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
|
||||
|
||||
// Find the aligned address.
|
||||
uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
|
||||
@ -191,7 +196,7 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
|
||||
|
||||
// Fail if we can't make this work quickly.
|
||||
if (retries == kMaxRetries && mapped_addr == 0)
|
||||
ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
|
||||
return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
|
||||
|
||||
return (void *)mapped_addr;
|
||||
}
|
||||
@ -1002,6 +1007,11 @@ void CheckNoDeepBind(const char *filename, int flag) {
|
||||
// Do nothing.
|
||||
}
|
||||
|
||||
// FIXME: implement on this platform.
|
||||
bool GetRandom(void *buffer, uptr length) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // _WIN32
|
||||
|
@ -426,8 +426,8 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
|
||||
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
|
||||
TestMapUnmapCallback::map_count = 0;
|
||||
TestMapUnmapCallback::unmap_count = 0;
|
||||
LargeMmapAllocator<TestMapUnmapCallback> a;
|
||||
a.Init(/* may_return_null */ false);
|
||||
LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
|
||||
a.Init();
|
||||
AllocatorStats stats;
|
||||
stats.Init();
|
||||
void *x = a.Allocate(&stats, 1 << 20, 1);
|
||||
@ -463,8 +463,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
|
||||
#endif
|
||||
|
||||
TEST(SanitizerCommon, LargeMmapAllocator) {
|
||||
LargeMmapAllocator<> a;
|
||||
a.Init(/* may_return_null */ false);
|
||||
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
|
||||
a.Init();
|
||||
AllocatorStats stats;
|
||||
stats.Init();
|
||||
|
||||
@ -546,8 +546,9 @@ void TestCombinedAllocator() {
|
||||
typedef
|
||||
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
|
||||
Allocator;
|
||||
SetAllocatorMayReturnNull(true);
|
||||
Allocator *a = new Allocator;
|
||||
a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
|
||||
a->Init(kReleaseToOSIntervalNever);
|
||||
std::mt19937 r;
|
||||
|
||||
AllocatorCache cache;
|
||||
@ -561,7 +562,7 @@ void TestCombinedAllocator() {
|
||||
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
|
||||
|
||||
// Set to false
|
||||
a->SetMayReturnNull(false);
|
||||
SetAllocatorMayReturnNull(false);
|
||||
EXPECT_DEATH(a->Allocate(&cache, -1, 1),
|
||||
"allocator is terminating the process");
|
||||
|
||||
@ -873,8 +874,8 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
|
||||
}
|
||||
|
||||
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
|
||||
LargeMmapAllocator<> a;
|
||||
a.Init(/* may_return_null */ false);
|
||||
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
|
||||
a.Init();
|
||||
AllocatorStats stats;
|
||||
stats.Init();
|
||||
|
||||
@ -900,8 +901,8 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
|
||||
}
|
||||
|
||||
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
|
||||
LargeMmapAllocator<> a;
|
||||
a.Init(/* may_return_null */ false);
|
||||
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
|
||||
a.Init();
|
||||
AllocatorStats stats;
|
||||
stats.Init();
|
||||
|
||||
|
@ -72,12 +72,12 @@ TEST(SanitizerCommon, SortTest) {
|
||||
EXPECT_TRUE(IsSorted(array, 2));
|
||||
}
|
||||
|
||||
TEST(SanitizerCommon, MmapAlignedOrDie) {
|
||||
TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
|
||||
uptr PageSize = GetPageSizeCached();
|
||||
for (uptr size = 1; size <= 32; size *= 2) {
|
||||
for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
|
||||
for (int iter = 0; iter < 100; iter++) {
|
||||
uptr res = (uptr)MmapAlignedOrDie(
|
||||
uptr res = (uptr)MmapAlignedOrDieOnFatalError(
|
||||
size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
|
||||
EXPECT_EQ(0U, res % (alignment * PageSize));
|
||||
internal_memset((void*)res, 1, size * PageSize);
|
||||
@ -300,4 +300,21 @@ TEST(SanitizerCommon, InternalScopedString) {
|
||||
EXPECT_STREQ("012345678", str.data());
|
||||
}
|
||||
|
||||
#if SANITIZER_LINUX
|
||||
TEST(SanitizerCommon, GetRandom) {
|
||||
u8 buffer_1[32], buffer_2[32];
|
||||
EXPECT_FALSE(GetRandom(nullptr, 32));
|
||||
EXPECT_FALSE(GetRandom(buffer_1, 0));
|
||||
EXPECT_FALSE(GetRandom(buffer_1, 512));
|
||||
EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
|
||||
for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
|
||||
for (uptr i = 0; i < 100; i++) {
|
||||
EXPECT_TRUE(GetRandom(buffer_1, size));
|
||||
EXPECT_TRUE(GetRandom(buffer_2, size));
|
||||
EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
@ -273,6 +273,8 @@ struct ScudoAllocator {
  static const uptr MaxAllowedMallocSize =
      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

  typedef ReturnNullOrDieOnFailure FailureHandler;

  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;

@ -326,7 +328,8 @@ struct ScudoAllocator {
    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
    DeleteSizeMismatch = Options.DeleteSizeMismatch;
    ZeroContents = Options.ZeroContents;
    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
    SetAllocatorMayReturnNull(Options.MayReturnNull);
    BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
    AllocatorQuarantine.Init(
        static_cast<uptr>(Options.QuarantineSizeMb) << 20,
        static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);

@ -354,11 +357,11 @@ struct ScudoAllocator {
      dieWithMessage("ERROR: alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
      return FailureHandler::OnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
      return FailureHandler::OnBadRequest();
    if (Size == 0)
      Size = 1;

@ -366,7 +369,7 @@ struct ScudoAllocator {
    uptr AlignedSize = (Alignment > MinAlignment) ?
        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
    if (AlignedSize >= MaxAllowedMallocSize)
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
      return FailureHandler::OnBadRequest();

    // Primary and Secondary backed allocations have a different treatment. We
    // deal with alignment requirements of Primary serviced allocations here,

@ -391,7 +394,7 @@ struct ScudoAllocator {
                                      AllocationAlignment, FromPrimary);
    }
    if (!Ptr)
      return BackendAllocator.ReturnNullOrDieOnOOM();
      return FailureHandler::OnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if ((ForceZeroContents || ZeroContents) && FromPrimary)

@ -583,7 +586,7 @@ struct ScudoAllocator {
    initThreadMaybe();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB) // Overflow check
      return BackendAllocator.ReturnNullOrDieOnBadRequest();
      return FailureHandler::OnBadRequest();
    return allocate(Total, MinAlignment, FromMalloc, true);
  }
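The Scudo and TSan hunks in this import drop the per-backend may-return-null flag and route every bad request or OOM through a shared failure handler keyed off a single process-wide setting. Below is a simplified, hedged sketch of that pattern; the names ending in Sketch are invented for illustration, while the real SetAllocatorMayReturnNull and ReturnNullOrDieOnFailure live in sanitizer_common.

#include <atomic>
#include <cstdio>
#include <cstdlib>

// One process-wide flag replaces the copies previously stored in each backend.
static std::atomic<bool> allocator_may_return_null_sketch{false};

static void SetAllocatorMayReturnNullSketch(bool may_return_null) {
  allocator_may_return_null_sketch.store(may_return_null,
                                         std::memory_order_relaxed);
}

// Mirrors the FailureHandler::OnBadRequest()/OnOOM() call sites above:
// return nullptr when allowed, otherwise report and die.
struct ReturnNullOrDieOnFailureSketch {
  static void *OnBadRequest() { return ReportOrNull("bad allocation request"); }
  static void *OnOOM() { return ReportOrNull("out of memory"); }

 private:
  static void *ReportOrNull(const char *what) {
    if (allocator_may_return_null_sketch.load(std::memory_order_relaxed))
      return nullptr;
    fprintf(stderr, "allocator: %s and may_return_null=0, aborting\n", what);
    abort();
  }
};

int main() {
  SetAllocatorMayReturnNullSketch(true);
  // With may_return_null enabled, the handler degrades to returning nullptr.
  return ReturnNullOrDieOnFailureSketch::OnOOM() == nullptr ? 0 : 1;
}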
@ -23,11 +23,10 @@ template <class PrimaryAllocator, class AllocatorCache,
|
||||
class SecondaryAllocator>
|
||||
class ScudoCombinedAllocator {
|
||||
public:
|
||||
void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
|
||||
void Init(s32 ReleaseToOSIntervalMs) {
|
||||
Primary.Init(ReleaseToOSIntervalMs);
|
||||
Secondary.Init(AllocatorMayReturnNull);
|
||||
Secondary.Init();
|
||||
Stats.Init();
|
||||
atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
|
||||
}
|
||||
|
||||
void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
|
||||
@ -37,18 +36,6 @@ class ScudoCombinedAllocator {
|
||||
return Secondary.Allocate(&Stats, Size, Alignment);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnBadRequest() {
|
||||
if (atomic_load_relaxed(&MayReturnNull))
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull(false);
|
||||
}
|
||||
|
||||
void *ReturnNullOrDieOnOOM() {
|
||||
if (atomic_load_relaxed(&MayReturnNull))
|
||||
return nullptr;
|
||||
ReportAllocatorCannotReturnNull(true);
|
||||
}
|
||||
|
||||
void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
|
||||
if (FromPrimary)
|
||||
Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
|
||||
@ -78,7 +65,6 @@ class ScudoCombinedAllocator {
|
||||
PrimaryAllocator Primary;
|
||||
SecondaryAllocator Secondary;
|
||||
AllocatorGlobalStats Stats;
|
||||
atomic_uint8_t MayReturnNull;
|
||||
};
|
||||
|
||||
#endif // SCUDO_ALLOCATOR_COMBINED_H_
|
||||
|
@ -24,9 +24,8 @@
class ScudoLargeMmapAllocator {
 public:

  void Init(bool AllocatorMayReturnNull) {
  void Init() {
    PageSize = GetPageSizeCached();
    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {

@ -42,7 +41,7 @@ class ScudoLargeMmapAllocator {

    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
    if (MapBeg == ~static_cast<uptr>(0))
      return ReturnNullOrDieOnOOM();
      return ReturnNullOrDieOnFailure::OnOOM();
    // A page-aligned pointer is assumed after that, so check it now.
    CHECK(IsAligned(MapBeg, PageSize));
    uptr MapEnd = MapBeg + MapSize;

@ -96,12 +95,6 @@ class ScudoLargeMmapAllocator {
    return reinterpret_cast<void *>(Ptr);
  }

  void *ReturnNullOrDieOnOOM() {
    if (atomic_load_relaxed(&MayReturnNull))
      return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    SecondaryHeader *Header = getHeader(Ptr);
    {

@ -140,7 +133,6 @@ class ScudoLargeMmapAllocator {
  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
  uptr PageSize;
  SpinMutex StatsMutex;
  atomic_uint8_t MayReturnNull;
};

#endif // SCUDO_ALLOCATOR_SECONDARY_H_
@ -112,9 +112,8 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}

void InitializeAllocator() {
  allocator()->Init(
      common_flags()->allocator_may_return_null,
      common_flags()->allocator_release_to_os_interval_ms);
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}

void InitializeAllocatorLate() {

@ -151,7 +150,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {

void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDieOnBadRequest();
    return Allocator::FailureHandler::OnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;

@ -164,7 +163,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDieOnBadRequest();
    return Allocator::FailureHandler::OnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
@ -473,9 +473,12 @@ void __ubsan::__ubsan_handle_function_type_mismatch_abort(
  Die();
}

static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts,
                                bool IsAttr) {
  SourceLocation Loc = Data->Loc.acquire();
static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr,
                                ReportOptions Opts, bool IsAttr) {
  if (!LocPtr)
    UNREACHABLE("source location pointer is null!");

  SourceLocation Loc = LocPtr->acquire();
  ErrorType ET = ErrorType::InvalidNullReturn;

  if (ignoreReport(Loc, Opts, ET))

@ -491,25 +494,29 @@ static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts,
                  : "_Nonnull return type annotation");
}

void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data,
                                               SourceLocation *LocPtr) {
  GET_REPORT_OPTIONS(false);
  handleNonNullReturn(Data, Opts, true);
  handleNonNullReturn(Data, LocPtr, Opts, true);
}

void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data,
                                                     SourceLocation *LocPtr) {
  GET_REPORT_OPTIONS(true);
  handleNonNullReturn(Data, Opts, true);
  handleNonNullReturn(Data, LocPtr, Opts, true);
  Die();
}

void __ubsan::__ubsan_handle_nullability_return(NonNullReturnData *Data) {
void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data,
                                                   SourceLocation *LocPtr) {
  GET_REPORT_OPTIONS(false);
  handleNonNullReturn(Data, Opts, false);
  handleNonNullReturn(Data, LocPtr, Opts, false);
}

void __ubsan::__ubsan_handle_nullability_return_abort(NonNullReturnData *Data) {
void __ubsan::__ubsan_handle_nullability_return_v1_abort(
    NonNullReturnData *Data, SourceLocation *LocPtr) {
  GET_REPORT_OPTIONS(true);
  handleNonNullReturn(Data, Opts, false);
  handleNonNullReturn(Data, LocPtr, Opts, false);
  Die();
}
@ -132,14 +132,13 @@ RECOVERABLE(function_type_mismatch,
            ValueHandle Val)

struct NonNullReturnData {
  SourceLocation Loc;
  SourceLocation AttrLoc;
};

/// \brief Handle returning null from function with the returns_nonnull
/// attribute, or a return type annotated with _Nonnull.
RECOVERABLE(nonnull_return, NonNullReturnData *Data)
RECOVERABLE(nullability_return, NonNullReturnData *Data)
RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc)

struct NonNullArgData {
  SourceLocation Loc;
@ -28,12 +28,12 @@ INTERFACE_FUNCTION(__ubsan_handle_negate_overflow)
INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_abort)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1)
INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort)
INTERFACE_FUNCTION(__ubsan_handle_nullability_arg)
INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort)
INTERFACE_FUNCTION(__ubsan_handle_nullability_return)
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_abort)
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1)
INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort)
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
@ -312,7 +312,7 @@ __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
  return patchFunction(FuncId, false);
}

int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
  if (!__sanitizer::atomic_load(&XRayInitialized,
                                __sanitizer::memory_order_acquire))
    return 0;

@ -320,7 +320,7 @@ int __xray_set_handler_arg1(void (*Handler)(int32_t, XRayEntryType, uint64_t)) {
  // A relaxed write might not be visible even if the current thread gets
  // scheduled on a different CPU/NUMA node. We need to wait for everyone to
  // have this handler installed for consistency of collected data across CPUs.
  __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(Handler),
  __sanitizer::atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
                            __sanitizer::memory_order_release);
  return 1;
}
@ -28,6 +28,9 @@
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t realloc-after-malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC-REALLOC,CHECK-NULL

// ASan shadow memory on s390 is too large for this test.
// UNSUPPORTED: s390

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
@ -4,11 +4,13 @@
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s

// RUN: %clangxx -std=c++11 -DTEST_INSTALL_SIG_HANDLER %s -o %t
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck --check-prefix=CHECK-HANDLER %s
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-HANDLER
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck %s
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s

// RUN: %clangxx -std=c++11 -DTEST_INSTALL_SIG_ACTION %s -o %t
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck --check-prefix=CHECK-ACTION %s
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-ACTION
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=1 not %run %t 2>&1 | FileCheck %s
// RUN: env LD_PRELOAD=%shared_libasan %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s

// REQUIRES: asan-dynamic-runtime

@ -51,22 +53,22 @@ int InternalSigaction(int sig, KernelSigaction *act, KernelSigaction *oact) {
  return syscall(__NR_rt_sigaction, sig, act, oact, NSIG / 8);
}

struct KernelSigaction sigact = {};
struct KernelSigaction pre_asan = {};

static void Init() {
  int res = InternalSigaction(SIGSEGV, nullptr, &sigact);
  int res = InternalSigaction(SIGSEGV, nullptr, &pre_asan);
  assert(res >= 0);
  assert(sigact.handler == SIG_DFL || sigact.handler == SIG_IGN);
  assert(pre_asan.handler == SIG_DFL || pre_asan.handler == SIG_IGN);
#if defined(TEST_INSTALL_SIG_HANDLER)
  sigact = {};
  sigact.handler = &SigHandler;
  res = InternalSigaction(SIGSEGV, &sigact, nullptr);
  pre_asan = {};
  pre_asan.handler = &SigHandler;
  res = InternalSigaction(SIGSEGV, &pre_asan, nullptr);
  assert(res >= 0);
#elif defined(TEST_INSTALL_SIG_ACTION)
  sigact = {};
  sigact.flags = SA_SIGINFO | SA_NODEFER;
  sigact.handler = (__sighandler_t)&SigAction;
  res = InternalSigaction(SIGSEGV, &sigact, nullptr);
  pre_asan = {};
  pre_asan.flags = SA_SIGINFO | SA_NODEFER;
  pre_asan.handler = (__sighandler_t)&SigAction;
  res = InternalSigaction(SIGSEGV, &pre_asan, nullptr);
  assert(res >= 0);
#endif
}

@ -74,21 +76,21 @@ static void Init() {
__attribute__((section(".preinit_array"), used))
void (*__local_test_preinit)(void) = Init;

bool ShouldAsanInstallHandlers() {
bool ExpectUserHandler() {
#if defined(TEST_INSTALL_SIG_HANDLER) || defined(TEST_INSTALL_SIG_ACTION)
  return !strcmp(getenv("ASAN_OPTIONS"), "handle_segv=2");
  return !strcmp(getenv("ASAN_OPTIONS"), "handle_segv=0");
#endif
  return true;
  return false;
}

int main(int argc, char *argv[]) {
  KernelSigaction sigact_asan = {};
  InternalSigaction(SIGSEGV, nullptr, &sigact_asan);
  KernelSigaction post_asan = {};
  InternalSigaction(SIGSEGV, nullptr, &post_asan);

  assert(sigact_asan.handler != SIG_DFL);
  assert(sigact_asan.handler != SIG_IGN);
  assert(ShouldAsanInstallHandlers() ==
         (sigact_asan.handler != sigact.handler));
  assert(post_asan.handler != SIG_DFL);
  assert(post_asan.handler != SIG_IGN);
  assert(ExpectUserHandler() ==
         (post_asan.handler == pre_asan.handler));

  raise(SIGSEGV);
  printf("%s\n", handler);
@ -8,5 +8,5 @@ int main() {
  while (true) {
    void *ptr = malloc(200 * 1024 * 1024); // 200MB
  }
  // CHECK: failed to allocate
  // CHECK: allocator is terminating the process instead of returning 0
}
39
test/asan/TestCases/pr33372.cc
Normal file
@ -0,0 +1,39 @@
// RUN: %clangxx_asan -O0 -std=c++11 %s -o %t && %run %t 2>&1 | FileCheck %s
// RUN: %clangxx_asan -O1 -std=c++11 %s -o %t && %run %t 2>&1 | FileCheck %s
// RUN: %clangxx_asan -O2 -std=c++11 %s -o %t && %run %t 2>&1 | FileCheck %s

// Test that we do not detect false buffer overflows caused by optimization
// when a local variable is replaced by a smaller global constant.
// https://bugs.llvm.org/show_bug.cgi?id=33372

#include <stdio.h>
#include <string.h>

struct A { int x, y, z; };
struct B { A a; /*gap*/ long b; };
B *bb;

void test1() {
  A a1 = {1, 1, 2};
  B b1 = {a1, 6};
  bb = new B(b1);
}

const char KKK[] = {1, 1, 2};
char bbb[100000];

void test2() {
  char cc[sizeof(bbb)];
  memcpy(cc, KKK, sizeof(KKK));
  memcpy(bbb, cc, sizeof(bbb));
}

int main(int argc, char *argv[]) {
  test1();
  test2();
  printf("PASSED");
  return 0;
}

// CHECK-NOT: ERROR: AddressSanitizer
// CHECK: PASSED
@ -114,6 +114,7 @@ asan_lit_source_dir = get_required_attr(config, "asan_lit_source_dir")
if config.android == "1":
  config.available_features.add('android')
  compile_wrapper = os.path.join(asan_lit_source_dir, "android_commands", "android_compile.py") + " "
  config.compile_wrapper = compile_wrapper
else:
  config.available_features.add('not-android')
@ -4,6 +4,9 @@
// RUN: %clang -O0 %s -o %t 2>&1
// RUN: %run %t 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ESAN

// FIXME: Re-enable once PR33590 is fixed.
// UNSUPPORTED: x86_64

#include <sanitizer/esan_interface.h>
#include <sched.h>
#include <stdio.h>

@ -1,6 +1,9 @@
// RUN: %clang_esan_wset -O0 %s -o %t 2>&1
// RUN: %run %t 2>&1 | FileCheck %s

// FIXME: Re-enable once PR33590 is fixed.
// UNSUPPORTED: x86_64

#include <sanitizer/esan_interface.h>
#include <sched.h>
#include <stdlib.h>

@ -1,6 +1,9 @@
// RUN: %clang_esan_wset -O0 %s -o %t 2>&1
// RUN: %run %t 2>&1 | FileCheck %s

// FIXME: Re-enable once PR33590 is fixed.
// UNSUPPORTED: x86_64

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
59
test/lsan/TestCases/Darwin/dispatch.mm
Normal file
@ -0,0 +1,59 @@
// Test for threads spawned with wqthread_start
// RUN: LSAN_BASE="report_objects=1:use_stacks=0:use_registers=0"
// RUN: %clangxx_lsan %s -DDISPATCH_ASYNC -o %t-async -framework Foundation
// RUN: %clangxx_lsan %s -DDISPATCH_SYNC -o %t-sync -framework Foundation
// RUN: %env_lsan_opts=$LSAN_BASE not %run %t-async 2>&1 | FileCheck %s
// RUN: %env_lsan_opts=$LSAN_BASE not %run %t-sync 2>&1 | FileCheck %s

#include <dispatch/dispatch.h>
#include <pthread.h>
#include <stdlib.h>

#include "sanitizer_common/print_address.h"

bool done = false;

void worker_do_leak(int size) {
  void *p = malloc(size);
  print_address("Test alloc: ", 1, p);
  done = true;
}

#if DISPATCH_ASYNC
// Tests for the Grand Central Dispatch. See
// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
// for the reference.
void TestGCDDispatch() {
  dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
  dispatch_block_t block = ^{
    worker_do_leak(1337);
  };
  // dispatch_async() runs the task on a worker thread that does not go through
  // pthread_create(). We need to verify that LeakSanitizer notices that the
  // thread has started.
  dispatch_async(queue, block);
  while (!done)
    pthread_yield_np();
}
#elif DISPATCH_SYNC
void TestGCDDispatch() {
  dispatch_queue_t queue = dispatch_get_global_queue(2, 0);
  dispatch_block_t block = ^{
    worker_do_leak(1337);
  };
  // dispatch_sync() runs the task on a worker thread that does not go through
  // pthread_create(). We need to verify that LeakSanitizer notices that the
  // thread has started.
  dispatch_sync(queue, block);
}
#endif

int main() {
  TestGCDDispatch();
  return 0;
}

// CHECK: Test alloc: [[addr:0x[0-9,a-f]+]]
// CHECK: LeakSanitizer: detected memory leaks
// CHECK: [[addr]] (1337 bytes)
// CHECK: SUMMARY: {{(Leak|Address)}}Sanitizer:
9
test/lsan/TestCases/Darwin/lit.local.cfg
Normal file
@ -0,0 +1,9 @@
def getRoot(config):
  if not config.parent:
    return config
  return getRoot(config.parent)

root = getRoot(config)

if root.host_os not in ['Darwin']:
  config.unsupported = True
@ -77,4 +77,4 @@ if not (supported_linux or supported_darwin):
if re.search('mthumb', config.target_cflags) is not None:
  config.unsupported = True

config.suffixes = ['.c', '.cc', '.cpp']
config.suffixes = ['.c', '.cc', '.cpp', '.mm']
59
test/profile/Linux/counter_promo_for.c
Normal file
@ -0,0 +1,59 @@
// RUN: rm -fr %t.promo.prof
// RUN: rm -fr %t.nopromo.prof
// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen -O2 %s
// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen.ll -emit-llvm -S -O2 %s
// RUN: cat %t.promo.gen.ll | FileCheck --check-prefix=PROMO %s
// RUN: %run %t.promo.gen
// RUN: llvm-profdata merge -o %t.promo.profdata %t.promo.prof/
// RUN: llvm-profdata show --counts --all-functions %t.promo.profdata > %t.promo.dump
// RUN: %clang_pgogen=%t.nopromo.prof/ -mllvm -do-counter-promotion=false -o %t.nopromo.gen -O2 %s
// RUN: %clang_pgogen=%t.nopromo.prof/ -mllvm -do-counter-promotion=false -o %t.nopromo.gen.ll -emit-llvm -S -O2 %s
// RUN: cat %t.nopromo.gen.ll | FileCheck --check-prefix=NOPROMO %s
// RUN: %run %t.nopromo.gen
// RUN: llvm-profdata merge -o %t.nopromo.profdata %t.nopromo.prof/
// RUN: llvm-profdata show --counts --all-functions %t.nopromo.profdata > %t.nopromo.dump
// RUN: diff %t.promo.profdata %t.nopromo.profdata

int g;
__attribute__((noinline)) void bar(int i) { g += i; }

__attribute__((noinline)) void foo(int n, int N) {
// PROMO-LABEL: @foo
// PROMO: load{{.*}}@__profc_foo{{.*}} 0){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 0){{.*}}
// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
// PROMO: load{{.*}}@__profc_foo{{.*}} 3){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 3){{.*}}
//
// NOPROMO-LABEL: @foo
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 0){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 0){{.*}}
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
  int i;
  for (i = 0; i < N; i++) {
    if (i < n + 1)
      bar(1);
    else if (i < n - 1)
      bar(2);
    else
      bar(3);
  }
}

int main() {
  foo(10, 20);
  return 0;
}
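Both counter promotion tests check that the hoisted (PROMO) and in-loop (NOPROMO) instrumentation end up with identical merged profiles. Conceptually, counter promotion rewrites the per-iteration counter update into a register accumulation with a single store after the loop. The sketch below is illustrative only; the hypothetical counter_then_branch variable stands in for the real __profc_* globals matched above.

#include <cstdio>

// Stand-in for an instrumentation counter such as the __profc_foo slots
// matched by the PROMO/NOPROMO patterns above.
static long counter_then_branch;

// Without promotion: load/add/store of the counter on every iteration.
static void count_without_promotion(int n) {
  for (int i = 0; i < n; i++)
    counter_then_branch++;
}

// With promotion: the counter lives in a local (register) inside the loop and
// is written back once after the loop, which keeps the final counts identical
// while removing the per-iteration memory traffic.
static void count_with_promotion(int n) {
  long promoted = counter_then_branch;
  for (int i = 0; i < n; i++)
    promoted++;
  counter_then_branch = promoted;
}

int main() {
  count_without_promotion(10);
  count_with_promotion(10);
  printf("counter = %ld\n", counter_then_branch);  // prints 20 either way
  return 0;
}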
55
test/profile/Linux/counter_promo_while.c
Normal file
@ -0,0 +1,55 @@
// RUN: rm -fr %t.promo.prof
// RUN: rm -fr %t.nopromo.prof
// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen -O2 %s
// RUN: %clang_pgogen=%t.promo.prof/ -o %t.promo.gen.ll -emit-llvm -S -O2 %s
// RUN: cat %t.promo.gen.ll | FileCheck --check-prefix=PROMO %s
// RUN: %run %t.promo.gen
// RUN: llvm-profdata merge -o %t.promo.profdata %t.promo.prof/
// RUN: llvm-profdata show --counts --all-functions %t.promo.profdata > %t.promo.dump
// RUN: %clang_pgogen=%t.nopromo.prof/ -mllvm -do-counter-promotion=false -o %t.nopromo.gen -O2 %s
// RUN: %clang_pgogen=%t.nopromo.prof/ -mllvm -do-counter-promotion=false -o %t.nopromo.gen.ll -emit-llvm -S -O2 %s
// RUN: cat %t.nopromo.gen.ll | FileCheck --check-prefix=NOPROMO %s
// RUN: %run %t.nopromo.gen
// RUN: llvm-profdata merge -o %t.nopromo.profdata %t.nopromo.prof/
// RUN: llvm-profdata show --counts --all-functions %t.nopromo.profdata > %t.nopromo.dump
// RUN: diff %t.promo.profdata %t.nopromo.profdata
int g;
__attribute__((noinline)) void bar(int i) { g += i; }
__attribute__((noinline)) void foo(int n, int N) {
// PROMO-LABEL: @foo
// PROMO: load{{.*}}@__profc_foo{{.*}} 0){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 0){{.*}}
// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
// PROMO-NEXT: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
// PROMO-NEXT: add
// PROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
//
// NOPROMO-LABEL: @foo
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 0){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 0){{.*}}
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 1){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 1){{.*}}
// NOPROMO: load{{.*}}@__profc_foo{{.*}} 2){{.*}}
// NOPROMO-NEXT: add
// NOPROMO-NEXT: store{{.*}}@__profc_foo{{.*}} 2){{.*}}
  int i = 0;
  while (i < N) {
    if (i < n + 1)
      bar(1);
    else if (i < n - 1)
      bar(2);
    else
      bar(3);
    i++;
  }
}

int main() {
  foo(10, 20);
  return 0;
}
@ -1,7 +1,7 @@
// Tests -fsanitize-coverage=inline-8bit-counters
//
// REQUIRES: has_sancovcc,stable-runtime
// UNSUPPORTED: i386-darwin, x86_64-darwin, x86_64h-darwin
// UNSUPPORTED: i386-darwin
//
// RUN: %clangxx -O0 %s -fsanitize-coverage=inline-8bit-counters 2>&1
@ -7,7 +7,7 @@
// RUN: %run %t 10000 > %T/random_shuffle_tmp_dir/out2
// RUN: not diff %T/random_shuffle_tmp_dir/out?
// RUN: rm -rf %T/random_shuffle_tmp_dir
// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux
// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux

// Tests that the allocator shuffles the chunks before returning to the user.
@ -1,15 +1,42 @@
// RUN: %clangxx -fsanitize=returns-nonnull-attribute %s -O3 -o %t
// RUN: %run %t foo
// RUN: %clangxx -fsanitize=returns-nonnull-attribute -w %s -O3 -o %t
// RUN: %run %t foo 2>&1 | count 0
// RUN: %run %t 2>&1 | FileCheck %s
// RUN: %clangxx -fsanitize=returns-nonnull-attribute -fno-sanitize-recover=returns-nonnull-attribute -w %s -O3 -o %t.abort
// RUN: not %run %t.abort &> /dev/null

__attribute__((returns_nonnull)) char *foo(char *a);

char *foo(char *a) {
  // CHECK: nonnull.cpp:[[@LINE+2]]:3: runtime error: null pointer returned from function declared to never return null
  // CHECK-NEXT: nonnull.cpp:[[@LINE-4]]:16: note: returns_nonnull attribute specified here
  return a;
  // CHECK: nonnull.cpp:[[@LINE+2]]:1: runtime error: null pointer returned from function declared to never return null
  // CHECK-NEXT: nonnull.cpp:[[@LINE-5]]:16: note: returns_nonnull attribute specified here
}

__attribute__((returns_nonnull)) char *bar(int x, char *a) {
  if (x > 10) {
    // CHECK: nonnull.cpp:[[@LINE+2]]:5: runtime error: null pointer returned from function declared to never return null
    // CHECK-NEXT: nonnull.cpp:[[@LINE-3]]:16: note: returns_nonnull attribute specified here
    return a;
  } else {
    // CHECK: nonnull.cpp:[[@LINE+2]]:5: runtime error: null pointer returned from function declared to never return null
    // CHECK-NEXT: nonnull.cpp:[[@LINE-7]]:16: note: returns_nonnull attribute specified here
    return a;
  }
}

int main(int argc, char **argv) {
  return foo(argv[1]) == 0;
  char *a = argv[1];

  foo(a);

  bar(20, a);

  // We expect to see a runtime error the first time we cover the "else"...
  bar(5, a);

  // ... but not a second time.
  // CHECK-NOT: runtime error
  bar(5, a);

  return 0;
}
@ -2,7 +2,7 @@
// RUN: %run %t foo 2>&1 | count 0
// RUN: %run %t 2>&1 | FileCheck %s

// CHECK: nullability.c:[[@LINE+2]]:51: runtime error: null pointer returned from function declared to never return null
// CHECK: nullability.c:[[@LINE+2]]:41: runtime error: null pointer returned from function declared to never return null
// CHECK-NEXT: nullability.c:[[@LINE+1]]:6: note: _Nonnull return type annotation specified here
int *_Nonnull nonnull_retval1(int *p) { return p; }
39
test/xray/TestCases/Linux/arg1-arg0-logging.cc
Normal file
@ -0,0 +1,39 @@
// Allow having both the no-arg and arg1 logging implementation live together,
// and be called in the correct cases.
//
// RUN: rm arg0-arg1-logging-* || true
// RUN: %clangxx_xray -std=c++11 %s -o %t
// RUN: XRAY_OPTIONS="patch_premain=true verbosity=1 xray_logfile_base=arg0-arg1-logging-" %run %t
//
// TODO: Support these in ARM and PPC
// XFAIL: arm || aarch64 || mips
// UNSUPPORTED: powerpc64le

#include "xray/xray_interface.h"
#include <cassert>
#include <cstdio>

using namespace std;

bool arg0loggercalled = false;
void arg0logger(int32_t, XRayEntryType) { arg0loggercalled = true; }

[[clang::xray_always_instrument]] void arg0fn() { printf("hello, arg0!\n"); }

bool arg1loggercalled = false;
void arg1logger(int32_t, XRayEntryType, uint64_t) { arg1loggercalled = true; }

[[ clang::xray_always_instrument, clang::xray_log_args(1) ]] void
arg1fn(uint64_t arg1) {
  printf("hello, arg1!\n");
}

int main(int argc, char *argv[]) {
  __xray_set_handler(arg0logger);
  __xray_set_handler_arg1(arg1logger);
  arg0fn();
  arg1fn(0xcafef00d);
  __xray_remove_handler_arg1();
  __xray_remove_handler();
  assert(arg0loggercalled && arg1loggercalled);
}