commit 289fa303d6
Merge compiler-rt trunk r300890, and update build glue.
@@ -158,8 +158,10 @@ extern "C" {
  // Prints stack traces for all live heap allocations ordered by total
  // allocation size until `top_percent` of total live heap is shown.
  // `top_percent` should be between 1 and 100.
  // At most `max_number_of_contexts` contexts (stack traces) are printed.
  // Experimental feature currently available only with asan on Linux/x86_64.
  void __sanitizer_print_memory_profile(size_t top_percent);
  void __sanitizer_print_memory_profile(size_t top_percent,
                                        size_t max_number_of_contexts);

  // Fiber annotation interface.
  // Before switching to a different stack, one must call
@@ -35,35 +35,6 @@ extern "C" {
  // Get the number of unique covered blocks (or edges).
  // This can be useful for coverage-directed in-process fuzzers.
  uintptr_t __sanitizer_get_total_unique_coverage();
  // Get the number of unique indirect caller-callee pairs.
  uintptr_t __sanitizer_get_total_unique_caller_callee_pairs();

  // Reset the basic-block (edge) coverage to the initial state.
  // Useful for in-process fuzzing to start collecting coverage from scratch.
  // Experimental, will likely not work for multi-threaded process.
  void __sanitizer_reset_coverage();
  // Set *data to the array of covered PCs and return the size of that array.
  // Some of the entries in *data will be zero.
  uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);

  // The coverage instrumentation may optionally provide imprecise counters.
  // Rather than exposing the counter values to the user we instead map
  // the counters to a bitset.
  // Every counter is associated with 8 bits in the bitset.
  // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
  // The i-th bit is set to 1 if the counter value is in the i-th range.
  // This counter-based coverage implementation is *not* thread-safe.

  // Returns the number of registered coverage counters.
  uintptr_t __sanitizer_get_number_of_counters();
  // Updates the counter 'bitset', clears the counters and returns the number of
  // new bits in 'bitset'.
  // If 'bitset' is nullptr, only clears the counters.
  // Otherwise 'bitset' should be at least
  // __sanitizer_get_number_of_counters bytes long and 8-aligned.
  uintptr_t
  __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);

#ifdef __cplusplus
}  // extern "C"
#endif
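Aside (illustrative, not part of the diff): the removed counter-to-bitset interface above maps each counter value into one of eight ranges; a minimal sketch of that mapping, with a hypothetical helper name:

    // Illustrative only: returns the 0-based bit index for the eight value
    // ranges named in the removed comment (1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+).
    static int CounterToRangeBit(unsigned counter) {
      if (counter == 0) return -1;                 // untouched counter: no bit set
      if (counter <= 3) return (int)counter - 1;   // ranges "1", "2", "3"
      if (counter < 8) return 3;                   // 4-7
      if (counter < 16) return 4;                  // 8-15
      if (counter < 32) return 5;                  // 16-31
      if (counter < 128) return 6;                 // 32-127
      return 7;                                    // 128+
    }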
contrib/compiler-rt/include/sanitizer/tsan_interface.h (new file, 121 lines)
@@ -0,0 +1,121 @@
//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Public interface header for TSan.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_TSAN_INTERFACE_H
#define SANITIZER_TSAN_INTERFACE_H

#include <sanitizer/common_interface_defs.h>

#ifdef __cplusplus
extern "C" {
#endif

// __tsan_release establishes a happens-before relation with a preceding
// __tsan_acquire on the same address.
void __tsan_acquire(void *addr);
void __tsan_release(void *addr);

// Annotations for custom mutexes.
// The annotations allow to get better reports (with sets of locked mutexes),
// detect more types of bugs (e.g. mutex misuses, races between lock/unlock and
// destruction and potential deadlocks) and improve precision and performance
// (by ignoring individual atomic operations in mutex code). However, the
// downside is that annotated mutex code itself is not checked for correctness.

// Mutex creation flags are passed to __tsan_mutex_create annotation.
// If mutex has no constructor and __tsan_mutex_create is not called,
// the flags may be passed to __tsan_mutex_pre_lock/__tsan_mutex_post_lock
// annotations.

// Mutex has static storage duration and no-op constructor and destructor.
// This effectively makes tsan ignore destroy annotation.
const unsigned __tsan_mutex_linker_init      = 1 << 0;
// Mutex is write reentrant.
const unsigned __tsan_mutex_write_reentrant  = 1 << 1;
// Mutex is read reentrant.
const unsigned __tsan_mutex_read_reentrant   = 1 << 2;

// Mutex operation flags:

// Denotes read lock operation.
const unsigned __tsan_mutex_read_lock        = 1 << 3;
// Denotes try lock operation.
const unsigned __tsan_mutex_try_lock         = 1 << 4;
// Denotes that a try lock operation has failed to acquire the mutex.
const unsigned __tsan_mutex_try_lock_failed  = 1 << 5;
// Denotes that the lock operation acquires multiple recursion levels.
// Number of levels is passed in recursion parameter.
// This is useful for annotation of e.g. Java builtin monitors,
// for which wait operation releases all recursive acquisitions of the mutex.
const unsigned __tsan_mutex_recursive_lock   = 1 << 6;
// Denotes that the unlock operation releases all recursion levels.
// Number of released levels is returned and later must be passed to
// the corresponding __tsan_mutex_post_lock annotation.
const unsigned __tsan_mutex_recursive_unlock = 1 << 7;

// Annotate creation of a mutex.
// Supported flags: mutex creation flags.
void __tsan_mutex_create(void *addr, unsigned flags);

// Annotate destruction of a mutex.
// Supported flags: none.
void __tsan_mutex_destroy(void *addr, unsigned flags);

// Annotate start of lock operation.
// Supported flags:
//   - __tsan_mutex_read_lock
//   - __tsan_mutex_try_lock
//   - all mutex creation flags
void __tsan_mutex_pre_lock(void *addr, unsigned flags);

// Annotate end of lock operation.
// Supported flags:
//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock)
//   - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock)
//   - __tsan_mutex_try_lock_failed
//   - __tsan_mutex_recursive_lock
//   - all mutex creation flags
void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);

// Annotate start of unlock operation.
// Supported flags:
//   - __tsan_mutex_read_lock
//   - __tsan_mutex_recursive_unlock
int __tsan_mutex_pre_unlock(void *addr, unsigned flags);

// Annotate end of unlock operation.
// Supported flags:
//   - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
void __tsan_mutex_post_unlock(void *addr, unsigned flags);

// Annotate start/end of notify/signal/broadcast operation.
// Supported flags: none.
void __tsan_mutex_pre_signal(void *addr, unsigned flags);
void __tsan_mutex_post_signal(void *addr, unsigned flags);

// Annotate start/end of a region of code where lock/unlock/signal operation
// diverts to do something else unrelated to the mutex. This can be used to
// annotate, for example, calls into cooperative scheduler or contention
// profiling code.
// These annotations must be called only from within
// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
// __tsan_mutex_pre/post_signal regions.
// Supported flags: none.
void __tsan_mutex_pre_divert(void *addr, unsigned flags);
void __tsan_mutex_post_divert(void *addr, unsigned flags);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // SANITIZER_TSAN_INTERFACE_H
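Aside (illustrative, not part of the diff): a minimal sketch of how these new annotations might wrap a hand-rolled lock when building with -fsanitize=thread; the SpinLock class and its members are hypothetical:

    #include <atomic>
    #include <sanitizer/tsan_interface.h>

    // Hypothetical user lock, annotated so TSan treats it as a mutex.
    class SpinLock {
     public:
      SpinLock() { __tsan_mutex_create(this, 0); }
      ~SpinLock() { __tsan_mutex_destroy(this, 0); }
      void Lock() {
        __tsan_mutex_pre_lock(this, 0);
        while (locked_.exchange(true, std::memory_order_acquire)) {
          // spin
        }
        __tsan_mutex_post_lock(this, 0, /*recursion=*/0);
      }
      void Unlock() {
        __tsan_mutex_pre_unlock(this, 0);
        locked_.store(false, std::memory_order_release);
        __tsan_mutex_post_unlock(this, 0);
      }
     private:
      std::atomic<bool> locked_{false};
    };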
@@ -18,7 +18,13 @@
extern "C" {

enum XRayEntryType { ENTRY = 0, EXIT = 1, TAIL = 2 };
// Synchronize this with AsmPrinter::SledKind in LLVM.
enum XRayEntryType {
  ENTRY = 0,
  EXIT = 1,
  TAIL = 2,
  LOG_ARGS_ENTRY = 3,
};

// Provide a function to invoke for when instrumentation points are hit. This is
// a user-visible control surface that overrides the default implementation. The
@@ -60,6 +66,17 @@ extern XRayPatchingStatus __xray_patch();
// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
// result values.
extern XRayPatchingStatus __xray_unpatch();

// Use XRay to log the first argument of each (instrumented) function call.
// When this function exits, all threads will have observed the effect and
// start logging their subsequent affected function calls (if patched).
//
// Returns 1 on success, 0 on error.
extern int __xray_set_handler_arg1(void (*)(int32_t, XRayEntryType, uint64_t));

// Disables the XRay handler used to log first arguments of function calls.
// Returns 1 on success, 0 on error.
extern int __xray_remove_handler_arg1();
}

#endif
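Aside (illustrative, not part of the diff): a sketch of installing the new first-argument handler; the handler body and helper name are hypothetical:

    #include <cstdint>
    #include <cstdio>
    #include "xray/xray_interface.h"

    // Hypothetical handler: logs the first argument of each instrumented call.
    static void LogFirstArg(int32_t FuncId, XRayEntryType Entry, uint64_t Arg1) {
      if (Entry == ENTRY || Entry == LOG_ARGS_ENTRY)
        std::fprintf(stderr, "func %d arg1=%llu\n", (int)FuncId,
                     (unsigned long long)Arg1);
    }

    void InstallArg1Logging() {
      if (__xray_set_handler_arg1(LogFirstArg))  // returns 1 on success
        __xray_patch();                          // patch sleds so the handler runs
    }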
contrib/compiler-rt/include/xray/xray_log_interface.h (new file, 60 lines)
@@ -0,0 +1,60 @@
//===-- xray_log_interface.h ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a function call tracing system.
//
// APIs for installing a new logging implementation.
//===----------------------------------------------------------------------===//
#ifndef XRAY_XRAY_LOG_INTERFACE_H
#define XRAY_XRAY_LOG_INTERFACE_H

#include "xray/xray_interface.h"
#include <stddef.h>

extern "C" {

enum XRayLogInitStatus {
  XRAY_LOG_UNINITIALIZED = 0,
  XRAY_LOG_INITIALIZING = 1,
  XRAY_LOG_INITIALIZED = 2,
  XRAY_LOG_FINALIZING = 3,
  XRAY_LOG_FINALIZED = 4,
};

enum XRayLogFlushStatus {
  XRAY_LOG_NOT_FLUSHING = 0,
  XRAY_LOG_FLUSHING = 1,
  XRAY_LOG_FLUSHED = 2,
};

struct XRayLogImpl {
  XRayLogInitStatus (*log_init)(size_t, size_t, void *, size_t);
  XRayLogInitStatus (*log_finalize)();
  void (*handle_arg0)(int32_t, XRayEntryType);
  XRayLogFlushStatus (*flush_log)();
};

void __xray_set_log_impl(XRayLogImpl Impl);
XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
                                  void *Args, size_t ArgsSize);
XRayLogInitStatus __xray_log_finalize();
XRayLogFlushStatus __xray_log_flushLog();

}  // extern "C"

namespace __xray {
// Options used by the LLVM XRay FDR implementation.
struct FDRLoggingOptions {
  bool ReportErrors = false;
  int Fd = -1;
};

}  // namespace __xray

#endif  // XRAY_XRAY_LOG_INTERFACE_H
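Aside (illustrative, not part of the diff): a skeletal logging implementation wired in through this interface; the stub functions are hypothetical placeholders:

    #include "xray/xray_log_interface.h"

    namespace {

    XRayLogInitStatus StubInit(size_t BufferSize, size_t MaxBuffers,
                               void *Options, size_t OptionsSize) {
      // A real implementation would allocate its buffers here.
      return XRAY_LOG_INITIALIZED;
    }

    XRayLogInitStatus StubFinalize() { return XRAY_LOG_FINALIZED; }

    void StubHandleEntry(int32_t FuncId, XRayEntryType Type) {
      // A real implementation would append a record for FuncId/Type.
    }

    XRayLogFlushStatus StubFlush() { return XRAY_LOG_FLUSHED; }

    }  // namespace

    void InstallStubXRayLog() {
      XRayLogImpl Impl{StubInit, StubFinalize, StubHandleEntry, StubFlush};
      __xray_set_log_impl(Impl);
      __xray_log_init(/*BufferSize=*/4096, /*MaxBuffers=*/16, nullptr, 0);
    }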
@@ -21,8 +21,17 @@ namespace __xray {

enum FileTypes {
  NAIVE_LOG = 0,
  FDR_LOG = 1,
};

// FDR mode use of the union field in the XRayFileHeader.
struct alignas(16) FdrAdditionalHeaderData {
  uint64_t ThreadBufferSize;
};

static_assert(sizeof(FdrAdditionalHeaderData) == 16,
              "FdrAdditionalHeaderData != 16 bytes");

// This data structure is used to describe the contents of the file. We use this
// for versioning the supported XRay file formats.
struct alignas(32) XRayFileHeader {
@@ -40,6 +49,16 @@ struct alignas(32) XRayFileHeader {

  // The frequency by which TSC increases per-second.
  alignas(8) uint64_t CycleFrequency = 0;

  union {
    char FreeForm[16];
    // The current civiltime timestamp, as retrieved from 'clock_gettime'. This
    // allows readers of the file to determine when the file was created or
    // written down.
    struct timespec TS;

    struct FdrAdditionalHeaderData FdrData;
  };
} __attribute__((packed));

static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
@@ -1,3 +1,4 @@
__asan_*
__lsan_*
__ubsan_*
__sancov_*
@@ -523,6 +523,18 @@ struct Allocator {
    AsanThread *t = GetCurrentThread();
    m->free_tid = t ? t->tid() : 0;
    m->free_context_id = StackDepotPut(*stack);

    Flags &fl = *flags();
    if (fl.max_free_fill_size > 0) {
      // We have to skip the chunk header, it contains free_context_id.
      uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
      if (m->UsedSize() >= kChunkHeader2Size) {  // Skip Header2 in user area.
        uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
        size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
        REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
      }
    }

    // Poison the region.
    PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
@@ -554,7 +566,17 @@ struct Allocator {
    uptr chunk_beg = p - kChunkHeaderSize;
    AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);

    // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
    // malloc. Don't report an invalid free in this case.
    if (SANITIZER_WINDOWS &&
        !get_allocator().PointerIsMine(ptr)) {
      if (!IsSystemHeapAddress(p))
        ReportFreeNotMalloced(p, stack);
      return;
    }

    ASAN_FREE_HOOK(ptr);

    // Must mark the chunk as quarantined before any changes to its metadata.
    // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
    if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
@@ -790,8 +812,12 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
  if (size == 0) {
    instance.Deallocate(p, 0, stack, FROM_MALLOC);
    return nullptr;
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return instance.Reallocate(p, size, stack);
}
@@ -958,15 +984,13 @@ uptr __sanitizer_get_allocated_size(const void *p) {

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
                             void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
}  // extern "C"
#endif
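Aside (illustrative, not part of the diff): because these hooks are weak, a program linked against ASan can supply strong definitions to observe allocations; the prototypes are assumed to come from <sanitizer/allocator_interface.h>:

    #include <cstdio>
    #include <sanitizer/allocator_interface.h>  // assumed location of the hook prototypes

    // Strong definitions override the weak no-op defaults in the runtime.
    // (Real hooks should avoid allocating; fprintf here is only for the sketch.)
    extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
      std::fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
    }

    extern "C" void __sanitizer_free_hook(void *ptr) {
      std::fprintf(stderr, "free  %p\n", ptr);
    }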
@@ -252,6 +252,9 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
    str.append("%c", var.name_pos[i]);
  }
  str.append("'");
  if (var.line > 0) {
    str.append(" (line %d)", var.line);
  }
  if (pos_descr) {
    Decorator d;
    // FIXME: we may want to also print the size of the access here,
@@ -58,10 +58,22 @@ static void MaybeDumpRegisters(void *context) {
  SignalContext::DumpAllRegisters(context);
}

static void MaybeReportNonExecRegion(uptr pc) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr start, end, protection;
  while (proc_maps.Next(&start, &end, nullptr, nullptr, 0, &protection)) {
    if (pc >= start && pc < end &&
        !(protection & MemoryMappingLayout::kProtectionExecute))
      Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
  }
#endif
}

void ErrorDeadlySignal::Print() {
  Decorator d;
  Printf("%s", d.Warning());
  const char *description = DescribeSignalOrException(signo);
  const char *description = __sanitizer::DescribeSignalOrException(signo);
  Report(
      "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
      "T%d)\n",
@@ -77,6 +89,7 @@ void ErrorDeadlySignal::Print() {
    if (addr < GetPageSizeCached())
      Report("Hint: address points to the zero page.\n");
  }
  MaybeReportNonExecRegion(pc);
  scariness.Print();
  BufferedStackTrace stack;
  GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
@@ -61,7 +61,7 @@ void InitializeFlags() {
  {
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.detect_leaks = CAN_SANITIZE_LEAKS;
    cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS;
    cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
    cf.malloc_context_size = kDefaultMallocContextSize;
    cf.intercept_tls_get_addr = true;
@@ -95,6 +95,18 @@ void InitializeFlags() {
  RegisterCommonFlags(&ubsan_parser);
#endif

  if (SANITIZER_MAC) {
    // Support macOS MallocScribble and MallocPreScribble:
    // <https://developer.apple.com/library/content/documentation/Performance/
    // Conceptual/ManagingMemory/Articles/MallocDebug.html>
    if (GetEnv("MallocScribble")) {
      f->max_free_fill_size = 0x1000;
    }
    if (GetEnv("MallocPreScribble")) {
      f->malloc_fill_byte = 0xaa;
    }
  }

  // Override from ASan compile definition.
  const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
  asan_parser.ParseString(asan_compile_def);
@@ -186,9 +198,6 @@ void InitializeFlags() {

}  // namespace __asan

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
}  // extern "C"
#endif
SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
  return "";
}
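Aside (illustrative, not part of the diff): the weak __asan_default_options hook rewritten above is the same one a program may define to bake in default options:

    // Illustrative user-side override; ASan parses this string at startup,
    // before ASAN_OPTIONS from the environment is applied.
    extern "C" const char *__asan_default_options() {
      return "malloc_context_size=20:detect_leaks=1";
    }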
@@ -63,8 +63,14 @@ ASAN_FLAG(
    int, max_malloc_fill_size, 0x1000,  // By default, fill only the first 4K.
    "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
    "bytes that will be filled with malloc_fill_byte on malloc.")
ASAN_FLAG(
    int, max_free_fill_size, 0,
    "ASan allocator flag. max_free_fill_size is the maximal amount of "
    "bytes that will be filled with free_fill_byte during free.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
          "Value used to fill the newly allocated memory.")
ASAN_FLAG(int, free_fill_byte, 0x55,
          "Value used to fill deallocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
          "If set, user may manually mark memory regions as poisoned or "
          "unpoisoned.")
@@ -148,3 +154,10 @@ ASAN_FLAG(bool, halt_on_error, true,
          "(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
          "Use special ODR indicator symbol for ODR violation detection")
ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
          "realloc(p, 0) is equivalent to free(p) by default (Same as the "
          "POSIX standard). If set to false, realloc(p, 0) will return a "
          "pointer to an allocated space which can not be used.")
ASAN_FLAG(bool, verify_asan_link_order, true,
          "Check position of ASan runtime in library list (needs to be disabled"
          " when other library has to be preloaded system-wide)")
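Aside (illustrative, not part of the diff): what the new allocator_frees_and_returns_null_on_realloc_zero flag governs, sketched under its default setting:

    #include <cstdlib>

    void ReallocZeroExample() {
      void *p = std::malloc(64);
      // With allocator_frees_and_returns_null_on_realloc_zero=true (the default),
      // this frees p and yields nullptr; with the flag set to false, ASan returns
      // a pointer to a minimal allocation that must not be used.
      void *q = std::realloc(p, 0);
      std::free(q);  // safe either way: free(nullptr) is a no-op
    }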
@@ -29,7 +29,7 @@ static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
  __asan_global *end = &__asan_globals_end;
  uptr bytediff = (uptr)end - (uptr)start;
  if (bytediff % sizeof(__asan_global) != 0) {
#ifdef ASAN_DLL_THUNK
#ifdef SANITIZER_DLL_THUNK
    __debugbreak();
#else
    CHECK("corrupt asan global array");
@@ -1,34 +0,0 @@
//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface to the Windows-specific global management code. Separated into a
// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
// which defines symbols that clash with other sanitizer headers.
//
//===----------------------------------------------------------------------===//

#ifndef ASAN_GLOBALS_WIN_H
#define ASAN_GLOBALS_WIN_H

#if !defined(_MSC_VER)
#error "this file is Windows-only, and uses MSVC pragmas"
#endif

#if defined(_WIN64)
#define SANITIZER_SYM_PREFIX
#else
#define SANITIZER_SYM_PREFIX "_"
#endif

// Use this macro to force linking asan_globals_win.cc into the DSO.
#define ASAN_LINK_GLOBALS_WIN() \
  __pragma( \
      comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))

#endif  // ASAN_GLOBALS_WIN_H
@@ -228,9 +228,11 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
  if (flags()->strict_init_order) {                  \
    StopInitOrderChecking();                         \
  }
  do {                                               \
    if (flags()->strict_init_order)                  \
      StopInitOrderChecking();                       \
    CheckNoDeepBind(filename, flag);                 \
  } while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  CoverageUpdateMapping()
contrib/compiler-rt/lib/asan/asan_interface.inc (new file, 167 lines)
@@ -0,0 +1,167 @@
//===-- asan_interface.inc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Asan interface list.
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
INTERFACE_FUNCTION(__asan_address_is_poisoned)
INTERFACE_FUNCTION(__asan_after_dynamic_init)
INTERFACE_FUNCTION(__asan_alloca_poison)
INTERFACE_FUNCTION(__asan_allocas_unpoison)
INTERFACE_FUNCTION(__asan_before_dynamic_init)
INTERFACE_FUNCTION(__asan_describe_address)
INTERFACE_FUNCTION(__asan_exp_load1)
INTERFACE_FUNCTION(__asan_exp_load2)
INTERFACE_FUNCTION(__asan_exp_load4)
INTERFACE_FUNCTION(__asan_exp_load8)
INTERFACE_FUNCTION(__asan_exp_load16)
INTERFACE_FUNCTION(__asan_exp_loadN)
INTERFACE_FUNCTION(__asan_exp_store1)
INTERFACE_FUNCTION(__asan_exp_store2)
INTERFACE_FUNCTION(__asan_exp_store4)
INTERFACE_FUNCTION(__asan_exp_store8)
INTERFACE_FUNCTION(__asan_exp_store16)
INTERFACE_FUNCTION(__asan_exp_storeN)
INTERFACE_FUNCTION(__asan_get_alloc_stack)
INTERFACE_FUNCTION(__asan_get_current_fake_stack)
INTERFACE_FUNCTION(__asan_get_free_stack)
INTERFACE_FUNCTION(__asan_get_report_access_size)
INTERFACE_FUNCTION(__asan_get_report_access_type)
INTERFACE_FUNCTION(__asan_get_report_address)
INTERFACE_FUNCTION(__asan_get_report_bp)
INTERFACE_FUNCTION(__asan_get_report_description)
INTERFACE_FUNCTION(__asan_get_report_pc)
INTERFACE_FUNCTION(__asan_get_report_sp)
INTERFACE_FUNCTION(__asan_get_shadow_mapping)
INTERFACE_FUNCTION(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_init)
INTERFACE_FUNCTION(__asan_load_cxx_array_cookie)
INTERFACE_FUNCTION(__asan_load1)
INTERFACE_FUNCTION(__asan_load2)
INTERFACE_FUNCTION(__asan_load4)
INTERFACE_FUNCTION(__asan_load8)
INTERFACE_FUNCTION(__asan_load16)
INTERFACE_FUNCTION(__asan_loadN)
INTERFACE_FUNCTION(__asan_load1_noabort)
INTERFACE_FUNCTION(__asan_load2_noabort)
INTERFACE_FUNCTION(__asan_load4_noabort)
INTERFACE_FUNCTION(__asan_load8_noabort)
INTERFACE_FUNCTION(__asan_load16_noabort)
INTERFACE_FUNCTION(__asan_loadN_noabort)
INTERFACE_FUNCTION(__asan_locate_address)
INTERFACE_FUNCTION(__asan_memcpy)
INTERFACE_FUNCTION(__asan_memmove)
INTERFACE_FUNCTION(__asan_memset)
INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie)
INTERFACE_FUNCTION(__asan_poison_intra_object_redzone)
INTERFACE_FUNCTION(__asan_poison_memory_region)
INTERFACE_FUNCTION(__asan_poison_stack_memory)
INTERFACE_FUNCTION(__asan_print_accumulated_stats)
INTERFACE_FUNCTION(__asan_region_is_poisoned)
INTERFACE_FUNCTION(__asan_register_globals)
INTERFACE_FUNCTION(__asan_register_image_globals)
INTERFACE_FUNCTION(__asan_report_error)
INTERFACE_FUNCTION(__asan_report_exp_load1)
INTERFACE_FUNCTION(__asan_report_exp_load2)
INTERFACE_FUNCTION(__asan_report_exp_load4)
INTERFACE_FUNCTION(__asan_report_exp_load8)
INTERFACE_FUNCTION(__asan_report_exp_load16)
INTERFACE_FUNCTION(__asan_report_exp_load_n)
INTERFACE_FUNCTION(__asan_report_exp_store1)
INTERFACE_FUNCTION(__asan_report_exp_store2)
INTERFACE_FUNCTION(__asan_report_exp_store4)
INTERFACE_FUNCTION(__asan_report_exp_store8)
INTERFACE_FUNCTION(__asan_report_exp_store16)
INTERFACE_FUNCTION(__asan_report_exp_store_n)
INTERFACE_FUNCTION(__asan_report_load1)
INTERFACE_FUNCTION(__asan_report_load2)
INTERFACE_FUNCTION(__asan_report_load4)
INTERFACE_FUNCTION(__asan_report_load8)
INTERFACE_FUNCTION(__asan_report_load16)
INTERFACE_FUNCTION(__asan_report_load_n)
INTERFACE_FUNCTION(__asan_report_load1_noabort)
INTERFACE_FUNCTION(__asan_report_load2_noabort)
INTERFACE_FUNCTION(__asan_report_load4_noabort)
INTERFACE_FUNCTION(__asan_report_load8_noabort)
INTERFACE_FUNCTION(__asan_report_load16_noabort)
INTERFACE_FUNCTION(__asan_report_load_n_noabort)
INTERFACE_FUNCTION(__asan_report_present)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
INTERFACE_FUNCTION(__asan_report_store4)
INTERFACE_FUNCTION(__asan_report_store8)
INTERFACE_FUNCTION(__asan_report_store16)
INTERFACE_FUNCTION(__asan_report_store_n)
INTERFACE_FUNCTION(__asan_report_store1_noabort)
INTERFACE_FUNCTION(__asan_report_store2_noabort)
INTERFACE_FUNCTION(__asan_report_store4_noabort)
INTERFACE_FUNCTION(__asan_report_store8_noabort)
INTERFACE_FUNCTION(__asan_report_store16_noabort)
INTERFACE_FUNCTION(__asan_report_store_n_noabort)
INTERFACE_FUNCTION(__asan_set_death_callback)
INTERFACE_FUNCTION(__asan_set_error_report_callback)
INTERFACE_FUNCTION(__asan_set_shadow_00)
INTERFACE_FUNCTION(__asan_set_shadow_f1)
INTERFACE_FUNCTION(__asan_set_shadow_f2)
INTERFACE_FUNCTION(__asan_set_shadow_f3)
INTERFACE_FUNCTION(__asan_set_shadow_f5)
INTERFACE_FUNCTION(__asan_set_shadow_f8)
INTERFACE_FUNCTION(__asan_stack_free_0)
INTERFACE_FUNCTION(__asan_stack_free_1)
INTERFACE_FUNCTION(__asan_stack_free_2)
INTERFACE_FUNCTION(__asan_stack_free_3)
INTERFACE_FUNCTION(__asan_stack_free_4)
INTERFACE_FUNCTION(__asan_stack_free_5)
INTERFACE_FUNCTION(__asan_stack_free_6)
INTERFACE_FUNCTION(__asan_stack_free_7)
INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)
INTERFACE_FUNCTION(__asan_stack_malloc_0)
INTERFACE_FUNCTION(__asan_stack_malloc_1)
INTERFACE_FUNCTION(__asan_stack_malloc_2)
INTERFACE_FUNCTION(__asan_stack_malloc_3)
INTERFACE_FUNCTION(__asan_stack_malloc_4)
INTERFACE_FUNCTION(__asan_stack_malloc_5)
INTERFACE_FUNCTION(__asan_stack_malloc_6)
INTERFACE_FUNCTION(__asan_stack_malloc_7)
INTERFACE_FUNCTION(__asan_stack_malloc_8)
INTERFACE_FUNCTION(__asan_stack_malloc_9)
INTERFACE_FUNCTION(__asan_stack_malloc_10)
INTERFACE_FUNCTION(__asan_store1)
INTERFACE_FUNCTION(__asan_store2)
INTERFACE_FUNCTION(__asan_store4)
INTERFACE_FUNCTION(__asan_store8)
INTERFACE_FUNCTION(__asan_store16)
INTERFACE_FUNCTION(__asan_storeN)
INTERFACE_FUNCTION(__asan_store1_noabort)
INTERFACE_FUNCTION(__asan_store2_noabort)
INTERFACE_FUNCTION(__asan_store4_noabort)
INTERFACE_FUNCTION(__asan_store8_noabort)
INTERFACE_FUNCTION(__asan_store16_noabort)
INTERFACE_FUNCTION(__asan_storeN_noabort)
INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone)
INTERFACE_FUNCTION(__asan_unpoison_memory_region)
INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
INTERFACE_FUNCTION(__asan_unregister_globals)
INTERFACE_FUNCTION(__asan_unregister_image_globals)
INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_WEAK_FUNCTION(__asan_default_options)
INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
INTERFACE_WEAK_FUNCTION(__asan_on_error)
@@ -165,12 +165,12 @@ extern "C" {
  void __asan_set_error_report_callback(void (*callback)(const char*));

  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
  /* OPTIONAL */ void __asan_on_error();
  void __asan_on_error();

  SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();

  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
  /* OPTIONAL */ const char* __asan_default_options();
  const char* __asan_default_options();

  SANITIZER_INTERFACE_ATTRIBUTE
  extern uptr __asan_shadow_memory_dynamic_address;
@@ -242,6 +242,9 @@ extern "C" {
  void __asan_alloca_poison(uptr addr, uptr size);
  SANITIZER_INTERFACE_ATTRIBUTE
  void __asan_allocas_unpoison(uptr top, uptr bottom);

  SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
  const char* __asan_default_suppressions();
}  // extern "C"

#endif  // ASAN_INTERFACE_INTERNAL_H
@@ -64,9 +64,9 @@ void AsanInitFromRtl();

// asan_win.cc
void InitializePlatformExceptionHandlers();

// asan_win.cc / asan_posix.cc
const char *DescribeSignalOrException(int signo);
// Returns whether an address is a valid allocated system heap block.
// 'addr' must point to the beginning of the block.
bool IsSystemHeapAddress(uptr addr);

// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@@ -70,6 +70,7 @@ namespace __asan {

void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool IsSystemHeapAddress (uptr addr) { return false; }

void *AsanDoesNotSupportStaticLinkage() {
  // This will fail to link with -static.
@@ -110,7 +111,7 @@ static void ReportIncompatibleRT() {
}

void AsanCheckDynamicRTPrereqs() {
  if (!ASAN_DYNAMIC)
  if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order)
    return;

  // Ensure that dynamic RT is the first DSO in the list
@@ -48,6 +48,7 @@ namespace __asan {

void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool IsSystemHeapAddress (uptr addr) { return false; }

// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
@@ -138,7 +139,8 @@ void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
    t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
                           parent_tid, stack, /* detached */ true);
    t->Init();
    asanThreadRegistry().StartThread(t->tid(), 0, 0);
    asanThreadRegistry().StartThread(t->tid(), GetTid(),
                                     /* workerthread */ true, 0);
    SetCurrentThread(t);
  }
}
@@ -100,7 +100,7 @@ void *realloc(void *ptr, size_t size) {

ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
  CHECK(!"_realloc_dbg should not exist!");
  UNREACHABLE("_realloc_dbg should not exist!");
  return 0;
}
@@ -191,7 +191,6 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28;  // 0x30000000

#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
#define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE)

#define kLowMemBeg 0
#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
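Aside (illustrative, not part of the diff): MEM_TO_SHADOW and the removed SHADOW_TO_MEM are plain shift-and-offset arithmetic; a standalone sketch using hypothetical stand-in constants for a typical Linux x86_64 layout:

    #include <cstdint>

    // Hypothetical stand-ins for SHADOW_SCALE / SHADOW_OFFSET; the real values
    // are platform-dependent and come from asan_mapping.h.
    constexpr uint64_t kShadowScale  = 3;            // 8 app bytes -> 1 shadow byte
    constexpr uint64_t kShadowOffset = 0x7fff8000ULL;

    constexpr uint64_t MemToShadow(uint64_t mem) {
      return (mem >> kShadowScale) + kShadowOffset;  // MEM_TO_SHADOW
    }

    constexpr uint64_t ShadowToMem(uint64_t shadow) {
      // SHADOW_TO_MEM (removed above); recovers the granule-aligned address.
      return (shadow - kShadowOffset) << kShadowScale;
    }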
@@ -48,7 +48,7 @@ class HeapProfile {
    }
  }

  void Print(uptr top_percent) {
  void Print(uptr top_percent, uptr max_number_of_contexts) {
    InternalSort(&allocations_, allocations_.size(),
                 [](const AllocationSite &a, const AllocationSite &b) {
                   return a.total_size > b.total_size;
@@ -57,12 +57,14 @@ class HeapProfile {
    uptr total_shown = 0;
    Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
           "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
           "showing top %zd%%\n",
           "showing top %zd%% (at most %zd unique contexts)\n",
           total_allocated_user_size_, total_allocated_count_,
           total_quarantined_user_size_, total_quarantined_count_,
           total_other_count_, total_allocated_count_ +
           total_quarantined_count_ + total_other_count_, top_percent);
    for (uptr i = 0; i < allocations_.size(); i++) {
           total_quarantined_count_ + total_other_count_, top_percent,
           max_number_of_contexts);
    for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts);
         i++) {
      auto &a = allocations_[i];
      Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
             a.total_size * 100 / total_allocated_user_size_, a.count);
@@ -103,16 +105,23 @@ static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
                            void *argument) {
  HeapProfile hp;
  __lsan::ForEachChunk(ChunkCallback, &hp);
  hp.Print(reinterpret_cast<uptr>(argument));
  uptr *Arg = reinterpret_cast<uptr*>(argument);
  hp.Print(Arg[0], Arg[1]);
}

}  // namespace __asan

#endif  // CAN_SANITIZE_LEAKS

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_memory_profile(uptr top_percent) {
  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
void __sanitizer_print_memory_profile(uptr top_percent,
                                      uptr max_number_of_contexts) {
#if CAN_SANITIZE_LEAKS
  uptr Arg[2];
  Arg[0] = top_percent;
  Arg[1] = max_number_of_contexts;
  __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
#endif  // CAN_SANITIZE_LEAKS
}
}  // extern "C"

#endif  // CAN_SANITIZE_LEAKS
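Aside (illustrative, not part of the diff): calling the extended profiling entry point declared in common_interface_defs.h from a program:

    #include <sanitizer/common_interface_defs.h>

    void DumpTopHeapUsers() {
      // Print allocation stacks covering 90% of live heap, capped at 30 contexts.
      __sanitizer_print_memory_profile(90, 30);
    }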
@@ -33,19 +33,6 @@

namespace __asan {

const char *DescribeSignalOrException(int signo) {
  switch (signo) {
    case SIGFPE:
      return "FPE";
    case SIGILL:
      return "ILL";
    case SIGABRT:
      return "ABRT";
    default:
      return "SEGV";
  }
}

void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  ScopedDeadlySignal signal_scope(GetCurrentThread());
  int code = (int)((siginfo_t*)siginfo)->si_code;
@@ -88,7 +88,8 @@ bool ParseFrameDescription(const char *frame_descr,
  char *p;
  // This string is created by the compiler and has the following form:
  // "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName".
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;
@@ -101,7 +102,14 @@ bool ParseFrameDescription(const char *frame_descr,
      return false;
    }
    p++;
    StackVarDescr var = {beg, size, p, len};
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }
@@ -488,9 +496,6 @@ void __sanitizer_ptr_cmp(void *a, void *b) {
}
}  // extern "C"

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default implementation of __asan_on_error that does nothing
// and may be overridden by user.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
void __asan_on_error() {}
#endif
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
@@ -23,6 +23,7 @@ struct StackVarDescr {
  uptr size;
  const char *name_pos;
  uptr name_len;
  uptr line;
};

// Returns the number of globals close to the provided address and copies
@@ -31,15 +31,9 @@ static const char *kSuppressionTypes[] = {
    kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
    kODRViolation};

extern "C" {
#if SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__asan_default_suppressions();
#else
// No weak hooks, provide empty implementation.
const char *__asan_default_suppressions() { return ""; }
#endif  // SANITIZER_SUPPORTS_WEAK_HOOKS
}  // extern "C"
SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) {
  return "";
}

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
@@ -237,9 +237,10 @@ void AsanThread::Init() {
}

thread_return_t AsanThread::ThreadStart(
    uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, nullptr);
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

@@ -299,24 +300,27 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
    return true;
  }
  uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
@@ -391,7 +395,7 @@ void EnsureMainThreadIDIsCorrect() {
  context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
@@ -401,7 +405,7 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
@@ -417,7 +421,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
  return true;
}

void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
@@ -63,8 +63,14 @@ class AsanThread {
  void Destroy();

  void Init();  // Should be called from the thread itself.
  thread_return_t ThreadStart(uptr os_id,
  thread_return_t ThreadStart(tid_t os_id,
                              atomic_uintptr_t *signal_thread_is_registered);

  uptr stack_top();
@@ -19,7 +19,6 @@

#include <stdlib.h>

#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@@ -28,6 +27,8 @@
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_win.h"
#include "sanitizer_common/sanitizer_win_defs.h"

using namespace __asan;  // NOLINT

@@ -43,35 +44,50 @@ uptr __asan_get_shadow_memory_dynamic_address() {
  __asan_init();
  return __asan_shadow_memory_dynamic_address;
}

// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
// __asan_default_options().
void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
void __sanitizer_default_free_hook(void *ptr) { }
const char* __asan_default_default_options() { return ""; }
const char* __asan_default_default_suppressions() { return ""; }
void __asan_default_on_error() {}
// 64-bit msvc will not prepend an underscore for symbols.
#ifdef _WIN64
#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook")  // NOLINT
#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions")  // NOLINT
#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error")  // NOLINT
#else
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook")  // NOLINT
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions")  // NOLINT
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error")  // NOLINT
#endif
// }}}
}  // extern "C"

// ---------------------- Windows-specific interceptors ---------------- {{{
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
  EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
  CONTEXT *context = info->ContextRecord;

  // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.

  SignalContext sig = SignalContext::Create(exception_record, context);
  ReportDeadlySignal(exception_record->ExceptionCode, sig);
  UNREACHABLE("returned from reporting deadly signal");
}

// Wrapper SEH Handler. If the exception should be handled by asan, we call
// __asan_unhandled_exception_filter, otherwise, we execute the user provided
// exception handler or the default.
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
  DWORD exception_code = info->ExceptionRecord->ExceptionCode;
  if (__sanitizer::IsHandledDeadlyException(exception_code))
    return __asan_unhandled_exception_filter(info);
  if (user_seh_handler)
    return user_seh_handler(info);
  // Bubble out to the default exception filter.
  if (default_seh_handler)
    return default_seh_handler(info);
  return EXCEPTION_CONTINUE_SEARCH;
}

INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
                   LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
  CHECK(REAL(SetUnhandledExceptionFilter));
  if (ExceptionFilter == &SEHHandler || common_flags()->allow_user_segv_handler)
    return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
  // We record the user provided exception handler to be called for all the
  // exceptions unhandled by asan.
  Swap(ExceptionFilter, user_seh_handler);
  return ExceptionFilter;
}

INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
  CHECK(REAL(RtlRaiseException));
  // This is a noreturn function, unless it's one of the exceptions raised to
@@ -144,6 +160,7 @@ namespace __asan {

void InitializePlatformInterceptors() {
  ASAN_INTERCEPT_FUNC(CreateThread);
  ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);

#ifdef _WIN64
  ASAN_INTERCEPT_FUNC(__C_specific_handler);
@@ -260,60 +277,8 @@ void InitializePlatformExceptionHandlers() {
#endif
}

static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;

// Check based on flags if we should report this exception.
static bool ShouldReportDeadlyException(unsigned code) {
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_BREAKPOINT:
    case EXCEPTION_ILLEGAL_INSTRUCTION: {
      return common_flags()->handle_sigill;
    }
  }
  return false;
}

// Return the textual name for this exception.
const char *DescribeSignalOrException(int signo) {
  unsigned code = signo;
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
  }
  return nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
  EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
  CONTEXT *context = info->ContextRecord;

  // Continue the search if the signal wasn't deadly.
  if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
    return EXCEPTION_CONTINUE_SEARCH;
  // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.

  SignalContext sig = SignalContext::Create(exception_record, context);
  ReportDeadlySignal(exception_record->ExceptionCode, sig);
  UNREACHABLE("returned from reporting deadly signal");
}

static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
  __asan_unhandled_exception_filter(info);

  // Bubble out to the default exception filter.
  return default_seh_handler(info);
bool IsSystemHeapAddress(uptr addr) {
  return ::HeapValidate(GetProcessHeap(), 0, (void*)addr) != FALSE;
}

// We want to install our own exception handler (EH) to print helpful reports
@@ -368,7 +333,7 @@ __declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
    unsigned long, void *) = asan_thread_init;
#endif

ASAN_LINK_GLOBALS_WIN()
WIN_FORCE_LINK(__asan_dso_reg_hook)

// }}}
}  // namespace __asan
@ -15,388 +15,41 @@
|
||||
// See https://github.com/google/sanitizers/issues/209 for the details.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
// Only compile this code when building asan_dll_thunk.lib
|
||||
// Using #ifdef rather than relying on Makefiles etc.
|
||||
// simplifies the build procedure.
|
||||
#ifdef ASAN_DLL_THUNK
|
||||
#ifdef SANITIZER_DLL_THUNK
|
||||
#include "asan_init_version.h"
|
||||
#include "asan_globals_win.h"
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_common/sanitizer_win_defs.h"
|
||||
#include "sanitizer_common/sanitizer_win_dll_thunk.h"
|
||||
#include "sanitizer_common/sanitizer_platform_interceptors.h"
|
||||
|
||||
#ifdef _M_IX86
|
||||
#define WINAPI __stdcall
|
||||
#else
|
||||
#define WINAPI
|
||||
#endif
|
||||
// ASan own interface functions.
|
||||
#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
|
||||
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
|
||||
#include "asan_interface.inc"
|
||||
|
||||
// ---------- Function interception helper functions and macros ----------- {{{1
|
||||
extern "C" {
|
||||
void *WINAPI GetModuleHandleA(const char *module_name);
|
||||
void *WINAPI GetProcAddress(void *module, const char *proc_name);
|
||||
void abort();
|
||||
}
|
||||
// Memory allocation functions.
|
||||
INTERCEPT_WRAP_V_W(free)
|
||||
INTERCEPT_WRAP_V_W(_free_base)
|
||||
INTERCEPT_WRAP_V_WW(_free_dbg)
|
||||
|
||||
using namespace __sanitizer;
|
||||
INTERCEPT_WRAP_W_W(malloc)
|
||||
INTERCEPT_WRAP_W_W(_malloc_base)
|
||||
INTERCEPT_WRAP_W_WWWW(_malloc_dbg)
|
||||
|
||||
static uptr getRealProcAddressOrDie(const char *name) {
|
||||
uptr ret =
|
||||
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
|
||||
if (!ret)
|
||||
abort();
|
||||
return ret;
|
||||
}
|
||||
INTERCEPT_WRAP_W_WW(calloc)
|
||||
INTERCEPT_WRAP_W_WW(_calloc_base)
|
||||
INTERCEPT_WRAP_W_WWWWW(_calloc_dbg)
|
||||
INTERCEPT_WRAP_W_WWW(_calloc_impl)
|
||||
|
||||
// We need to intercept some functions (e.g. ASan interface, memory allocator --
|
||||
// let's call them "hooks") exported by the DLL thunk and forward the hooks to
|
||||
// the runtime in the main module.
|
||||
// However, we don't want to keep two lists of these hooks.
|
||||
// To avoid that, the list of hooks should be defined using the
|
||||
// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
|
||||
// at once by calling INTERCEPT_HOOKS().
|
||||
INTERCEPT_WRAP_W_WW(realloc)
|
||||
INTERCEPT_WRAP_W_WW(_realloc_base)
|
||||
INTERCEPT_WRAP_W_WWW(_realloc_dbg)
|
||||
INTERCEPT_WRAP_W_WWW(_recalloc)
|
||||
INTERCEPT_WRAP_W_WWW(_recalloc_base)
|
||||
|
||||
// Use macro+template magic to automatically generate the list of hooks.
|
||||
// Each hook at line LINE defines a template class with a static
|
||||
// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
|
||||
// The default implementation of FunctionInterceptor<LINE> is to call
|
||||
// the Execute() method corresponding to the previous line.
|
||||
template<int LINE>
|
||||
struct FunctionInterceptor {
|
||||
static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
|
||||
};
|
||||
|
||||
// There shouldn't be any hooks with negative definition line number.
|
||||
template<>
|
||||
struct FunctionInterceptor<0> {
|
||||
static void Execute() {}
|
||||
};
|
||||
|
||||
#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
|
||||
template <> struct FunctionInterceptor<__LINE__> { \
|
||||
static void Execute() { \
|
||||
uptr wrapper = getRealProcAddressOrDie(main_function); \
|
||||
if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
|
||||
abort(); \
|
||||
FunctionInterceptor<__LINE__ - 1>::Execute(); \
|
||||
} \
|
||||
};
|
||||
|
||||
// Special case of hooks -- ASan own interface functions. Those are only called
|
||||
// after __asan_init, thus an empty implementation is sufficient.
|
||||
#define INTERFACE_FUNCTION(name) \
|
||||
extern "C" __declspec(noinline) void name() { \
|
||||
volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
|
||||
__debugbreak(); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name)
|
||||
|
||||
// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
|
||||
#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute

// We can't define our own version of strlen etc. because that would lead to
// link-time or even type mismatch errors. Instead, we can declare a function
// just to be able to get its address. We may miss the first few calls to these
// functions since they can be called before __asan_init, but that would lead
// to false negatives in the startup code before the user's global
// initializers, which isn't a big deal.
#define INTERCEPT_LIBRARY_FUNCTION(name) \
  extern "C" void name(); \
  INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)

// Disable compiler warnings that show up if we declare our own version
// of a compiler intrinsic (e.g. strlen).
#pragma warning(disable: 4391)
#pragma warning(disable: 4392)

static void InterceptHooks();
// }}}
|
||||
|
||||
// ---------- Function wrapping helpers ----------------------------------- {{{1
|
||||
#define WRAP_V_V(name) \
|
||||
extern "C" void name() { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
fn(); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_V_W(name) \
|
||||
extern "C" void name(void *arg) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
fn(arg); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_V_WW(name) \
|
||||
extern "C" void name(void *arg1, void *arg2) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
fn(arg1, arg2); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_V_WWW(name) \
|
||||
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
fn(arg1, arg2, arg3); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_V(name) \
|
||||
extern "C" void *name() { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_W(name) \
|
||||
extern "C" void *name(void *arg) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_WW(name) \
|
||||
extern "C" void *name(void *arg1, void *arg2) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg1, arg2); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_WWW(name) \
|
||||
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg1, arg2, arg3); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_WWWW(name) \
|
||||
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg1, arg2, arg3, arg4); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_WWWWW(name) \
|
||||
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
|
||||
void *arg5) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg1, arg2, arg3, arg4, arg5); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
|
||||
#define WRAP_W_WWWWWW(name) \
|
||||
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
|
||||
void *arg5, void *arg6) { \
|
||||
typedef decltype(name) *fntype; \
|
||||
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
|
||||
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
|
||||
} \
|
||||
INTERCEPT_WHEN_POSSIBLE(#name, name);
|
||||
// }}}
|
||||
|
||||
// ----------------- ASan own interface functions --------------------
|
||||
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
|
||||
// want to call it in the __asan_init interceptor.
|
||||
WRAP_W_V(__asan_should_detect_stack_use_after_return)
|
||||
WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
|
||||
|
||||
extern "C" {
|
||||
int __asan_option_detect_stack_use_after_return;
|
||||
uptr __asan_shadow_memory_dynamic_address;
|
||||
|
||||
// Manually wrap __asan_init as we need to initialize
|
||||
// __asan_option_detect_stack_use_after_return afterwards.
|
||||
void __asan_init() {
|
||||
typedef void (*fntype)();
|
||||
static fntype fn = 0;
|
||||
// __asan_init is expected to be called by only one thread.
|
||||
if (fn) return;
|
||||
|
||||
fn = (fntype)getRealProcAddressOrDie("__asan_init");
|
||||
fn();
|
||||
__asan_option_detect_stack_use_after_return =
|
||||
(__asan_should_detect_stack_use_after_return() != 0);
|
||||
__asan_shadow_memory_dynamic_address =
|
||||
(uptr)__asan_get_shadow_memory_dynamic_address();
|
||||
InterceptHooks();
|
||||
}
|
||||
}
|
||||
|
||||
extern "C" void __asan_version_mismatch_check() {
|
||||
// Do nothing.
|
||||
}
|
||||
|
||||
INTERFACE_FUNCTION(__asan_handle_no_return)
|
||||
INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_report_store1)
|
||||
INTERFACE_FUNCTION(__asan_report_store2)
|
||||
INTERFACE_FUNCTION(__asan_report_store4)
|
||||
INTERFACE_FUNCTION(__asan_report_store8)
|
||||
INTERFACE_FUNCTION(__asan_report_store16)
|
||||
INTERFACE_FUNCTION(__asan_report_store_n)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_report_load1)
|
||||
INTERFACE_FUNCTION(__asan_report_load2)
|
||||
INTERFACE_FUNCTION(__asan_report_load4)
|
||||
INTERFACE_FUNCTION(__asan_report_load8)
|
||||
INTERFACE_FUNCTION(__asan_report_load16)
|
||||
INTERFACE_FUNCTION(__asan_report_load_n)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_store1)
|
||||
INTERFACE_FUNCTION(__asan_store2)
|
||||
INTERFACE_FUNCTION(__asan_store4)
|
||||
INTERFACE_FUNCTION(__asan_store8)
|
||||
INTERFACE_FUNCTION(__asan_store16)
|
||||
INTERFACE_FUNCTION(__asan_storeN)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_load1)
|
||||
INTERFACE_FUNCTION(__asan_load2)
|
||||
INTERFACE_FUNCTION(__asan_load4)
|
||||
INTERFACE_FUNCTION(__asan_load8)
|
||||
INTERFACE_FUNCTION(__asan_load16)
|
||||
INTERFACE_FUNCTION(__asan_loadN)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_memcpy);
|
||||
INTERFACE_FUNCTION(__asan_memset);
|
||||
INTERFACE_FUNCTION(__asan_memmove);
|
||||
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_00);
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_f1);
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_f2);
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_f3);
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_f5);
|
||||
INTERFACE_FUNCTION(__asan_set_shadow_f8);
|
||||
|
||||
INTERFACE_FUNCTION(__asan_alloca_poison);
|
||||
INTERFACE_FUNCTION(__asan_allocas_unpoison);
|
||||
|
||||
INTERFACE_FUNCTION(__asan_register_globals)
|
||||
INTERFACE_FUNCTION(__asan_unregister_globals)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_before_dynamic_init)
|
||||
INTERFACE_FUNCTION(__asan_after_dynamic_init)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_poison_stack_memory)
|
||||
INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_poison_memory_region)
|
||||
INTERFACE_FUNCTION(__asan_unpoison_memory_region)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_address_is_poisoned)
|
||||
INTERFACE_FUNCTION(__asan_region_is_poisoned)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_get_current_fake_stack)
|
||||
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_0)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_1)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_2)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_3)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_4)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_5)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_6)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_7)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_8)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_9)
|
||||
INTERFACE_FUNCTION(__asan_stack_malloc_10)
|
||||
|
||||
INTERFACE_FUNCTION(__asan_stack_free_0)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_1)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_2)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_4)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_5)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_6)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_7)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_8)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_9)
|
||||
INTERFACE_FUNCTION(__asan_stack_free_10)
|
||||
|
||||
// FIXME: we might want to have a sanitizer_win_dll_thunk?
|
||||
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
|
||||
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_dump)
|
||||
INTERFACE_FUNCTION(__sanitizer_dump_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_init)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_ownership)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
|
||||
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
|
||||
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
|
||||
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
|
||||
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
|
||||
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
|
||||
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
|
||||
INTERFACE_FUNCTION(__sanitizer_reset_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
|
||||
INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
|
||||
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_path)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
|
||||
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
|
||||
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
|
||||
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
|
||||
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
|
||||
|
||||
// TODO(timurrrr): Add more interface functions on the as-needed basis.
|
||||
|
||||
// ----------------- Memory allocation functions ---------------------
|
||||
WRAP_V_W(free)
|
||||
WRAP_V_W(_free_base)
|
||||
WRAP_V_WW(_free_dbg)
|
||||
|
||||
WRAP_W_W(malloc)
|
||||
WRAP_W_W(_malloc_base)
|
||||
WRAP_W_WWWW(_malloc_dbg)
|
||||
|
||||
WRAP_W_WW(calloc)
|
||||
WRAP_W_WW(_calloc_base)
|
||||
WRAP_W_WWWWW(_calloc_dbg)
|
||||
WRAP_W_WWW(_calloc_impl)
|
||||
|
||||
WRAP_W_WW(realloc)
|
||||
WRAP_W_WW(_realloc_base)
|
||||
WRAP_W_WWW(_realloc_dbg)
|
||||
WRAP_W_WWW(_recalloc)
|
||||
WRAP_W_WWW(_recalloc_base)
|
||||
|
||||
WRAP_W_W(_msize)
|
||||
WRAP_W_W(_expand)
|
||||
WRAP_W_W(_expand_dbg)
|
||||
INTERCEPT_WRAP_W_W(_msize)
|
||||
INTERCEPT_WRAP_W_W(_expand)
|
||||
INTERCEPT_WRAP_W_W(_expand_dbg)
|
||||
|
||||
// TODO(timurrrr): Might want to add support for _aligned_* allocation
|
||||
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
|
||||
@ -405,20 +58,6 @@ WRAP_W_W(_expand_dbg)
|
||||
|
||||
INTERCEPT_LIBRARY_FUNCTION(atoi);
|
||||
INTERCEPT_LIBRARY_FUNCTION(atol);
|
||||
|
||||
#ifdef _WIN64
|
||||
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
|
||||
#else
|
||||
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
|
||||
|
||||
// _except_handler4 checks -GS cookie which is different for each module, so we
|
||||
// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
|
||||
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
||||
__asan_handle_no_return();
|
||||
return REAL(_except_handler4)(a, b, c, d);
|
||||
}
|
||||
#endif
|
||||
|
||||
INTERCEPT_LIBRARY_FUNCTION(frexp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(longjmp);
|
||||
#if SANITIZER_INTERCEPT_MEMCHR
|
||||
@ -443,41 +82,70 @@ INTERCEPT_LIBRARY_FUNCTION(strpbrk);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strrchr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strspn);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strstr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtok);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtol);
|
||||
INTERCEPT_LIBRARY_FUNCTION(wcslen);
|
||||
|
||||
// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
|
||||
// is defined.
|
||||
void InterceptHooks() {
|
||||
INTERCEPT_HOOKS();
|
||||
#ifdef _WIN64
|
||||
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
|
||||
#else
|
||||
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
|
||||
// _except_handler4 checks -GS cookie which is different for each module, so we
|
||||
// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
|
||||
INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
||||
__asan_handle_no_return();
|
||||
return REAL(_except_handler4)(a, b, c, d);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Windows-specific functions not included in asan_interface.inc.
|
||||
INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
|
||||
INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
|
||||
INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
|
||||
|
||||
using namespace __sanitizer;
|
||||
|
||||
extern "C" {
|
||||
int __asan_option_detect_stack_use_after_return;
|
||||
uptr __asan_shadow_memory_dynamic_address;
|
||||
} // extern "C"
|
||||
|
||||
static int asan_dll_thunk_init() {
|
||||
typedef void (*fntype)();
|
||||
static fntype fn = 0;
|
||||
// asan_dll_thunk_init is expected to be called by only one thread.
|
||||
if (fn) return 0;
|
||||
|
||||
// Ensure all interception was executed.
|
||||
__dll_thunk_init();
|
||||
|
||||
fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init");
|
||||
fn();
|
||||
__asan_option_detect_stack_use_after_return =
|
||||
(__asan_should_detect_stack_use_after_return() != 0);
|
||||
__asan_shadow_memory_dynamic_address =
|
||||
(uptr)__asan_get_shadow_memory_dynamic_address();
|
||||
|
||||
#ifndef _WIN64
|
||||
INTERCEPT_FUNCTION(_except_handler4);
|
||||
#endif
|
||||
}
|
||||
|
||||
// We want to call __asan_init before C/C++ initializers/constructors are
|
||||
// executed, otherwise functions like memset might be invoked.
|
||||
// For some strange reason, merely linking in asan_preinit.cc doesn't work
|
||||
// as the callback is never called... Is link.exe doing something too smart?
|
||||
|
||||
// In DLLs, the callbacks are expected to return 0,
|
||||
// otherwise CRT initialization fails.
|
||||
static int call_asan_init() {
|
||||
__asan_init();
|
||||
// In DLLs, the callbacks are expected to return 0,
|
||||
// otherwise CRT initialization fails.
|
||||
return 0;
|
||||
}
|
||||
|
||||
#pragma section(".CRT$XIB", long, read) // NOLINT
|
||||
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
|
||||
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init;
|
||||
|
||||
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
|
||||
void *reserved) {
|
||||
if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
|
||||
void *reserved) {
|
||||
if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init();
|
||||
}
|
||||
|
||||
#pragma section(".CRT$XLAB", long, read) // NOLINT
|
||||
__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
|
||||
unsigned long, void *) = asan_thread_init;
|
||||
|
||||
ASAN_LINK_GLOBALS_WIN()
|
||||
WIN_FORCE_LINK(__asan_dso_reg_hook)
|
||||
|
||||
#endif // ASAN_DLL_THUNK
|
||||
#endif // SANITIZER_DLL_THUNK
|
||||
|
@ -14,20 +14,24 @@
|
||||
// using the default "import library" generated when linking the DLL RTL.
|
||||
//
|
||||
// This includes:
|
||||
// - creating weak aliases to default implementation imported from asan dll.
|
||||
// - forwarding the detect_stack_use_after_return runtime option
|
||||
// - working around deficiencies of the MD runtime
|
||||
// - installing a custom SEH handler
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
// Only compile this code when building asan_dynamic_runtime_thunk.lib
|
||||
// Using #ifdef rather than relying on Makefiles etc.
|
||||
// simplifies the build procedure.
|
||||
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
|
||||
#include "asan_globals_win.h"
|
||||
#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
|
||||
#define SANITIZER_IMPORT_INTERFACE 1
|
||||
#include "sanitizer_common/sanitizer_win_defs.h"
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
|
||||
// Define weak alias for all weak functions imported from asan dll.
|
||||
#define INTERFACE_FUNCTION(Name)
|
||||
#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
|
||||
#include "asan_interface.inc"
|
||||
|
||||
// First, declare CRT sections we'll be using in this file
|
||||
#pragma section(".CRT$XIB", long, read) // NOLINT
|
||||
#pragma section(".CRT$XID", long, read) // NOLINT
|
||||
@ -122,6 +126,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
|
||||
SetSEHFilter;
|
||||
}
|
||||
|
||||
ASAN_LINK_GLOBALS_WIN()
|
||||
WIN_FORCE_LINK(__asan_dso_reg_hook)
|
||||
|
||||
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
|
||||
#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
|
||||
|
23
contrib/compiler-rt/lib/asan/asan_win_weak_interception.cc
Normal file
23
contrib/compiler-rt/lib/asan/asan_win_weak_interception.cc
Normal file
@ -0,0 +1,23 @@
|
||||
//===-- asan_win_weak_interception.cc -------------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
// This module should be included in Address Sanitizer when it is implemented as
|
||||
// a shared library on Windows (dll), in order to delegate the calls of weak
|
||||
// functions to the implementation in the main executable when a strong
|
||||
// definition is provided.
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifdef SANITIZER_DYNAMIC
|
||||
#include "sanitizer_common/sanitizer_win_weak_interception.h"
|
||||
#include "asan_interface_internal.h"
|
||||
// Check if strong definitions for weak functions are present in the main
|
||||
// executable. If that is the case, override dll functions to point to strong
|
||||
// implementations.
|
||||
#define INTERFACE_FUNCTION(Name)
|
||||
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
|
||||
#include "asan_interface.inc"
|
||||
#endif // SANITIZER_DYNAMIC
|
277
contrib/compiler-rt/lib/builtins/arm/addsf3.S
Normal file
277
contrib/compiler-rt/lib/builtins/arm/addsf3.S
Normal file
@ -0,0 +1,277 @@
|
||||
/*===-- addsf3.S - Adds two single precision floating point numbers ------===//
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===//
 *
 * This file implements the __addsf3 (single precision floating point number
 * addition with the IEEE-754 default rounding (to nearest, ties to even))
 * function for the ARM Thumb1 ISA.
 *
 *===----------------------------------------------------------------------===*/
|
||||
|
||||
#include "../assembly.h"
|
||||
#define significandBits 23
|
||||
#define typeWidth 32
|
||||
|
||||
.syntax unified
|
||||
.text
|
||||
.thumb
|
||||
.p2align 2
|
||||
|
||||
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fadd, __addsf3)
|
||||
|
||||
DEFINE_COMPILERRT_THUMB_FUNCTION(__addsf3)
|
||||
push {r4, r5, r6, r7, lr}
|
||||
// Get the absolute value of a and b.
|
||||
lsls r2, r0, #1
|
||||
lsls r3, r1, #1
|
||||
lsrs r2, r2, #1 /* aAbs */
|
||||
beq LOCAL_LABEL(a_zero_nan_inf)
|
||||
lsrs r3, r3, #1 /* bAbs */
|
||||
beq LOCAL_LABEL(zero_nan_inf)
|
||||
|
||||
// Detect if a or b is infinity or Nan.
|
||||
lsrs r6, r2, #(significandBits)
|
||||
lsrs r7, r3, #(significandBits)
|
||||
cmp r6, #0xFF
|
||||
beq LOCAL_LABEL(zero_nan_inf)
|
||||
cmp r7, #0xFF
|
||||
beq LOCAL_LABEL(zero_nan_inf)
|
||||
|
||||
// Swap Rep and Abs so that a and aAbs have the larger absolute value.
|
||||
cmp r2, r3
|
||||
bhs LOCAL_LABEL(no_swap)
|
||||
movs r4, r0
|
||||
movs r5, r2
|
||||
movs r0, r1
|
||||
movs r2, r3
|
||||
movs r1, r4
|
||||
movs r3, r5
|
||||
LOCAL_LABEL(no_swap):
|
||||
|
||||
// Get the significands and shift them to give us round, guard and sticky.
|
||||
lsls r4, r0, #(typeWidth - significandBits)
|
||||
lsrs r4, r4, #(typeWidth - significandBits - 3) /* aSignificand << 3 */
|
||||
lsls r5, r1, #(typeWidth - significandBits)
|
||||
lsrs r5, r5, #(typeWidth - significandBits - 3) /* bSignificand << 3 */
|
||||
|
||||
// Get the implicitBit.
|
||||
movs r6, #1
|
||||
lsls r6, r6, #(significandBits + 3)
|
||||
|
||||
// Get aExponent and set implicit bit if necessary.
|
||||
lsrs r2, r2, #(significandBits)
|
||||
beq LOCAL_LABEL(a_done_implicit_bit)
|
||||
orrs r4, r6
|
||||
LOCAL_LABEL(a_done_implicit_bit):
|
||||
|
||||
// Get bExponent and set implicit bit if necessary.
|
||||
lsrs r3, r3, #(significandBits)
|
||||
beq LOCAL_LABEL(b_done_implicit_bit)
|
||||
orrs r5, r6
|
||||
LOCAL_LABEL(b_done_implicit_bit):
|
||||
|
||||
// Get the difference in exponents.
|
||||
subs r6, r2, r3
|
||||
beq LOCAL_LABEL(done_align)
|
||||
|
||||
// If b is denormal, then a must be normal as align > 0, and we only need to
|
||||
// right shift bSignificand by (align - 1) bits.
|
||||
cmp r3, #0
|
||||
bne 1f
|
||||
subs r6, r6, #1
|
||||
1:
|
||||
|
||||
// No longer needs bExponent. r3 is dead here.
|
||||
// Set sticky bits of b: sticky = bSignificand << (typeWidth - align).
|
||||
movs r3, #(typeWidth)
|
||||
subs r3, r3, r6
|
||||
movs r7, r5
|
||||
lsls r7, r3
|
||||
beq 1f
|
||||
movs r7, #1
|
||||
1:
|
||||
|
||||
// bSignificand = bSignificand >> align | sticky;
|
||||
lsrs r5, r6
|
||||
orrs r5, r7
|
||||
bne LOCAL_LABEL(done_align)
|
||||
movs r5, #1 // sticky; b is known to be non-zero.
|
||||
|
||||
LOCAL_LABEL(done_align):
|
||||
// isSubtraction = (aRep ^ bRep) >> 31;
|
||||
movs r7, r0
|
||||
eors r7, r1
|
||||
lsrs r7, #31
|
||||
bne LOCAL_LABEL(do_substraction)
|
||||
|
||||
// Same sign, do Addition.
|
||||
|
||||
// aSignificand += bSignificand;
|
||||
adds r4, r4, r5
|
||||
|
||||
// Check carry bit.
|
||||
movs r6, #1
|
||||
lsls r6, r6, #(significandBits + 3 + 1)
|
||||
movs r7, r4
|
||||
ands r7, r6
|
||||
beq LOCAL_LABEL(form_result)
|
||||
// If the addition carried up, we need to right-shift the result and
|
||||
// adjust the exponent.
|
||||
movs r7, r4
|
||||
movs r6, #1
|
||||
ands r7, r6 // sticky = aSignificand & 1;
|
||||
lsrs r4, #1
|
||||
orrs r4, r7 // result Significand
|
||||
adds r2, #1 // result Exponent
|
||||
// If we have overflowed the type, return +/- infinity.
|
||||
cmp r2, 0xFF
|
||||
beq LOCAL_LABEL(ret_inf)
|
||||
|
||||
LOCAL_LABEL(form_result):
|
||||
// Shift the sign, exponent and significand into place.
|
||||
lsrs r0, #(typeWidth - 1)
|
||||
lsls r0, #(typeWidth - 1) // Get Sign.
|
||||
lsls r2, #(significandBits)
|
||||
orrs r0, r2
|
||||
movs r1, r4
|
||||
lsls r4, #(typeWidth - significandBits - 3)
|
||||
lsrs r4, #(typeWidth - significandBits)
|
||||
orrs r0, r4
|
||||
|
||||
// Final rounding. The result may overflow to infinity, but that is the
|
||||
// correct result in that case.
|
||||
// roundGuardSticky = aSignificand & 0x7;
|
||||
movs r2, #0x7
|
||||
ands r1, r2
|
||||
// if (roundGuardSticky > 0x4) result++;
|
||||
|
||||
cmp r1, #0x4
|
||||
blt LOCAL_LABEL(done_round)
|
||||
beq 1f
|
||||
adds r0, #1
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
1:
|
||||
|
||||
// if (roundGuardSticky == 0x4) result += result & 1;
|
||||
movs r1, r0
|
||||
lsrs r1, #1
|
||||
bcc LOCAL_LABEL(done_round)
|
||||
adds r0, r0, #1
|
||||
LOCAL_LABEL(done_round):
|
||||
pop {r4, r5, r6, r7, pc}
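
// Illustrative C sketch (not part of this file) of the rounding rule carried
// out above: the significand keeps three extra low bits (round/guard/sticky);
// values strictly above the halfway point round up, and an exact tie rounds
// to even, matching IEEE-754 round-to-nearest-even.
//
//   static inline unsigned round_nearest_even(unsigned sig_with_grs) {
//     unsigned result = sig_with_grs >> 3;
//     unsigned grs = sig_with_grs & 0x7;
//     if (grs > 0x4)              /* more than halfway: round up    */
//       ++result;
//     else if (grs == 0x4)        /* exactly halfway: round to even */
//       result += result & 1;
//     return result;
//   }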
|
||||
|
||||
LOCAL_LABEL(do_substraction):
|
||||
subs r4, r4, r5 // aSignificand -= bSignificand;
|
||||
beq LOCAL_LABEL(ret_zero)
|
||||
movs r6, r4
|
||||
cmp r2, 0
|
||||
beq LOCAL_LABEL(form_result) // if a's exp is 0, no need to normalize.
|
||||
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
|
||||
lsrs r6, r6, #(significandBits + 3)
|
||||
bne LOCAL_LABEL(form_result)
|
||||
|
||||
push {r0, r1, r2, r3}
|
||||
movs r0, r4
|
||||
bl __clzsi2
|
||||
movs r5, r0
|
||||
pop {r0, r1, r2, r3}
|
||||
// shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
|
||||
subs r5, r5, #(typeWidth - significandBits - 3 - 1)
|
||||
// aSignificand <<= shift; aExponent -= shift;
|
||||
lsls r4, r5
|
||||
subs r2, r2, r5
|
||||
bgt LOCAL_LABEL(form_result)
|
||||
|
||||
// Do normalization if aExponent <= 0.
|
||||
movs r6, #1
|
||||
subs r6, r6, r2 // 1 - aExponent;
|
||||
movs r2, #0 // aExponent = 0;
|
||||
movs r3, #(typeWidth) // bExponent is dead.
|
||||
subs r3, r3, r6
|
||||
movs r7, r4
|
||||
lsls r7, r3 // stickyBit = (bool)(aSignificand << (typeWidth - align))
|
||||
beq 1f
|
||||
movs r7, #1
|
||||
1:
|
||||
lsrs r4, r6 /* aSignificand >> shift */
|
||||
orrs r4, r7
|
||||
b LOCAL_LABEL(form_result)
|
||||
|
||||
LOCAL_LABEL(ret_zero):
|
||||
movs r0, #0
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
|
||||
LOCAL_LABEL(a_zero_nan_inf):
|
||||
lsrs r3, r3, #1
|
||||
|
||||
LOCAL_LABEL(zero_nan_inf):
|
||||
// Here r2 has aAbs, r3 has bAbs
|
||||
movs r4, #0xFF
|
||||
lsls r4, r4, #(significandBits) // Make +inf.
|
||||
|
||||
cmp r2, r4
|
||||
bhi LOCAL_LABEL(a_is_nan)
|
||||
cmp r3, r4
|
||||
bhi LOCAL_LABEL(b_is_nan)
|
||||
|
||||
cmp r2, r4
|
||||
bne LOCAL_LABEL(a_is_rational)
|
||||
// aAbs is INF.
|
||||
eors r1, r0 // aRep ^ bRep.
|
||||
movs r6, #1
|
||||
lsls r6, r6, #(typeWidth - 1) // get sign mask.
|
||||
cmp r1, r6 // if they only differ on sign bit, it's -INF + INF
|
||||
beq LOCAL_LABEL(a_is_nan)
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(a_is_rational):
|
||||
cmp r3, r4
|
||||
bne LOCAL_LABEL(b_is_rational)
|
||||
movs r0, r1
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(b_is_rational):
|
||||
// either a or b or both are zero.
|
||||
adds r4, r2, r3
|
||||
beq LOCAL_LABEL(both_zero)
|
||||
cmp r2, #0 // is absA 0 ?
|
||||
beq LOCAL_LABEL(ret_b)
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(both_zero):
|
||||
ands r0, r1 // +0 + -0 = +0
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(ret_b):
|
||||
movs r0, r1
|
||||
|
||||
LOCAL_LABEL(ret):
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(b_is_nan):
|
||||
movs r0, r1
|
||||
LOCAL_LABEL(a_is_nan):
|
||||
movs r1, #1
|
||||
lsls r1, r1, #(significandBits -1) // r1 is quiet bit.
|
||||
orrs r0, r1
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
LOCAL_LABEL(ret_inf):
|
||||
movs r4, #0xFF
|
||||
lsls r4, r4, #(significandBits)
|
||||
orrs r0, r4
|
||||
lsrs r0, r0, #(significandBits)
|
||||
lsls r0, r0, #(significandBits)
|
||||
pop {r4, r5, r6, r7, pc}
|
||||
|
||||
|
||||
END_COMPILERRT_FUNCTION(__addsf3)
|
||||
|
||||
NO_EXEC_STACK_DIRECTIVE
|
@ -30,6 +30,19 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
|
||||
push {r0-r3, lr}
|
||||
bl __aeabi_cdcmpeq_check_nan
|
||||
cmp r0, #1
|
||||
#if __ARM_ARCH_ISA_THUMB == 1
|
||||
beq 1f
|
||||
// NaN has been ruled out, so __aeabi_cdcmple can't trap
|
||||
mov r0, sp
|
||||
ldm r0, {r0-r3}
|
||||
bl __aeabi_cdcmple
|
||||
pop {r0-r3, pc}
|
||||
1:
|
||||
// Z = 0, C = 1
|
||||
movs r0, #0xF
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
#else
|
||||
pop {r0-r3, lr}
|
||||
|
||||
// NaN has been ruled out, so __aeabi_cdcmple can't trap
|
||||
@ -37,6 +50,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
|
||||
|
||||
msr CPSR_f, #APSR_C
|
||||
JMP(lr)
|
||||
#endif
|
||||
END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
|
||||
|
||||
|
||||
@ -59,6 +73,28 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
|
||||
|
||||
bl __aeabi_dcmplt
|
||||
cmp r0, #1
|
||||
#if __ARM_ARCH_ISA_THUMB == 1
|
||||
bne 1f
|
||||
// Z = 0, C = 0
|
||||
movs r0, #1
|
||||
lsls r0, r0, #1
|
||||
pop {r0-r3, pc}
|
||||
1:
|
||||
mov r0, sp
|
||||
ldm r0, {r0-r3}
|
||||
bl __aeabi_dcmpeq
|
||||
cmp r0, #1
|
||||
bne 2f
|
||||
// Z = 1, C = 1
|
||||
movs r0, #2
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
2:
|
||||
// Z = 0, C = 1
|
||||
movs r0, #0xF
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
#else
|
||||
moveq ip, #0
|
||||
beq 1f
|
||||
|
||||
@ -72,6 +108,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
|
||||
msr CPSR_f, ip
|
||||
pop {r0-r3}
|
||||
POP_PC()
|
||||
#endif
|
||||
END_COMPILERRT_FUNCTION(__aeabi_cdcmple)
|
||||
|
||||
// int __aeabi_cdrcmple(double a, double b) {
|
||||
|
@ -30,6 +30,19 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
|
||||
push {r0-r3, lr}
|
||||
bl __aeabi_cfcmpeq_check_nan
|
||||
cmp r0, #1
|
||||
#if __ARM_ARCH_ISA_THUMB == 1
|
||||
beq 1f
|
||||
// NaN has been ruled out, so __aeabi_cfcmple can't trap
|
||||
mov r0, sp
|
||||
ldm r0, {r0-r3}
|
||||
bl __aeabi_cfcmple
|
||||
pop {r0-r3, pc}
|
||||
1:
|
||||
// Z = 0, C = 1
|
||||
movs r0, #0xF
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
#else
|
||||
pop {r0-r3, lr}
|
||||
|
||||
// NaN has been ruled out, so __aeabi_cfcmple can't trap
|
||||
@ -37,6 +50,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
|
||||
|
||||
msr CPSR_f, #APSR_C
|
||||
JMP(lr)
|
||||
#endif
|
||||
END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
|
||||
|
||||
|
||||
@ -59,6 +73,28 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
|
||||
|
||||
bl __aeabi_fcmplt
|
||||
cmp r0, #1
|
||||
#if __ARM_ARCH_ISA_THUMB == 1
|
||||
bne 1f
|
||||
// Z = 0, C = 0
|
||||
movs r0, #1
|
||||
lsls r0, r0, #1
|
||||
pop {r0-r3, pc}
|
||||
1:
|
||||
mov r0, sp
|
||||
ldm r0, {r0-r3}
|
||||
bl __aeabi_fcmpeq
|
||||
cmp r0, #1
|
||||
bne 2f
|
||||
// Z = 1, C = 1
|
||||
movs r0, #2
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
2:
|
||||
// Z = 0, C = 1
|
||||
movs r0, #0xF
|
||||
lsls r0, r0, #31
|
||||
pop {r0-r3, pc}
|
||||
#else
|
||||
moveq ip, #0
|
||||
beq 1f
|
||||
|
||||
@ -72,6 +108,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
|
||||
msr CPSR_f, ip
|
||||
pop {r0-r3}
|
||||
POP_PC()
|
||||
#endif
|
||||
END_COMPILERRT_FUNCTION(__aeabi_cfcmple)
|
||||
|
||||
// int __aeabi_cfrcmple(float a, float b) {
|
||||
|
@ -26,10 +26,10 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond) \
|
||||
bl SYMBOL_NAME(__ ## cond ## df2) SEPARATOR \
|
||||
cmp r0, #0 SEPARATOR \
|
||||
b ## cond 1f SEPARATOR \
|
||||
mov r0, #0 SEPARATOR \
|
||||
movs r0, #0 SEPARATOR \
|
||||
pop { r4, pc } SEPARATOR \
|
||||
1: SEPARATOR \
|
||||
mov r0, #1 SEPARATOR \
|
||||
movs r0, #1 SEPARATOR \
|
||||
pop { r4, pc } SEPARATOR \
|
||||
END_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond)
|
||||
|
||||
|
@ -35,7 +35,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
|
||||
push {r0, r1, lr}
|
||||
bl SYMBOL_NAME(__divsi3)
|
||||
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
|
||||
muls r2, r2, r0 // r2 = quot * denom
|
||||
muls r2, r0, r2 // r2 = quot * denom
|
||||
subs r1, r1, r2
|
||||
JMP (r3)
|
||||
#else // defined(USE_THUMB_1)
|
||||
|
@ -23,23 +23,23 @@
|
||||
.syntax unified
|
||||
.p2align 2
|
||||
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
|
||||
push {r11, lr}
|
||||
push {r6, lr}
|
||||
sub sp, sp, #16
|
||||
add r12, sp, #8
|
||||
str r12, [sp]
|
||||
add r6, sp, #8
|
||||
str r6, [sp]
|
||||
#if defined(__MINGW32__)
|
||||
mov r12, r0
|
||||
mov r0, r2
|
||||
mov r2, r12
|
||||
mov r12, r1
|
||||
mov r1, r3
|
||||
mov r3, r12
|
||||
movs r6, r0
|
||||
movs r0, r2
|
||||
movs r2, r6
|
||||
movs r6, r1
|
||||
movs r1, r3
|
||||
movs r3, r6
|
||||
#endif
|
||||
bl SYMBOL_NAME(__divmoddi4)
|
||||
ldr r2, [sp, #8]
|
||||
ldr r3, [sp, #12]
|
||||
add sp, sp, #16
|
||||
pop {r11, pc}
|
||||
pop {r6, pc}
|
||||
END_COMPILERRT_FUNCTION(__aeabi_ldivmod)
|
||||
|
||||
NO_EXEC_STACK_DIRECTIVE
|
||||
|
@ -26,7 +26,7 @@ DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
|
||||
|
||||
DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
|
||||
mov r2, r1
|
||||
mov r1, #0
|
||||
movs r1, #0
|
||||
b memset
|
||||
END_COMPILERRT_FUNCTION(__aeabi_memclr)
|
||||
|
||||
|
@ -37,7 +37,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
|
||||
push {r0, r1, lr}
|
||||
bl SYMBOL_NAME(__aeabi_uidiv)
|
||||
pop {r1, r2, r3}
|
||||
muls r2, r2, r0 // r2 = quot * denom
|
||||
muls r2, r0, r2 // r2 = quot * denom
|
||||
subs r1, r1, r2
|
||||
JMP (r3)
|
||||
LOCAL_LABEL(case_denom_larger):
|
||||
|
@ -23,23 +23,23 @@
|
||||
.syntax unified
|
||||
.p2align 2
|
||||
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
|
||||
push {r11, lr}
|
||||
push {r6, lr}
|
||||
sub sp, sp, #16
|
||||
add r12, sp, #8
|
||||
str r12, [sp]
|
||||
add r6, sp, #8
|
||||
str r6, [sp]
|
||||
#if defined(__MINGW32__)
|
||||
mov r12, r0
|
||||
mov r0, r2
|
||||
mov r2, r12
|
||||
mov r12, r1
|
||||
mov r1, r3
|
||||
mov r3, r12
|
||||
movs r6, r0
|
||||
movs r0, r2
|
||||
movs r2, r6
|
||||
movs r6, r1
|
||||
movs r1, r3
|
||||
movs r3, r6
|
||||
#endif
|
||||
bl SYMBOL_NAME(__udivmoddi4)
|
||||
ldr r2, [sp, #8]
|
||||
ldr r3, [sp, #12]
|
||||
add sp, sp, #16
|
||||
pop {r11, pc}
|
||||
pop {r6, pc}
|
||||
END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
|
||||
|
||||
NO_EXEC_STACK_DIRECTIVE
|
||||
|
@ -74,7 +74,7 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2)
|
||||
// the subsequent operations.
|
||||
#if defined(USE_THUMB_1)
|
||||
lsrs r6, r3, #1
|
||||
orrs r6, r2, r6
|
||||
orrs r6, r2
|
||||
#else
|
||||
orrs r12, r2, r3, lsr #1
|
||||
#endif
|
||||
@ -203,7 +203,7 @@ DEFINE_COMPILERRT_FUNCTION(__gtsf2)
|
||||
lsls r2, r0, #1
|
||||
lsls r3, r1, #1
|
||||
lsrs r6, r3, #1
|
||||
orrs r6, r2, r6
|
||||
orrs r6, r2
|
||||
beq 1f
|
||||
movs r6, r0
|
||||
eors r6, r1
|
||||
|
@ -36,7 +36,16 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
|
||||
beq LOCAL_LABEL(divby0)
|
||||
udiv r0, r0, r1
|
||||
bx lr
|
||||
#else
|
||||
|
||||
LOCAL_LABEL(divby0):
|
||||
mov r0, #0
|
||||
# ifdef __ARM_EABI__
|
||||
b __aeabi_idiv0
|
||||
# else
|
||||
JMP(lr)
|
||||
# endif
|
||||
|
||||
#else /* ! __ARM_ARCH_EXT_IDIV__ */
|
||||
cmp r1, #1
|
||||
bcc LOCAL_LABEL(divby0)
|
||||
#if defined(USE_THUMB_1)
|
||||
@ -185,9 +194,12 @@ LOCAL_LABEL(skip_1):
|
||||
LOCAL_LABEL(divby0):
|
||||
movs r0, #0
|
||||
# if defined(__ARM_EABI__)
|
||||
push {r7, lr}
|
||||
bl __aeabi_idiv0 // due to relocation limit, can't use b.
|
||||
# endif
|
||||
pop {r7, pc}
|
||||
# else
|
||||
JMP(lr)
|
||||
# endif
|
||||
|
||||
|
||||
#if defined(USE_THUMB_1)
|
||||
@ -251,16 +263,6 @@ LOCAL_LABEL(div0block):
|
||||
JMP(lr)
|
||||
#endif /* __ARM_ARCH_EXT_IDIV__ */
|
||||
|
||||
#if __ARM_ARCH_EXT_IDIV__
|
||||
LOCAL_LABEL(divby0):
|
||||
mov r0, #0
|
||||
# ifdef __ARM_EABI__
|
||||
b __aeabi_idiv0
|
||||
# else
|
||||
JMP(lr)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
END_COMPILERRT_FUNCTION(__udivsi3)
|
||||
|
||||
NO_EXEC_STACK_DIRECTIVE
|
||||
|
@ -82,10 +82,6 @@ uintptr_t GetCurrentProcess(void);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__linux__) && defined(__arm__)
|
||||
#include <asm/unistd.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The compiler generates calls to __clear_cache() when creating
|
||||
* trampoline functions on the stack for use with nested functions.
|
||||
@ -94,7 +90,7 @@ uintptr_t GetCurrentProcess(void);
|
||||
*/
|
||||
|
||||
void __clear_cache(void *start, void *end) {
|
||||
#if __i386__ || __x86_64__
|
||||
#if __i386__ || __x86_64__ || defined(_M_IX86) || defined(_M_X64)
|
||||
/*
|
||||
* Intel processors have a unified instruction and data cache
|
||||
* so there is nothing to do
|
||||
@ -108,6 +104,15 @@ void __clear_cache(void *start, void *end) {
|
||||
|
||||
sysarch(ARM_SYNC_ICACHE, &arg);
|
||||
#elif defined(__linux__)
|
||||
/*
 * We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but
 * it also brought many other unused defines, as well as a dependency on
 * kernel headers to be installed.
 *
 * This value is stable at least since Linux 3.13 and should remain so for
 * compatibility reasons, warranting its redefinition here.
 */
|
||||
#define __ARM_NR_cacheflush 0x0f0002
|
||||
register int start_reg __asm("r0") = (int) (intptr_t) start;
|
||||
const register int end_reg __asm("r1") = (int) (intptr_t) end;
|
||||
const register int flags __asm("r2") = 0;
|
||||
|
@ -27,6 +27,10 @@
|
||||
#include <intrin.h>
|
||||
#endif
|
||||
|
||||
#ifndef __has_attribute
|
||||
#define __has_attribute(attr) 0
|
||||
#endif
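/* The fallback above makes the __has_attribute(...) check below safe on
 * compilers that do not provide this feature-test macro (it then evaluates
 * to 0). */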
|
||||
|
||||
enum VendorSignatures {
|
||||
SIG_INTEL = 0x756e6547 /* Genu */,
|
||||
SIG_AMD = 0x68747541 /* Auth */
|
||||
@ -720,14 +724,17 @@ static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
|
||||
return Features;
|
||||
}
|
||||
|
||||
#ifdef HAVE_INIT_PRIORITY
|
||||
#define CONSTRUCTOR_PRIORITY (101)
|
||||
#if defined(HAVE_INIT_PRIORITY)
|
||||
#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
|
||||
#elif __has_attribute(__constructor__)
|
||||
#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
|
||||
#else
|
||||
#define CONSTRUCTOR_PRIORITY
|
||||
// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
|
||||
// this runs during initialization.
|
||||
#define CONSTRUCTOR_ATTRIBUTE
|
||||
#endif
|
||||
|
||||
int __cpu_indicator_init(void)
|
||||
__attribute__((constructor CONSTRUCTOR_PRIORITY));
|
||||
int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;
|
||||
|
||||
struct __processor_model {
|
||||
unsigned int __cpu_vendor;
|
||||
@ -742,7 +749,7 @@ struct __processor_model {
|
||||
the priority set. However, it still runs after ifunc initializers and
|
||||
needs to be called explicitly there. */
|
||||
|
||||
int __attribute__((constructor CONSTRUCTOR_PRIORITY))
|
||||
int CONSTRUCTOR_ATTRIBUTE
|
||||
__cpu_indicator_init(void) {
|
||||
unsigned int EAX, EBX, ECX, EDX;
|
||||
unsigned int MaxLeaf = 5;
|
||||
|
@ -17,7 +17,7 @@
|
||||
|
||||
/* Returns: the quotient of (a + ib) / (c + id) */
|
||||
|
||||
COMPILER_RT_ABI long double _Complex
|
||||
COMPILER_RT_ABI Lcomplex
|
||||
__divtc3(long double __a, long double __b, long double __c, long double __d)
|
||||
{
|
||||
int __ilogbw = 0;
|
||||
@ -29,31 +29,31 @@ __divtc3(long double __a, long double __b, long double __c, long double __d)
|
||||
__d = crt_scalbnl(__d, -__ilogbw);
|
||||
}
|
||||
long double __denom = __c * __c + __d * __d;
|
||||
long double _Complex z;
|
||||
__real__ z = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
|
||||
__imag__ z = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
|
||||
if (crt_isnan(__real__ z) && crt_isnan(__imag__ z))
|
||||
Lcomplex z;
|
||||
COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
|
||||
COMPLEX_IMAGINARY(z) = crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
|
||||
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
|
||||
{
|
||||
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b)))
|
||||
{
|
||||
__real__ z = crt_copysignl(CRT_INFINITY, __c) * __a;
|
||||
__imag__ z = crt_copysignl(CRT_INFINITY, __c) * __b;
|
||||
COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
|
||||
COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
|
||||
}
|
||||
else if ((crt_isinf(__a) || crt_isinf(__b)) &&
|
||||
crt_isfinite(__c) && crt_isfinite(__d))
|
||||
{
|
||||
__a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
|
||||
__b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
|
||||
__real__ z = CRT_INFINITY * (__a * __c + __b * __d);
|
||||
__imag__ z = CRT_INFINITY * (__b * __c - __a * __d);
|
||||
COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
|
||||
COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
|
||||
}
|
||||
else if (crt_isinf(__logbw) && __logbw > 0.0 &&
|
||||
crt_isfinite(__a) && crt_isfinite(__b))
|
||||
{
|
||||
__c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
|
||||
__d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
|
||||
__real__ z = 0.0 * (__a * __c + __b * __d);
|
||||
__imag__ z = 0.0 * (__b * __c - __a * __d);
|
||||
COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
|
||||
COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
|
||||
}
|
||||
}
|
||||
return z;
|
||||
|
@ -32,15 +32,13 @@
|
||||
#if __ARM_EABI__
|
||||
# define ARM_EABI_FNALIAS(aeabi_name, name) \
|
||||
void __aeabi_##aeabi_name() __attribute__((alias("__" #name)));
|
||||
|
||||
# if !defined(__clang__) && defined(__GNUC__) && \
|
||||
(__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 5)
|
||||
# if defined(COMPILER_RT_ARMHF_TARGET) || (!defined(__clang__) && \
|
||||
defined(__GNUC__) && (__GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ < 5))
|
||||
/* The pcs attribute was introduced in GCC 4.5.0 */
|
||||
# define COMPILER_RT_ABI
|
||||
# define COMPILER_RT_ABI
|
||||
# else
|
||||
# define COMPILER_RT_ABI __attribute__((pcs("aapcs")))
|
||||
# define COMPILER_RT_ABI __attribute__((pcs("aapcs")))
|
||||
# endif
|
||||
|
||||
#else
|
||||
# define ARM_EABI_FNALIAS(aeabi_name, name)
|
||||
# define COMPILER_RT_ABI
|
||||
|
178
contrib/compiler-rt/lib/builtins/os_version_check.c
Normal file
178
contrib/compiler-rt/lib/builtins/os_version_check.c
Normal file
@ -0,0 +1,178 @@
|
||||
/* ===-- os_version_check.c - OS version checking -------------------------===
|
||||
*
|
||||
* The LLVM Compiler Infrastructure
|
||||
*
|
||||
* This file is dual licensed under the MIT and the University of Illinois Open
|
||||
* Source Licenses. See LICENSE.TXT for details.
|
||||
*
|
||||
* ===----------------------------------------------------------------------===
|
||||
*
|
||||
* This file implements the function __isOSVersionAtLeast, used by
|
||||
* Objective-C's @available
|
||||
*
|
||||
* ===----------------------------------------------------------------------===
|
||||
*/
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
#include <CoreFoundation/CoreFoundation.h>
|
||||
#include <dispatch/dispatch.h>
|
||||
#include <TargetConditionals.h>
|
||||
#include <dlfcn.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
/* These three variables hold the host's OS version. */
|
||||
static int32_t GlobalMajor, GlobalMinor, GlobalSubminor;
|
||||
static dispatch_once_t DispatchOnceCounter;
|
||||
|
||||
/* Find and parse the SystemVersion.plist file. */
|
||||
static void parseSystemVersionPList(void *Unused) {
|
||||
(void)Unused;
|
||||
/* Load CoreFoundation dynamically */
|
||||
const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull");
|
||||
if (!NullAllocator)
|
||||
return;
|
||||
const CFAllocatorRef kCFAllocatorNull =
|
||||
*(const CFAllocatorRef *)NullAllocator;
|
||||
typeof(CFDataCreateWithBytesNoCopy) *CFDataCreateWithBytesNoCopyFunc =
|
||||
(typeof(CFDataCreateWithBytesNoCopy) *)dlsym(
|
||||
RTLD_DEFAULT, "CFDataCreateWithBytesNoCopy");
|
||||
if (!CFDataCreateWithBytesNoCopyFunc)
|
||||
return;
|
||||
typeof(CFPropertyListCreateWithData) *CFPropertyListCreateWithDataFunc =
|
||||
(typeof(CFPropertyListCreateWithData) *)dlsym(
|
||||
RTLD_DEFAULT, "CFPropertyListCreateWithData");
|
||||
/* CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it
|
||||
* will be NULL on earlier OS versions. */
|
||||
#pragma clang diagnostic push
|
||||
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
|
||||
typeof(CFPropertyListCreateFromXMLData) *CFPropertyListCreateFromXMLDataFunc =
|
||||
(typeof(CFPropertyListCreateFromXMLData) *)dlsym(
|
||||
RTLD_DEFAULT, "CFPropertyListCreateFromXMLData");
|
||||
#pragma clang diagnostic pop
|
||||
/* CFPropertyListCreateFromXMLDataFunc is deprecated in macOS 10.10, so it
|
||||
* might be NULL in future OS versions. */
|
||||
if (!CFPropertyListCreateWithDataFunc && !CFPropertyListCreateFromXMLDataFunc)
|
||||
return;
|
||||
typeof(CFStringCreateWithCStringNoCopy) *CFStringCreateWithCStringNoCopyFunc =
|
||||
(typeof(CFStringCreateWithCStringNoCopy) *)dlsym(
|
||||
RTLD_DEFAULT, "CFStringCreateWithCStringNoCopy");
|
||||
if (!CFStringCreateWithCStringNoCopyFunc)
|
||||
return;
|
||||
typeof(CFDictionaryGetValue) *CFDictionaryGetValueFunc =
|
||||
(typeof(CFDictionaryGetValue) *)dlsym(RTLD_DEFAULT,
|
||||
"CFDictionaryGetValue");
|
||||
if (!CFDictionaryGetValueFunc)
|
||||
return;
|
||||
typeof(CFGetTypeID) *CFGetTypeIDFunc =
|
||||
(typeof(CFGetTypeID) *)dlsym(RTLD_DEFAULT, "CFGetTypeID");
|
||||
if (!CFGetTypeIDFunc)
|
||||
return;
|
||||
typeof(CFStringGetTypeID) *CFStringGetTypeIDFunc =
|
||||
(typeof(CFStringGetTypeID) *)dlsym(RTLD_DEFAULT, "CFStringGetTypeID");
|
||||
if (!CFStringGetTypeIDFunc)
|
||||
return;
|
||||
typeof(CFStringGetCString) *CFStringGetCStringFunc =
|
||||
(typeof(CFStringGetCString) *)dlsym(RTLD_DEFAULT, "CFStringGetCString");
|
||||
if (!CFStringGetCStringFunc)
|
||||
return;
|
||||
typeof(CFRelease) *CFReleaseFunc =
|
||||
(typeof(CFRelease) *)dlsym(RTLD_DEFAULT, "CFRelease");
|
||||
if (!CFReleaseFunc)
|
||||
return;
|
||||
|
||||
char *PListPath = "/System/Library/CoreServices/SystemVersion.plist";
|
||||
|
||||
#if TARGET_OS_SIMULATOR
|
||||
char *PListPathPrefix = getenv("IPHONE_SIMULATOR_ROOT");
|
||||
if (!PListPathPrefix)
|
||||
return;
|
||||
char FullPath[strlen(PListPathPrefix) + strlen(PListPath) + 1];
|
||||
strcpy(FullPath, PListPathPrefix);
|
||||
strcat(FullPath, PListPath);
|
||||
PListPath = FullPath;
|
||||
#endif
|
||||
FILE *PropertyList = fopen(PListPath, "r");
|
||||
if (!PropertyList)
|
||||
return;
|
||||
|
||||
/* Dynamically allocated stuff. */
|
||||
CFDictionaryRef PListRef = NULL;
|
||||
CFDataRef FileContentsRef = NULL;
|
||||
UInt8 *PListBuf = NULL;
|
||||
|
||||
fseek(PropertyList, 0, SEEK_END);
|
||||
long PListFileSize = ftell(PropertyList);
|
||||
if (PListFileSize < 0)
|
||||
goto Fail;
|
||||
rewind(PropertyList);
|
||||
|
||||
PListBuf = malloc((size_t)PListFileSize);
|
||||
if (!PListBuf)
|
||||
goto Fail;
|
||||
|
||||
size_t NumRead = fread(PListBuf, 1, (size_t)PListFileSize, PropertyList);
|
||||
if (NumRead != (size_t)PListFileSize)
|
||||
goto Fail;
|
||||
|
||||
/* Get the file buffer into CF's format. We pass in a null allocator here *
|
||||
* because we free PListBuf ourselves */
|
||||
FileContentsRef = (*CFDataCreateWithBytesNoCopyFunc)(
|
||||
NULL, PListBuf, (CFIndex)NumRead, kCFAllocatorNull);
|
||||
if (!FileContentsRef)
|
||||
goto Fail;
|
||||
|
||||
if (CFPropertyListCreateWithDataFunc)
|
||||
PListRef = (*CFPropertyListCreateWithDataFunc)(
|
||||
NULL, FileContentsRef, kCFPropertyListImmutable, NULL, NULL);
|
||||
else
|
||||
PListRef = (*CFPropertyListCreateFromXMLDataFunc)(
|
||||
NULL, FileContentsRef, kCFPropertyListImmutable, NULL);
|
||||
if (!PListRef)
|
||||
goto Fail;
|
||||
|
||||
CFStringRef ProductVersion = (*CFStringCreateWithCStringNoCopyFunc)(
|
||||
NULL, "ProductVersion", kCFStringEncodingASCII, kCFAllocatorNull);
|
||||
if (!ProductVersion)
|
||||
goto Fail;
|
||||
CFTypeRef OpaqueValue = (*CFDictionaryGetValueFunc)(PListRef, ProductVersion);
|
||||
(*CFReleaseFunc)(ProductVersion);
|
||||
if (!OpaqueValue ||
|
||||
(*CFGetTypeIDFunc)(OpaqueValue) != (*CFStringGetTypeIDFunc)())
|
||||
goto Fail;
|
||||
|
||||
char VersionStr[32];
|
||||
if (!(*CFStringGetCStringFunc)((CFStringRef)OpaqueValue, VersionStr,
|
||||
sizeof(VersionStr), kCFStringEncodingUTF8))
|
||||
goto Fail;
|
||||
sscanf(VersionStr, "%d.%d.%d", &GlobalMajor, &GlobalMinor, &GlobalSubminor);
|
||||
|
||||
Fail:
|
||||
if (PListRef)
|
||||
(*CFReleaseFunc)(PListRef);
|
||||
if (FileContentsRef)
|
||||
(*CFReleaseFunc)(FileContentsRef);
|
||||
free(PListBuf);
|
||||
fclose(PropertyList);
|
||||
}
|
||||
|
||||
int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
|
||||
/* Populate the global version variables, if they haven't already. */
|
||||
dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList);
|
||||
|
||||
if (Major < GlobalMajor) return 1;
|
||||
if (Major > GlobalMajor) return 0;
|
||||
if (Minor < GlobalMinor) return 1;
|
||||
if (Minor > GlobalMinor) return 0;
|
||||
return Subminor <= GlobalSubminor;
|
||||
}
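
/* Illustrative sketch only (hypothetical caller): the compiler emits calls of
 * this form when lowering availability checks; hand-written code would look
 * the same.
 *
 *   if (__isOSVersionAtLeast(10, 12, 0)) {
 *     // Running on macOS 10.12 or newer: the newer API may be used.
 *   } else {
 *     // Older OS: fall back to the legacy code path.
 *   }
 */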
|
||||
|
||||
#else
|
||||
|
||||
/* Silence an empty translation unit warning. */
|
||||
typedef int unused;
|
||||
|
||||
#endif
|
@ -4,7 +4,7 @@
|
||||
|
||||
/* double __floatdidf(di_int a); */
|
||||
|
||||
#ifdef __x86_64__
|
||||
#if defined(__x86_64__) || defined(_M_X64)
|
||||
|
||||
#include "../int_lib.h"
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
* License. See LICENSE.TXT for details.
|
||||
*/
|
||||
|
||||
#ifdef __x86_64__
|
||||
#if defined(__x86_64__) || defined(_M_X64)
|
||||
|
||||
#include "../int_lib.h"
|
||||
|
||||
|
@ -188,12 +188,14 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
|
||||
}
|
||||
}
|
||||
if (!dynamic) return 0;
|
||||
uptr strtab = 0, symtab = 0;
|
||||
uptr strtab = 0, symtab = 0, strsz = 0;
|
||||
for (const ElfW(Dyn) *p = dynamic; p->d_tag != PT_NULL; ++p) {
|
||||
if (p->d_tag == DT_SYMTAB)
|
||||
symtab = p->d_un.d_ptr;
|
||||
else if (p->d_tag == DT_STRTAB)
|
||||
strtab = p->d_un.d_ptr;
|
||||
else if (p->d_tag == DT_STRSZ)
|
||||
strsz = p->d_un.d_ptr;
|
||||
}
|
||||
|
||||
if (symtab > strtab) {
|
||||
@ -209,7 +211,8 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
|
||||
if (phdr->p_type == PT_LOAD) {
|
||||
uptr beg = info->dlpi_addr + phdr->p_vaddr;
|
||||
uptr end = beg + phdr->p_memsz;
|
||||
if (strtab >= beg && strtab < end && symtab >= beg && symtab < end)
|
||||
if (strtab >= beg && strtab + strsz < end && symtab >= beg &&
|
||||
symtab < end)
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -222,9 +225,14 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
|
||||
|
||||
for (const ElfW(Sym) *p = (const ElfW(Sym) *)symtab; (ElfW(Addr))p < strtab;
|
||||
++p) {
|
||||
// There is no reliable way to find the end of the symbol table. In
// lld-produced files, there are other sections between symtab and strtab.
// Stop looking when the symbol name is not inside strtab.
|
||||
if (p->st_name >= strsz) break;
|
||||
char *name = (char*)(strtab + p->st_name);
|
||||
if (strcmp(name, "__cfi_check") == 0) {
|
||||
assert(p->st_info == ELF32_ST_INFO(STB_GLOBAL, STT_FUNC));
|
||||
assert(p->st_info == ELF32_ST_INFO(STB_GLOBAL, STT_FUNC) ||
|
||||
p->st_info == ELF32_ST_INFO(STB_WEAK, STT_FUNC));
|
||||
uptr addr = info->dlpi_addr + p->st_value;
|
||||
return addr;
|
||||
}
|
||||
|
@ -285,22 +285,8 @@ fun:__sanitizer_cov_module_init=uninstrumented
|
||||
fun:__sanitizer_cov_module_init=discard
|
||||
fun:__sanitizer_cov_with_check=uninstrumented
|
||||
fun:__sanitizer_cov_with_check=discard
|
||||
fun:__sanitizer_cov_indir_call16=uninstrumented
|
||||
fun:__sanitizer_cov_indir_call16=discard
|
||||
fun:__sanitizer_cov_indir_call16=uninstrumented
|
||||
fun:__sanitizer_cov_indir_call16=discard
|
||||
fun:__sanitizer_reset_coverage=uninstrumented
|
||||
fun:__sanitizer_reset_coverage=discard
|
||||
fun:__sanitizer_set_death_callback=uninstrumented
|
||||
fun:__sanitizer_set_death_callback=discard
|
||||
fun:__sanitizer_get_coverage_guards=uninstrumented
|
||||
fun:__sanitizer_get_coverage_guards=discard
|
||||
fun:__sanitizer_get_number_of_counters=uninstrumented
|
||||
fun:__sanitizer_get_number_of_counters=discard
|
||||
fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented
|
||||
fun:__sanitizer_update_counter_bitset_and_clear_counters=discard
|
||||
fun:__sanitizer_get_total_unique_coverage=uninstrumented
|
||||
fun:__sanitizer_get_total_unique_coverage=discard
|
||||
fun:__sanitizer_get_total_unique_coverage=uninstrumented
|
||||
fun:__sanitizer_get_total_unique_coverage=discard
|
||||
fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented
|
||||
|
@ -304,20 +304,6 @@ INTERCEPTOR(int, unlink, char *path) {
|
||||
return REAL(unlink)(path);
|
||||
}
|
||||
|
||||
INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, f);
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size * nmemb);
|
||||
return REAL(fread)(ptr, size, nmemb, f);
|
||||
}
|
||||
|
||||
INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, f);
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, p, size * nmemb);
|
||||
return REAL(fwrite)(p, size, nmemb, f);
|
||||
}
|
||||
|
||||
INTERCEPTOR(int, puts, const char *s) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
|
||||
|
@ -878,6 +878,8 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
|
||||
|
||||
IMAGE_DATA_DIRECTORY *export_directory =
|
||||
&headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
|
||||
if (export_directory->Size == 0)
|
||||
return 0;
|
||||
RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
|
||||
export_directory->VirtualAddress);
|
||||
RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
|
||||
|
@ -76,6 +76,7 @@ extern "C" void __lsan_init() {
|
||||
InitializeFlags();
|
||||
InitCommonLsan();
|
||||
InitializeAllocator();
|
||||
ReplaceSystemMalloc();
|
||||
InitTlsSize();
|
||||
InitializeInterceptors();
|
||||
InitializeThreadRegistry();
|
||||
|
@ -41,6 +41,13 @@
|
||||
namespace __lsan {
|
||||
|
||||
void InitializeInterceptors();
|
||||
void ReplaceSystemMalloc();
|
||||
|
||||
#define ENSURE_LSAN_INITED do { \
|
||||
CHECK(!lsan_init_is_running); \
|
||||
if (!lsan_inited) \
|
||||
__lsan_init(); \
|
||||
} while (0)
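
// A typical call site (sketch, mirroring the allocator interceptors): the
// guard runs before the allocator is touched, e.g.
//
//   INTERCEPTOR(void*, malloc, uptr size) {
//     ENSURE_LSAN_INITED;
//     GET_STACK_TRACE_MALLOC;
//     return lsan_malloc(size, stack);
//   }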
|
||||
|
||||
} // namespace __lsan
|
||||
|
||||
|
@ -24,44 +24,18 @@
|
||||
extern "C" void *memset(void *ptr, int value, uptr num);
|
||||
|
||||
namespace __lsan {
|
||||
|
||||
struct ChunkMetadata {
|
||||
u8 allocated : 8; // Must be first.
|
||||
ChunkTag tag : 2;
|
||||
uptr requested_size : 54;
|
||||
u32 stack_trace_id;
|
||||
};
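// Note: the bit-fields are sized to pack into a single 64-bit word
// (8 + 2 + 54 = 64 bits); the 32-bit stack trace id follows.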
|
||||
|
||||
#if defined(__mips64) || defined(__aarch64__)
|
||||
#if defined(__i386__) || defined(__arm__)
|
||||
static const uptr kMaxAllowedMallocSize = 1UL << 30;
|
||||
#elif defined(__mips64) || defined(__aarch64__)
|
||||
static const uptr kMaxAllowedMallocSize = 4UL << 30;
|
||||
static const uptr kRegionSizeLog = 20;
|
||||
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
|
||||
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
|
||||
typedef CompactSizeClassMap SizeClassMap;
|
||||
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
|
||||
sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
|
||||
PrimaryAllocator;
|
||||
#else
|
||||
static const uptr kMaxAllowedMallocSize = 8UL << 30;
|
||||
|
||||
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
|
||||
static const uptr kSpaceBeg = 0x600000000000ULL;
|
||||
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
|
||||
static const uptr kMetadataSize = sizeof(ChunkMetadata);
|
||||
typedef DefaultSizeClassMap SizeClassMap;
|
||||
typedef NoOpMapUnmapCallback MapUnmapCallback;
|
||||
static const uptr kFlags = 0;
|
||||
};
|
||||
|
||||
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
|
||||
#endif
|
||||
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
|
||||
typedef LargeMmapAllocator<> SecondaryAllocator;
|
||||
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
|
||||
SecondaryAllocator> Allocator;
|
||||
|
||||
static Allocator allocator;
|
||||
static THREADLOCAL AllocatorCache cache;
|
||||
|
||||
void InitializeAllocator() {
|
||||
allocator.InitLinkerInitialized(
|
||||
@ -70,7 +44,7 @@ void InitializeAllocator() {
|
||||
}
|
||||
|
||||
void AllocatorThreadFinish() {
|
||||
allocator.SwallowCache(&cache);
|
||||
allocator.SwallowCache(GetAllocatorCache());
|
||||
}
|
||||
|
||||
static ChunkMetadata *Metadata(const void *p) {
|
||||
@ -102,7 +76,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
|
||||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
|
||||
return nullptr;
|
||||
}
|
||||
void *p = allocator.Allocate(&cache, size, alignment, false);
|
||||
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
|
||||
// Do not rely on the allocator to clear the memory (it's slow).
|
||||
if (cleared && allocator.FromPrimary(p))
|
||||
memset(p, 0, size);
|
||||
@ -116,7 +90,7 @@ void Deallocate(void *p) {
|
||||
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
|
||||
RunFreeHooks(p);
|
||||
RegisterDeallocation(p);
|
||||
allocator.Deallocate(&cache, p);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
}
|
||||
|
||||
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
|
||||
@ -124,17 +98,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
|
||||
RegisterDeallocation(p);
|
||||
if (new_size > kMaxAllowedMallocSize) {
|
||||
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
|
||||
allocator.Deallocate(&cache, p);
|
||||
allocator.Deallocate(GetAllocatorCache(), p);
|
||||
return nullptr;
|
||||
}
|
||||
p = allocator.Reallocate(&cache, p, new_size, alignment);
|
||||
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
|
||||
RegisterAllocation(stack, p, new_size);
|
||||
return p;
|
||||
}
|
||||
|
||||
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
|
||||
*begin = (uptr)&cache;
|
||||
*end = *begin + sizeof(cache);
|
||||
*begin = (uptr)GetAllocatorCache();
|
||||
*end = *begin + sizeof(AllocatorCache);
|
||||
}
|
||||
|
||||
uptr GetMallocUsableSize(const void *p) {
|
||||
@ -143,6 +117,37 @@ uptr GetMallocUsableSize(const void *p) {
|
||||
return m->requested_size;
|
||||
}
|
||||
|
||||
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
|
||||
return Allocate(stack, size, alignment, kAlwaysClearMemory);
|
||||
}
|
||||
|
||||
void *lsan_malloc(uptr size, const StackTrace &stack) {
|
||||
return Allocate(stack, size, 1, kAlwaysClearMemory);
|
||||
}
|
||||
|
||||
void lsan_free(void *p) {
|
||||
Deallocate(p);
|
||||
}
|
||||
|
||||
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
|
||||
return Reallocate(stack, p, size, 1);
|
||||
}
|
||||
|
||||
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
|
||||
size *= nmemb;
|
||||
return Allocate(stack, size, 1, true);
|
||||
}
|
||||
|
||||
void *lsan_valloc(uptr size, const StackTrace &stack) {
|
||||
if (size == 0)
|
||||
size = GetPageSizeCached();
|
||||
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
|
||||
}
|
||||
|
||||
uptr lsan_mz_size(const void *p) {
|
||||
return GetMallocUsableSize(p);
|
||||
}
|
||||
|
||||
///// Interface to the common LSan module. /////
|
||||
|
||||
void LockAllocator() {
|
||||
|
@ -15,8 +15,10 @@
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "lsan_common.h"

namespace __lsan {

@ -34,6 +36,53 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();

const bool kAlwaysClearMemory = true;

struct ChunkMetadata {
  u8 allocated : 8;  // Must be first.
  ChunkTag tag : 2;
#if SANITIZER_WORDSIZE == 64
  uptr requested_size : 54;
#else
  uptr requested_size : 32;
  uptr padding : 22;
#endif
  u32 stack_trace_id;
};

#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
    defined(__arm__)
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
    PrimaryAllocator;
#elif defined(__x86_64__)
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = 0x600000000000ULL;
  static const uptr kSpaceSize = 0x40000000000ULL;  // 4T.
  static const uptr kMetadataSize = sizeof(ChunkMetadata);
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};

typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;

AllocatorCache *GetAllocatorCache();

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
void *lsan_malloc(uptr size, const StackTrace &stack);
void lsan_free(void *p);
void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
void *lsan_valloc(uptr size, const StackTrace &stack);
uptr lsan_mz_size(const void *p);

}  // namespace __lsan

#endif  // LSAN_ALLOCATOR_H
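// Illustrative note (editorial sketch, not part of the merged sources): the
// bit-field widths in ChunkMetadata above bound what can be recorded for each
// chunk. On 64-bit targets requested_size keeps 54 bits, which comfortably
// covers the largest request the allocator accepts (kMaxAllowedMallocSize is
// 8UL << 30 in lsan_allocator.cc), so a compile-time sanity check like this
// would hold:
//
//   static_assert((8ULL << 30) < (1ULL << 54),
//                 "requested_size:54 can represent any allowed malloc size");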
@ -32,20 +32,15 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (!disable_counter && common_flags()->detect_leaks) {
Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  disable_counter--;
}

Flags lsan_flags;

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
@ -73,6 +68,14 @@ ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // The actual string allocation happens here (for more details refer to the
  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT definition).
  "leak:*_dl_map_object_deps*";
#else
  "";
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
@ -81,6 +84,7 @@ void InitializeSuppressions() {
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
@ -88,12 +92,9 @@ static SuppressionContext *GetSuppressionContext() {
  return suppression_ctx;
}

struct RootRegion {
  const void *begin;
  uptr size;
};
static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> *root_regions;
InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
@ -180,6 +181,23 @@ void ScanRangeForPointers(uptr begin, uptr end,
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
@ -188,11 +206,11 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
@ -206,11 +224,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
      Report("Unable to get registers from thread %d.\n");
      // If unable to get SP, consider the entire stack to be reachable.
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

@ -258,7 +278,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
      if (tls_end > cache_end)
        ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
    }
    if (dtls) {
    if (dtls && !DTLSInDestruction(dtls)) {
      for (uptr j = 0; j < dtls->dtv_size; ++j) {
        uptr dtls_beg = dtls->dtv[j].beg;
        uptr dtls_end = dtls_beg + dtls->dtv[j].size;
@ -268,28 +288,38 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                             kReachable);
      }
    }
    } else {
      // We are handling a thread with DTLS under destruction. Log about
      // this and continue.
      LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
    }
  }
}
}

static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
                              uptr root_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, uptr prot) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr begin, end, prot;
  while (proc_maps.Next(&begin, &end,
                        /*offset*/ nullptr, /*filename*/ nullptr,
                        /*filename_size*/ 0, &prot)) {
    uptr intersection_begin = Max(root_begin, begin);
    uptr intersection_end = Min(end, root_end);
    if (intersection_begin >= intersection_end) continue;
    bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
    LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
                 root_begin, root_end, begin, end,
                 is_readable ? "readable" : "unreadable");
    if (is_readable)
      ScanRangeForPointers(intersection_begin, intersection_end, frontier,
                           "ROOT", kReachable);
    ScanRootRegion(frontier, root_region, begin, end, prot);
  }
}

@ -298,9 +328,7 @@ static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    uptr begin_addr = reinterpret_cast<uptr>(region.begin);
    ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

@ -338,6 +366,72 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, handles dynamically allocated TLS blocks by treating all chunks
// allocated from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
@ -349,11 +443,13 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  CHECK_EQ(0, frontier.size());
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

@ -689,7 +785,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {begin, size};
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
@ -703,7 +799,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == begin && region.size == size) {
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
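// Illustrative example (editorial sketch, not part of the merged sources) for
// ScanRootRegion above: a registered root region is clipped against each
// mapped region and only the readable overlap is scanned. For a root region
// [0x1000, 0x5000) checked against a readable mapping [0x3000, 0x8000):
//
//   intersection_begin = Max(0x1000, 0x3000);  // == 0x3000
//   intersection_end   = Min(0x8000, 0x5000);  // == 0x5000
//
// so only [0x3000, 0x5000) is handed to ScanRangeForPointers.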
@ -22,8 +22,23 @@
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
     && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
// LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) thus
// supported for Linux only. Also, LSan doesn't like 32 bit architectures
// because of "small" (4 bytes) pointer size that leads to high false negative
// ratio on large leaks. But we still want to have it for some 32 bit arches
// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
// To enable LeakSanitizer on new architecture, one need to implement
// internal_clone function as well as (probably) adjust TLS machinery for
// new architecture inside sanitizer library.
#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
    (SANITIZER_WORDSIZE == 64) && \
    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && \
    SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@ -44,6 +59,8 @@ enum ChunkTag {
  kIgnored = 3
};

const u32 kInvalidTid = (u32) -1;

struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@ -101,12 +118,22 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);

struct RootRegion {
  uptr begin;
  uptr size;
};

InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, uptr prot);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);

void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);

enum IgnoreObjectResult {
  kIgnoreObjectSuccess,
@ -117,6 +144,7 @@ enum IgnoreObjectResult {
// Functions called from the parent tool.
void InitCommonLsan();
void DoLeakCheck();
void DisableCounterUnderflow();
bool DisabledInThisThread();

// Used to implement __lsan::ScopedDisabler.
@ -129,15 +157,38 @@ struct ScopedInterceptorDisabler {
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};

// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and store a pointer
// to the end of allocated chunk.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
// According to Itanium C++ ABI array cookie is a one word containing
// size of allocated array.
static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                           uptr addr) {
  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg) == 0;
}

// According to ARM C++ ABI array cookie consists of two words:
// struct array_cookie {
//   std::size_t element_size; // element_size != 0
//   std::size_t element_count;
// };
static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
                                       uptr addr) {
  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
}

// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate a cookie (one or two words) for the array size (0)
// and store a pointer to the end of allocated chunk. The actual cookie layout
// varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
                                        uptr addr) {
#if defined(__arm__)
  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
#else
  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
#endif
}

// The following must be implemented in the parent tool.

void ForEachChunk(ForEachChunkCallback callback, void *arg);
@ -151,10 +202,10 @@ bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
@ -170,6 +221,10 @@ uptr PointsIntoChunk(void *p);
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);

// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();

// Wrapper for chunk metadata operations.
class LsanMetadata {
 public:
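// Illustrative example (editorial sketch, not part of the merged sources) for
// the array-cookie helpers introduced above: for "new T[0]" the allocator sees
// a chunk that holds only the cookie, and the pointer handed back to the
// program points one past the chunk. So for a one-word chunk at 0x1000 whose
// single word is 0, the Itanium check succeeds for the candidate pointer just
// past it:
//
//   IsItaniumABIArrayCookie(/*chunk_beg=*/0x1000, /*chunk_size=*/sizeof(uptr),
//                           /*addr=*/0x1000 + sizeof(uptr));   // -> true
//
// The ARM variant instead expects two words {element_size, element_count} and
// requires the second word (the element count) to be 0.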
@ -34,6 +34,17 @@ static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}

__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
void EnableInThisThread() {
  if (disable_counter == 0) {
    DisableCounterUnderflow();
  }
  disable_counter--;
}

void InitializePlatformSpecificModules() {
  ListOfModules modules;
  modules.init();
@ -67,20 +78,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
      continue;
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
    uptr allocator_begin = 0, allocator_end = 0;
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      CHECK_LE(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
    ScanGlobalRange(begin, end, frontier);
  }
  return 0;
}
@ -91,70 +89,9 @@ void ProcessGlobalRegions(Frontier *frontier) {
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}
LoadedModule *GetLinker() { return linker; }

struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated as
// reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           linker->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
void ProcessPlatformSpecificAllocations(Frontier *frontier) {}

struct DoStopTheWorldParam {
  StopTheWorldCallback callback;
173
contrib/compiler-rt/lib/lsan/lsan_common_mac.cc
Normal file
@ -0,0 +1,173 @@
//=-- lsan_common_mac.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Darwin-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_MAC

#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "lsan_allocator.h"

#include <pthread.h>

#include <mach/mach.h>

namespace __lsan {

typedef struct {
  int disable_counter;
  u32 current_thread_id;
  AllocatorCache cache;
} thread_local_data_t;

static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

// The main thread destructor requires the current thread id,
// so we can't destroy it until it's been used and reset to invalid tid
void restore_tid_data(void *ptr) {
  thread_local_data_t *data = (thread_local_data_t *)ptr;
  if (data->current_thread_id != kInvalidTid)
    pthread_setspecific(key, data);
}

static void make_tls_key() {
  CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
}

static thread_local_data_t *get_tls_val(bool alloc) {
  pthread_once(&key_once, make_tls_key);

  thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
  if (ptr == NULL && alloc) {
    ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
    ptr->disable_counter = 0;
    ptr->current_thread_id = kInvalidTid;
    ptr->cache = AllocatorCache();
    pthread_setspecific(key, ptr);
  }

  return ptr;
}

bool DisabledInThisThread() {
  thread_local_data_t *data = get_tls_val(false);
  return data ? data->disable_counter > 0 : false;
}

void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }

void EnableInThisThread() {
  int *disable_counter = &get_tls_val(true)->disable_counter;
  if (*disable_counter == 0) {
    DisableCounterUnderflow();
  }
  --*disable_counter;
}

u32 GetCurrentThread() {
  thread_local_data_t *data = get_tls_val(false);
  CHECK(data);
  return data->current_thread_id;
}

void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }

AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }

LoadedModule *GetLinker() { return nullptr; }

// Required on Linux for initialization of TLS behavior, but should not be
// required on Darwin.
void InitializePlatformSpecificModules() {
  if (flags()->use_tls) {
    Report("use_tls=1 is not supported on Darwin.\n");
    Die();
  }
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  MemoryMappingLayout memory_mapping(false);
  InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
  memory_mapping.DumpListOfModules(&modules);
  for (uptr i = 0; i < modules.size(); ++i) {
    // Even when global scanning is disabled, we still need to scan
    // system libraries for stashed pointers
    if (!flags()->use_globals && modules[i].instrumented()) continue;

    for (const __sanitizer::LoadedModule::AddressRange &range :
         modules[i].ranges()) {
      if (range.executable || !range.readable) continue;

      ScanGlobalRange(range.beg, range.end, frontier);
    }
  }
}

void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  mach_port_name_t port;
  if (task_for_pid(mach_task_self(), internal_getpid(), &port)
      != KERN_SUCCESS) {
    return;
  }

  unsigned depth = 1;
  vm_size_t size = 0;
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;
  mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

  InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();

  while (err == KERN_SUCCESS) {
    struct vm_region_submap_info_64 info;
    err = vm_region_recurse_64(port, &address, &size, &depth,
                               (vm_region_info_t)&info, &count);

    uptr end_address = address + size;

    // libxpc stashes some pointers in the Kernel Alloc Once page,
    // make sure not to report those as leaks.
    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    }

    // This additional root region scan is required on Darwin in order to
    // detect root regions contained within mmap'd memory regions, because
    // the Darwin implementation of sanitizer_procmaps traverses images
    // as loaded by dyld, and not the complete set of all memory regions.
    //
    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
    // behavior as sanitizer_procmaps_linux and traverses all memory regions
    if (flags()->use_root_regions) {
      for (uptr i = 0; i < root_regions->size(); i++) {
        ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
                       info.protection);
      }
    }

    address = end_address;
  }
}

void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorld(callback, argument);
}

}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS && SANITIZER_MAC
@ -30,7 +30,7 @@ LSAN_FLAG(bool, use_globals, true,
          "Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
LSAN_FLAG(bool, use_tls, true,
LSAN_FLAG(bool, use_tls, !SANITIZER_MAC,
          "Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
          "Root set: include regions added via __lsan_register_root_region().")
@ -21,12 +21,15 @@
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"

#include <stddef.h>

using namespace __lsan;

extern "C" {
@ -37,29 +40,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}

#define ENSURE_LSAN_INITED do {   \
  CHECK(!lsan_init_is_running);   \
  if (!lsan_inited)               \
    __lsan_init();                \
} while (0)

///// Malloc/free interceptors. /////

const bool kAlwaysClearMemory = true;

namespace std {
  struct nothrow_t;
}

#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return Allocate(stack, size, 1, kAlwaysClearMemory);
  return lsan_malloc(size, stack);
}

INTERCEPTOR(void, free, void *p) {
  ENSURE_LSAN_INITED;
  Deallocate(p);
  lsan_free(p);
}

INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
@ -77,28 +73,42 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  size *= nmemb;
  return Allocate(stack, size, 1, true);
  return lsan_calloc(nmemb, size, stack);
}

INTERCEPTOR(void*, realloc, void *q, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return Reallocate(stack, q, size, 1);
  return lsan_realloc(q, size, stack);
}

INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  *memptr = lsan_memalign(alignment, size, stack);
  // FIXME: Return ENOMEM if user requested more than max alloc size.
  return 0;
}

INTERCEPTOR(void*, valloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return lsan_valloc(size, stack);
}
#endif

#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return Allocate(stack, size, alignment, kAlwaysClearMemory);
  return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)

INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
  void *res = lsan_memalign(alignment, size, stack);
  DTLS_on_libc_memalign(res, size);
  return res;
}
@ -108,32 +118,27 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
#endif // SANITIZER_INTERCEPT_MEMALIGN

#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return Allocate(stack, size, alignment, kAlwaysClearMemory);
}

INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  // FIXME: Return ENOMEM if user requested more than max alloc size.
  return 0;
}

INTERCEPTOR(void*, valloc, uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  if (size == 0)
    size = GetPageSizeCached();
  return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
  return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
#else
#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
#endif

#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
  ENSURE_LSAN_INITED;
  return GetMallocUsableSize(ptr);
}
#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
        INTERCEPT_FUNCTION(malloc_usable_size)
#else
#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
#endif

#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
@ -186,13 +191,13 @@ INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
  return Allocate(stack, size, 1, kAlwaysClearMemory);

INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size) { OPERATOR_NEW_BODY; }
void *operator new(size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
void *operator new[](size_t size) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
void *operator new(size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
INTERCEPTOR_ATTRIBUTE
void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }

#define OPERATOR_DELETE_BODY \
  ENSURE_LSAN_INITED; \
@ -277,7 +282,8 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
    res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
  }
  if (res == 0) {
    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
                           IsStateDetached(detached));
    CHECK_NE(tid, 0);
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
@ -307,11 +313,11 @@ void InitializeInterceptors() {
  INTERCEPT_FUNCTION(realloc);
  LSAN_MAYBE_INTERCEPT_MEMALIGN;
  LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
  INTERCEPT_FUNCTION(aligned_alloc);
  LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
  INTERCEPT_FUNCTION(posix_memalign);
  INTERCEPT_FUNCTION(valloc);
  LSAN_MAYBE_INTERCEPT_PVALLOC;
  INTERCEPT_FUNCTION(malloc_usable_size);
  LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
  LSAN_MAYBE_INTERCEPT_MALLINFO;
  LSAN_MAYBE_INTERCEPT_MALLOPT;
  INTERCEPT_FUNCTION(pthread_create);
33
contrib/compiler-rt/lib/lsan/lsan_linux.cc
Normal file
@ -0,0 +1,33 @@
//=-- lsan_linux.cc -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"

#if SANITIZER_LINUX

#include "lsan_allocator.h"

namespace __lsan {

static THREADLOCAL u32 current_thread_tid = kInvalidTid;
u32 GetCurrentThread() { return current_thread_tid; }
void SetCurrentThread(u32 tid) { current_thread_tid = tid; }

static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }

void ReplaceSystemMalloc() {}

}  // namespace __lsan

#endif  // SANITIZER_LINUX
55
contrib/compiler-rt/lib/lsan/lsan_malloc_mac.cc
Normal file
@ -0,0 +1,55 @@
//===-- lsan_malloc_mac.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer (LSan), a memory leak detector.
//
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_thread.h"

using namespace __lsan;
#define COMMON_MALLOC_ZONE_NAME "lsan"
#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
#define COMMON_MALLOC_FORCE_LOCK()
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
  GET_STACK_TRACE_MALLOC; \
  void *p = lsan_memalign(alignment, size, stack)
#define COMMON_MALLOC_MALLOC(size) \
  GET_STACK_TRACE_MALLOC; \
  void *p = lsan_malloc(size, stack)
#define COMMON_MALLOC_REALLOC(ptr, size) \
  GET_STACK_TRACE_MALLOC; \
  void *p = lsan_realloc(ptr, size, stack)
#define COMMON_MALLOC_CALLOC(count, size) \
  GET_STACK_TRACE_MALLOC; \
  void *p = lsan_calloc(count, size, stack)
#define COMMON_MALLOC_VALLOC(size) \
  GET_STACK_TRACE_MALLOC; \
  void *p = lsan_valloc(size, stack)
#define COMMON_MALLOC_FREE(ptr) \
  lsan_free(ptr)
#define COMMON_MALLOC_SIZE(ptr) \
  uptr size = lsan_mz_size(ptr)
#define COMMON_MALLOC_FILL_STATS(zone, stats)
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
  (void)zone_name; \
  Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
#define COMMON_MALLOC_NAMESPACE __lsan

#include "sanitizer_common/sanitizer_malloc_mac.inc"

#endif // SANITIZER_MAC
@ -19,13 +19,11 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
#include "lsan_common.h"

namespace __lsan {

const u32 kInvalidTid = (u32) -1;

static ThreadRegistry *thread_registry;
static THREADLOCAL u32 current_thread_tid = kInvalidTid;

static ThreadContextBase *CreateThreadContext(u32 tid) {
  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
@ -41,14 +39,6 @@ void InitializeThreadRegistry() {
      ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}

u32 GetCurrentThread() {
  return current_thread_tid;
}

void SetCurrentThread(u32 tid) {
  current_thread_tid = tid;
}

ThreadContext::ThreadContext(int tid)
    : ThreadContextBase(tid),
      stack_begin_(0),
@ -87,7 +77,7 @@ u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
                                       /* arg */ nullptr);
}

void ThreadStart(u32 tid, uptr os_id) {
void ThreadStart(u32 tid, tid_t os_id) {
  OnStartedArgs args;
  uptr stack_size = 0;
  uptr tls_size = 0;
@ -97,11 +87,12 @@ void ThreadStart(u32 tid, uptr os_id) {
  args.tls_end = args.tls_begin + tls_size;
  GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
  args.dtls = DTLS_Get();
  thread_registry->StartThread(tid, os_id, &args);
  thread_registry->StartThread(tid, os_id, /*workerthread*/ false, &args);
}

void ThreadFinish() {
  thread_registry->FinishThread(GetCurrentThread());
  SetCurrentThread(kInvalidTid);
}

ThreadContext *CurrentThreadContext() {
@ -136,7 +127,7 @@ void EnsureMainThreadIDIsCorrect() {

///// Interface to the common LSan module. /////

bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  ThreadContext *context = static_cast<ThreadContext *>(
@ -152,7 +143,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
  return true;
}

void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
}

@ -45,7 +45,7 @@ class ThreadContext : public ThreadContextBase {

void InitializeThreadRegistry();

void ThreadStart(u32 tid, uptr os_id);
void ThreadStart(u32 tid, tid_t os_id);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);
2
contrib/compiler-rt/lib/lsan/weak_symbols.txt
Normal file
@ -0,0 +1,2 @@
___lsan_default_suppressions
___lsan_is_turned_off
@ -123,14 +123,6 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
#define CHECK_UNPOISONED_STRING(x, n) \
  CHECK_UNPOISONED_STRING_OF_LEN((x), internal_strlen(x), (n))

INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
  ENSURE_MSAN_INITED();
  SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
  if (res > 0)
    __msan_unpoison(ptr, res *size);
  return res;
}

#if !SANITIZER_FREEBSD
INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb,
            void *file) {
@ -580,6 +572,13 @@ INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
  return res;
}

INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
  ENSURE_MSAN_INITED();
  SIZE_T res = REAL(wcsnlen)(s, n);
  CHECK_UNPOISONED(s, sizeof(wchar_t) * Min(res + 1, n));
  return res;
}

// wchar_t *wcschr(const wchar_t *wcs, wchar_t wc);
INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) {
  ENSURE_MSAN_INITED();
@ -597,6 +596,18 @@ INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
  return res;
}

INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src,
            SIZE_T n) {  // NOLINT
  ENSURE_MSAN_INITED();
  GET_STORE_STACK_TRACE;
  SIZE_T copy_size = REAL(wcsnlen)(src, n);
  if (copy_size < n) copy_size++;           // trailing \0
  wchar_t *res = REAL(wcsncpy)(dest, src, n);  // NOLINT
  CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack);
  __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t));
  return res;
}

// wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n);
INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
  ENSURE_MSAN_INITED();
@ -1565,8 +1576,10 @@ void InitializeInterceptors() {
  INTERCEPT_FUNCTION(mbtowc);
  INTERCEPT_FUNCTION(mbrtowc);
  INTERCEPT_FUNCTION(wcslen);
  INTERCEPT_FUNCTION(wcsnlen);
  INTERCEPT_FUNCTION(wcschr);
  INTERCEPT_FUNCTION(wcscpy);
  INTERCEPT_FUNCTION(wcsncpy);
  INTERCEPT_FUNCTION(wcscmp);
  INTERCEPT_FUNCTION(getenv);
  INTERCEPT_FUNCTION(setenv);
@ -153,7 +153,17 @@ INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
|
||||
VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
|
||||
INSTR_PROF_COMMA
|
||||
VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
|
||||
#ifndef VALUE_RANGE_PROF
|
||||
VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
|
||||
#else /* VALUE_RANGE_PROF */
|
||||
VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
|
||||
INSTR_PROF_COMMA
|
||||
VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
|
||||
INSTR_PROF_COMMA
|
||||
VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
|
||||
INSTR_PROF_COMMA
|
||||
VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
|
||||
#endif /*VALUE_RANGE_PROF */
|
||||
#undef VALUE_PROF_FUNC_PARAM
|
||||
#undef INSTR_PROF_COMMA
|
||||
/* VALUE_PROF_FUNC_PARAM end */
|
||||
@ -174,13 +184,15 @@ VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
|
||||
* name hash and the function address.
|
||||
*/
|
||||
VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0)
|
||||
/* For memory intrinsic functions size profiling. */
|
||||
VALUE_PROF_KIND(IPVK_MemOPSize, 1)
|
||||
/* These two kinds must be the last to be
|
||||
* declared. This is to make sure the string
|
||||
* array created with the template can be
|
||||
* indexed with the kind value.
|
||||
*/
|
||||
VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget)
|
||||
VALUE_PROF_KIND(IPVK_Last, IPVK_IndirectCallTarget)
|
||||
VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize)
|
||||
|
||||
#undef VALUE_PROF_KIND
|
||||
/* VALUE_PROF_KIND end */
|
||||
@ -234,6 +246,31 @@ COVMAP_HEADER(uint32_t, Int32Ty, Version, \
|
||||
/* COVMAP_HEADER end. */
|
||||
|
||||
|
||||
#ifdef INSTR_PROF_SECT_ENTRY
|
||||
#define INSTR_PROF_DATA_DEFINED
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_data, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_DATA_COFF), "__DATA,")
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COFF), "__DATA,")
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_name, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_NAME_COFF), "__DATA,")
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_vals, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VALS_COFF), "__DATA,")
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COFF), "__DATA,")
|
||||
INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COFF), "__LLVM_COV,")
|
||||
|
||||
#undef INSTR_PROF_SECT_ENTRY
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef INSTR_PROF_VALUE_PROF_DATA
|
||||
#define INSTR_PROF_DATA_DEFINED
|
||||
|
||||
@ -610,17 +647,47 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
|
||||
* specified via command line. */
|
||||
#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
|
||||
|
||||
/* section name strings common to all targets other
|
||||
than WIN32 */
|
||||
#define INSTR_PROF_DATA_COMMON __llvm_prf_data
|
||||
#define INSTR_PROF_NAME_COMMON __llvm_prf_names
|
||||
#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
|
||||
#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
|
||||
#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
|
||||
#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
|
||||
/* Win32 */
|
||||
#define INSTR_PROF_DATA_COFF .lprfd
|
||||
#define INSTR_PROF_NAME_COFF .lprfn
|
||||
#define INSTR_PROF_CNTS_COFF .lprfc
|
||||
#define INSTR_PROF_VALS_COFF .lprfv
|
||||
#define INSTR_PROF_VNODES_COFF .lprfnd
|
||||
#define INSTR_PROF_COVMAP_COFF .lcovmap
|
||||
|
||||
#ifdef _WIN32
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
#define INSTR_PROF_NAME_SECT_NAME __llvm_prf_names
#define INSTR_PROF_CNTS_SECT_NAME __llvm_prf_cnts
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
/* Array of pointers. Each pointer points to a list
 * of value nodes associated with one value site.
 */
#define INSTR_PROF_VALS_SECT_NAME __llvm_prf_vals
#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
/* Value profile nodes section. */
#define INSTR_PROF_VNODES_SECT_NAME __llvm_prf_vnds
#define INSTR_PROF_COVMAP_SECT_NAME __llvm_covmap
#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
#else
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COMMON
#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COMMON
#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COMMON
/* Array of pointers. Each pointer points to a list
 * of value nodes associated with one value site.
 */
#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COMMON
/* Value profile nodes section. */
#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COMMON
#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COMMON
#endif

#define INSTR_PROF_DATA_SECT_NAME_STR \
  INSTR_PROF_QUOTE(INSTR_PROF_DATA_SECT_NAME)
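/* Illustrative note, not part of the patch: assuming INSTR_PROF_QUOTE
 * stringizes its already-expanded argument, the *_SECT_NAME_STR macro above
 * resolves to "__llvm_prf_data" on non-Windows targets and to ".lprfd" under
 * _WIN32, because the SECT_NAME macros now forward to the COMMON/COFF
 * spellings defined earlier. A hypothetical sanity check built on top of this
 * header:
 */
#include <string.h>
static int DataSectNameLooksRight(void) {
#ifdef _WIN32
  return strcmp(INSTR_PROF_DATA_SECT_NAME_STR, ".lprfd") == 0;
#else
  return strcmp(INSTR_PROF_DATA_SECT_NAME_STR, "__llvm_prf_data") == 0;
#endif
}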
|
||||
@ -649,6 +716,9 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
|
||||
#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
|
||||
#define INSTR_PROF_VALUE_PROF_FUNC_STR \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
|
||||
#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
|
||||
#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
|
||||
INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
|
||||
|
||||
/* InstrProfile per-function control data alignment. */
|
||||
#define INSTR_PROF_DATA_ALIGNMENT 8
|
||||
|
@ -172,6 +172,16 @@ static int doProfileMerging(FILE *ProfileFile) {
  return 0;
}

/* Create the directory holding the file, if needed. */
static void createProfileDir(const char *Filename) {
  size_t Length = strlen(Filename);
  if (lprofFindFirstDirSeparator(Filename)) {
    char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
    strncpy(Copy, Filename, Length + 1);
    __llvm_profile_recursive_mkdir(Copy);
  }
}
|
||||
|
||||
/* Open the profile data for merging. It opens the file in r+b mode with
|
||||
* file locking. If the file has content which is compatible with the
|
||||
* current process, it also reads in the profile data in the file and merge
|
||||
@ -184,6 +194,7 @@ static FILE *openFileForMerging(const char *ProfileFileName) {
|
||||
FILE *ProfileFile;
|
||||
int rc;
|
||||
|
||||
createProfileDir(ProfileFileName);
|
||||
ProfileFile = lprofOpenFileEx(ProfileFileName);
|
||||
if (!ProfileFile)
|
||||
return NULL;
|
||||
@ -233,18 +244,13 @@ static void truncateCurrentFile(void) {
|
||||
if (!Filename)
|
||||
return;
|
||||
|
||||
/* Create the directory holding the file, if needed. */
|
||||
if (lprofFindFirstDirSeparator(Filename)) {
|
||||
char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
|
||||
strncpy(Copy, Filename, Length + 1);
|
||||
__llvm_profile_recursive_mkdir(Copy);
|
||||
}
|
||||
|
||||
/* By pass file truncation to allow online raw profile
|
||||
* merging. */
|
||||
if (lprofCurFilename.MergePoolSize)
|
||||
return;
|
||||
|
||||
createProfileDir(Filename);
|
||||
|
||||
/* Truncate the file. Later we'll reopen and append. */
|
||||
File = fopen(Filename, "w");
|
||||
if (!File)
|
||||
@ -524,6 +530,7 @@ int __llvm_profile_write_file(void) {
|
||||
int rc, Length;
|
||||
const char *Filename;
|
||||
char *FilenameBuf;
|
||||
int PDeathSig = 0;
|
||||
|
||||
if (lprofProfileDumped()) {
|
||||
PROF_NOTE("Profile data not written to file: %s.\n",
|
||||
@ -550,10 +557,18 @@ int __llvm_profile_write_file(void) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Temporarily suspend getting SIGKILL when the parent exits.
|
||||
PDeathSig = lprofSuspendSigKill();
|
||||
|
||||
/* Write profile data to the file. */
|
||||
rc = writeFile(Filename);
|
||||
if (rc)
|
||||
PROF_ERR("Failed to write file \"%s\": %s\n", Filename, strerror(errno));
|
||||
|
||||
// Restore SIGKILL.
|
||||
if (PDeathSig == 1)
|
||||
lprofRestoreSigKill();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -29,6 +29,11 @@
#include <stdlib.h>
#include <string.h>

#if defined(__linux__)
#include <signal.h>
#include <sys/prctl.h>
#endif

COMPILER_RT_VISIBILITY
void __llvm_profile_recursive_mkdir(char *path) {
  int i;
@ -219,3 +224,21 @@ COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
#endif
  return Sep;
}

COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
#if defined(__linux__)
  int PDeachSig = 0;
  /* Temporarily suspend getting SIGKILL upon exit of the parent process. */
  if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL)
    prctl(PR_SET_PDEATHSIG, 0);
  return (PDeachSig == SIGKILL);
#else
  return 0;
#endif
}

COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
#if defined(__linux__)
  prctl(PR_SET_PDEATHSIG, SIGKILL);
#endif
}
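/* Illustrative sketch, not part of the patch: the intended usage pattern for
 * the two helpers above is to bracket a potentially slow profile write, so a
 * process configured to receive SIGKILL when its parent exits is not killed
 * halfway through writing the file (hypothetical caller): */
static void WriteProfileWithPDeathSigSuspended(void) {
  int PDeathSig = lprofSuspendSigKill();
  /* ... write the profile data here ... */
  if (PDeathSig == 1)
    lprofRestoreSigKill();
}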
|
||||
|
@ -51,4 +51,12 @@ int lprofGetHostName(char *Name, int Len);
unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);
void *lprofPtrFetchAdd(void **Mem, long ByteIncr);

/* Temporarily suspend SIGKILL. Return value of 1 means a restore is needed.
 * Other return values mean no restore is needed.
 */
int lprofSuspendSigKill();

/* Restore previously suspended SIGKILL. */
void lprofRestoreSigKill();
|
||||
|
||||
#endif /* PROFILE_INSTRPROFILINGUTIL_H */
|
||||
|
@ -219,6 +219,35 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
  }
}

/*
 * The target values are partitioned into multiple regions/ranges. There is one
 * contiguous region which is precise -- every value in the range is tracked
 * individually. A value outside the precise region will be collapsed into one
 * value depending on the region it falls in.
 *
 * There are three regions:
 * 1. (-inf, PreciseRangeStart) and (PreciseRangeLast, LargeRangeValue) belong
 *    to one region -- all values here should be mapped to one value of
 *    "PreciseRangeLast + 1".
 * 2. [PreciseRangeStart, PreciseRangeLast]
 * 3. Large values: [LargeValue, +inf) maps to one value of LargeValue.
 *
 * The range for large values is optional. The default value of INT64_MIN
 * indicates it is not specified.
 */
COMPILER_RT_VISIBILITY void __llvm_profile_instrument_range(
    uint64_t TargetValue, void *Data, uint32_t CounterIndex,
    int64_t PreciseRangeStart, int64_t PreciseRangeLast, int64_t LargeValue) {

  if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
    TargetValue = LargeValue;
  else if ((int64_t)TargetValue < PreciseRangeStart ||
           (int64_t)TargetValue > PreciseRangeLast)
    TargetValue = PreciseRangeLast + 1;

  __llvm_profile_instrument_target(TargetValue, Data, CounterIndex);
}
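// Illustrative sketch, not part of the patch: with hypothetical parameters
// PreciseRangeStart = 0, PreciseRangeLast = 8 and LargeValue = INT64_MIN
// (no large-value region), the bucketing above keeps 0..8 as-is and collapses
// everything else to PreciseRangeLast + 1 == 9. A standalone model of that
// decision, mirroring the branches of __llvm_profile_instrument_range:
#include <stdint.h>
static uint64_t RangeBucket(uint64_t TargetValue, int64_t PreciseRangeStart,
                            int64_t PreciseRangeLast, int64_t LargeValue) {
  if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
    return (uint64_t)LargeValue;              // large-value region
  if ((int64_t)TargetValue < PreciseRangeStart ||
      (int64_t)TargetValue > PreciseRangeLast)
    return (uint64_t)(PreciseRangeLast + 1);  // outside the precise region
  return TargetValue;                         // precise region, kept as-is
}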
|
||||
|
||||
/*
|
||||
* A wrapper struct that represents value profile runtime data.
|
||||
* Like InstrProfRecord class which is used by profiling host tools,
|
||||
|
@ -15,10 +15,9 @@
|
||||
#include "sanitizer_flag_parser.h"
|
||||
#include "sanitizer_platform.h"
|
||||
|
||||
#if !SANITIZER_LINUX
|
||||
// other platforms do not have weak symbols out of the box.
|
||||
extern "C" const char* __sancov_default_options() { return ""; }
|
||||
#endif
|
||||
SANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {
|
||||
return "";
|
||||
}
|
||||
|
||||
using namespace __sanitizer;
|
||||
|
||||
|
@ -32,9 +32,9 @@ inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
|
||||
|
||||
void InitializeSancovFlags();
|
||||
|
||||
} // namespace __sancov
|
||||
|
||||
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
|
||||
__sancov_default_options();
|
||||
|
||||
} // namespace __sancov
|
||||
|
||||
#endif
|
||||
|
@ -34,13 +34,12 @@ SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
    void (*free_hook)(const void *));

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
void __sanitizer_free_hook(void *ptr);


SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_print_memory_profile(int top_percent);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"

#endif // SANITIZER_ALLOCATOR_INTERFACE_H
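// Illustrative sketch, not part of the patch: with the two-argument
// declaration above, an in-tree caller now passes both a top_percent and a
// max_number_of_contexts value, e.g. the 90 and 20 also used by the
// background thread later in this patch (hypothetical caller):
static void MaybeDumpHeapProfile() {
  __sanitizer_print_memory_profile(90, 20);
}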
|
||||
|
@ -45,10 +45,10 @@ struct SizeClassAllocator64LocalCache {
|
||||
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
|
||||
CHECK_NE(class_id, 0UL);
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
|
||||
PerClass *c = &per_class_[class_id];
|
||||
if (UNLIKELY(c->count == 0))
|
||||
Refill(c, allocator, class_id);
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_GT(c->count, 0);
|
||||
CompactPtrT chunk = c->chunks[--c->count];
|
||||
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
|
||||
@ -62,8 +62,8 @@ struct SizeClassAllocator64LocalCache {
|
||||
// If the first allocator call on a new thread is a deallocation, then
|
||||
// max_count will be zero, leading to check failure.
|
||||
InitCache();
|
||||
stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
|
||||
PerClass *c = &per_class_[class_id];
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_NE(c->max_count, 0UL);
|
||||
if (UNLIKELY(c->count == c->max_count))
|
||||
Drain(c, allocator, class_id, c->max_count / 2);
|
||||
@ -85,6 +85,7 @@ struct SizeClassAllocator64LocalCache {
|
||||
struct PerClass {
|
||||
u32 count;
|
||||
u32 max_count;
|
||||
uptr class_size;
|
||||
CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
|
||||
};
|
||||
PerClass per_class_[kNumClasses];
|
||||
@ -96,13 +97,14 @@ struct SizeClassAllocator64LocalCache {
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
|
||||
c->class_size = Allocator::ClassIdToSize(i);
|
||||
}
|
||||
}
|
||||
|
||||
NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
|
||||
uptr class_id) {
|
||||
InitCache();
|
||||
uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
|
||||
uptr num_requested_chunks = c->max_count / 2;
|
||||
allocator->GetFromAllocator(&stats_, class_id, c->chunks,
|
||||
num_requested_chunks);
|
||||
c->count = num_requested_chunks;
|
||||
@ -141,10 +143,10 @@ struct SizeClassAllocator32LocalCache {
|
||||
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
|
||||
CHECK_NE(class_id, 0UL);
|
||||
CHECK_LT(class_id, kNumClasses);
|
||||
stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
|
||||
PerClass *c = &per_class_[class_id];
|
||||
if (UNLIKELY(c->count == 0))
|
||||
Refill(allocator, class_id);
|
||||
stats_.Add(AllocatorStatAllocated, c->class_size);
|
||||
void *res = c->batch[--c->count];
|
||||
PREFETCH(c->batch[c->count - 1]);
|
||||
return res;
|
||||
@ -156,8 +158,8 @@ struct SizeClassAllocator32LocalCache {
|
||||
// If the first allocator call on a new thread is a deallocation, then
|
||||
// max_count will be zero, leading to check failure.
|
||||
InitCache();
|
||||
stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
|
||||
PerClass *c = &per_class_[class_id];
|
||||
stats_.Sub(AllocatorStatAllocated, c->class_size);
|
||||
CHECK_NE(c->max_count, 0UL);
|
||||
if (UNLIKELY(c->count == c->max_count))
|
||||
Drain(allocator, class_id);
|
||||
@ -177,6 +179,7 @@ struct SizeClassAllocator32LocalCache {
|
||||
struct PerClass {
|
||||
uptr count;
|
||||
uptr max_count;
|
||||
uptr class_size;
|
||||
void *batch[2 * TransferBatch::kMaxNumCached];
|
||||
};
|
||||
PerClass per_class_[kNumClasses];
|
||||
@ -188,6 +191,7 @@ struct SizeClassAllocator32LocalCache {
|
||||
for (uptr i = 0; i < kNumClasses; i++) {
|
||||
PerClass *c = &per_class_[i];
|
||||
c->max_count = 2 * TransferBatch::MaxCached(i);
|
||||
c->class_size = Allocator::ClassIdToSize(i);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -199,23 +199,24 @@ const char *StripModuleName(const char *module) {
  return module;
}

void ReportErrorSummary(const char *error_message) {
void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
  if (!common_flags()->print_summary)
    return;
  InternalScopedString buff(kMaxSummaryLength);
  buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
  buff.append("SUMMARY: %s: %s",
              alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
  __sanitizer_report_error_summary(buff.data());
}

#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
  if (!common_flags()->print_summary)
    return;
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name) {
  if (!common_flags()->print_summary) return;
  InternalScopedString buff(kMaxSummaryLength);
  buff.append("%s ", error_type);
  RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
              common_flags()->strip_path_prefix);
  ReportErrorSummary(buff.data());
  ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
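// Illustrative sketch, not part of the patch: the only behavioral change in
// ReportErrorSummary is which tool name ends up in the SUMMARY line. Assuming
// SanitizerToolName is "AddressSanitizer" in this process, the hypothetical
// calls
//   ReportErrorSummary("heap-buffer-overflow")   -> "SUMMARY: AddressSanitizer: heap-buffer-overflow"
//   ReportErrorSummary("leak", "LeakSanitizer")  -> "SUMMARY: LeakSanitizer: leak"
// A standalone model of that formatting choice:
#include <cstddef>
#include <cstdio>
static void FormatSummary(char *buf, std::size_t len, const char *tool_name,
                          const char *error_message,
                          const char *alt_tool_name) {
  std::snprintf(buf, len, "SUMMARY: %s: %s",
                alt_tool_name ? alt_tool_name : tool_name, error_message);
}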
|
||||
|
||||
@ -283,9 +284,10 @@ void LoadedModule::clear() {
|
||||
}
|
||||
}
|
||||
|
||||
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
|
||||
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
|
||||
bool readable) {
|
||||
void *mem = InternalAlloc(sizeof(AddressRange));
|
||||
AddressRange *r = new(mem) AddressRange(beg, end, executable);
|
||||
AddressRange *r = new(mem) AddressRange(beg, end, executable, readable);
|
||||
ranges_.push_back(r);
|
||||
if (executable && end > max_executable_address_)
|
||||
max_executable_address_ = end;
|
||||
@ -489,7 +491,8 @@ void __sanitizer_set_report_fd(void *fd) {
|
||||
report_file.fd_pid = internal_getpid();
|
||||
}
|
||||
|
||||
void __sanitizer_report_error_summary(const char *error_summary) {
|
||||
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
|
||||
const char *error_summary) {
|
||||
Printf("%s\n", error_summary);
|
||||
}
|
||||
|
||||
@ -504,11 +507,4 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
|
||||
void (*free_hook)(const void *)) {
|
||||
return InstallMallocFreeHooks(malloc_hook, free_hook);
|
||||
}
|
||||
|
||||
#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
|
||||
void __sanitizer_print_memory_profile(int top_percent) {
|
||||
(void)top_percent;
|
||||
}
|
||||
#endif
|
||||
} // extern "C"
|
||||
|
@ -72,7 +72,7 @@ INLINE uptr GetPageSizeCached() {
|
||||
uptr GetMmapGranularity();
|
||||
uptr GetMaxVirtualAddress();
|
||||
// Threads
|
||||
uptr GetTid();
|
||||
tid_t GetTid();
|
||||
uptr GetThreadSelf();
|
||||
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
|
||||
uptr *stack_bottom);
|
||||
@ -382,6 +382,7 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
|
||||
typedef void (*SignalHandlerType)(int, void *, void *);
|
||||
bool IsHandledDeadlySignal(int signum);
|
||||
void InstallDeadlySignalHandlers(SignalHandlerType handler);
|
||||
const char *DescribeSignalOrException(int signo);
|
||||
// Alternative signal stack (POSIX-only).
|
||||
void SetAlternateSignalStack();
|
||||
void UnsetAlternateSignalStack();
|
||||
@ -391,12 +392,16 @@ const int kMaxSummaryLength = 1024;
|
||||
// Construct a one-line string:
|
||||
// SUMMARY: SanitizerToolName: error_message
|
||||
// and pass it to __sanitizer_report_error_summary.
|
||||
void ReportErrorSummary(const char *error_message);
|
||||
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
|
||||
void ReportErrorSummary(const char *error_message,
|
||||
const char *alt_tool_name = nullptr);
|
||||
// Same as above, but construct error_message as:
|
||||
// error_type file:line[:column][ function]
|
||||
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
|
||||
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
|
||||
const char *alt_tool_name = nullptr);
|
||||
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
|
||||
void ReportErrorSummary(const char *error_type, const StackTrace *trace);
|
||||
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
|
||||
const char *alt_tool_name = nullptr);
|
||||
|
||||
// Math
|
||||
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
|
||||
@ -712,7 +717,7 @@ class LoadedModule {
|
||||
void set(const char *module_name, uptr base_address, ModuleArch arch,
|
||||
u8 uuid[kModuleUUIDSize], bool instrumented);
|
||||
void clear();
|
||||
void addAddressRange(uptr beg, uptr end, bool executable);
|
||||
void addAddressRange(uptr beg, uptr end, bool executable, bool readable);
|
||||
bool containsAddress(uptr address) const;
|
||||
|
||||
const char *full_name() const { return full_name_; }
|
||||
@ -727,9 +732,14 @@ class LoadedModule {
|
||||
uptr beg;
|
||||
uptr end;
|
||||
bool executable;
|
||||
bool readable;
|
||||
|
||||
AddressRange(uptr beg, uptr end, bool executable)
|
||||
: next(nullptr), beg(beg), end(end), executable(executable) {}
|
||||
AddressRange(uptr beg, uptr end, bool executable, bool readable)
|
||||
: next(nullptr),
|
||||
beg(beg),
|
||||
end(end),
|
||||
executable(executable),
|
||||
readable(readable) {}
|
||||
};
|
||||
|
||||
const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
|
||||
@ -910,6 +920,8 @@ struct StackDepotStats {
|
||||
// indicate that sanitizer allocator should not attempt to release memory to OS.
|
||||
const s32 kReleaseToOSIntervalNever = -1;
|
||||
|
||||
void CheckNoDeepBind(const char *filename, int flag);
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
inline void *operator new(__sanitizer::operator_new_size_type size,
|
||||
|
@ -24,7 +24,8 @@
|
||||
// COMMON_INTERCEPTOR_SET_THREAD_NAME
|
||||
// COMMON_INTERCEPTOR_ON_DLOPEN
|
||||
// COMMON_INTERCEPTOR_ON_EXIT
|
||||
// COMMON_INTERCEPTOR_MUTEX_LOCK
|
||||
// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
|
||||
// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
|
||||
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
|
||||
// COMMON_INTERCEPTOR_MUTEX_REPAIR
|
||||
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
|
||||
@ -44,15 +45,9 @@
|
||||
#include <stdarg.h>
|
||||
|
||||
#if SANITIZER_INTERCEPTOR_HOOKS
|
||||
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
|
||||
do { \
|
||||
if (f) \
|
||||
f(__VA_ARGS__); \
|
||||
} while (false);
|
||||
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
|
||||
extern "C" { \
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); \
|
||||
} // extern "C"
|
||||
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
|
||||
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
|
||||
SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
|
||||
#else
|
||||
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
|
||||
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
|
||||
@ -95,8 +90,12 @@ bool PlatformHasDifferentMemcpyAndMemmove();
|
||||
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MUTEX_LOCK
|
||||
#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) {}
|
||||
#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
|
||||
#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
|
||||
#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
|
||||
@ -140,15 +139,13 @@ bool PlatformHasDifferentMemcpyAndMemmove();
|
||||
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
|
||||
#endif
|
||||
|
||||
#define COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n) \
|
||||
COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
|
||||
common_flags()->strict_string_checks ? (len) + 1 : (n) )
|
||||
|
||||
#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
|
||||
COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
|
||||
COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
|
||||
common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
|
||||
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
|
||||
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
|
||||
CheckNoDeepBind(filename, flag);
|
||||
#endif
|
||||
|
||||
#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
|
||||
@ -450,8 +447,7 @@ static inline void StrstrCheck(void *ctx, char *r, const char *s1,
|
||||
const char *s2) {
|
||||
uptr len1 = REAL(strlen)(s1);
|
||||
uptr len2 = REAL(strlen)(s2);
|
||||
COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s1, len1,
|
||||
r ? r - s1 + len2 : len1 + 1);
|
||||
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
|
||||
}
|
||||
#endif
|
||||
@ -500,6 +496,52 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
|
||||
#define INIT_STRCASESTR
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRTOK

INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
  if (!common_flags()->intercept_strtok) {
    return REAL(strtok)(str, delimiters);
  }
  if (common_flags()->strict_string_checks) {
    // If strict_string_checks is enabled, we check the whole first argument
    // string on the first call (strtok saves this string in a static buffer
    // for subsequent calls). We do not need to check strtok's result.
    // As the delimiters can change, we check them every call.
    if (str != nullptr) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
    }
    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
                                  REAL(strlen)(delimiters) + 1);
    return REAL(strtok)(str, delimiters);
  } else {
    // However, when strict_string_checks is disabled we cannot check the
    // whole string on the first call. Instead, we check the result string
    // which is guaranteed to be a NULL-terminated substring of the first
    // argument. We also conservatively check one character of str and the
    // delimiters.
    if (str != nullptr) {
      COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
    }
    COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
    char *result = REAL(strtok)(str, delimiters);
    if (result != nullptr) {
      COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
    } else if (str != nullptr) {
      // No delimiter were found, it's safe to assume that the entire str was
      // scanned.
      COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
    }
    return result;
  }
}

#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)
#else
#define INIT_STRTOK
#endif
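// Illustrative sketch, not part of the patch: for a hypothetical call
//   char buf[] = "a,b";
//   strtok(buf, ",");
// the interceptor above reports these reads to the tool:
//   * strict_string_checks=1: all 4 bytes of buf ("a,b" plus its NUL) and the
//     delimiter string up front, before calling the real strtok.
//   * strict_string_checks=0: one byte of buf and one byte of the delimiters
//     up front, then the returned token "a" including the NUL that the real
//     strtok wrote over the ','.
#include <cstring>
static void StrtokInterceptionShapeDemo() {
  char buf[] = "a,b";
  char *tok = strtok(buf, ",");  // tok == buf; buf now contains "a\0b"
  (void)tok;
}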
|
||||
|
||||
#if SANITIZER_INTERCEPT_MEMMEM
|
||||
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
|
||||
const void *s1, SIZE_T len1, const void *s2,
|
||||
@ -531,10 +573,11 @@ INTERCEPTOR(char*, strchr, const char *s, int c) {
|
||||
return internal_strchr(s, c);
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
|
||||
char *result = REAL(strchr)(s, c);
|
||||
uptr len = internal_strlen(s);
|
||||
uptr n = result ? result - s + 1 : len + 1;
|
||||
if (common_flags()->intercept_strchr)
|
||||
COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n);
|
||||
if (common_flags()->intercept_strchr) {
|
||||
// Keep strlen as macro argument, as macro may ignore it.
|
||||
COMMON_INTERCEPTOR_READ_STRING(ctx, s,
|
||||
(result ? result - s : REAL(strlen)(s)) + 1);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
|
||||
@ -563,9 +606,8 @@ INTERCEPTOR(char*, strrchr, const char *s, int c) {
|
||||
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
|
||||
return internal_strrchr(s, c);
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
|
||||
uptr len = internal_strlen(s);
|
||||
if (common_flags()->intercept_strchr)
|
||||
COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, len + 1);
|
||||
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
|
||||
return REAL(strrchr)(s, c);
|
||||
}
|
||||
#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
|
||||
@ -842,6 +884,23 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
|
||||
#define INIT_READ
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_FREAD
|
||||
INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
|
||||
// libc file streams can call user-supplied functions, see fopencookie.
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
|
||||
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);
|
||||
return res;
|
||||
}
|
||||
#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)
|
||||
#else
|
||||
#define INIT_FREAD
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_PREAD
|
||||
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
|
||||
void *ctx;
|
||||
@ -942,6 +1001,20 @@ INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
|
||||
#define INIT_WRITE
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_FWRITE
|
||||
INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {
|
||||
// libc file streams can call user-supplied functions, see fopencookie.
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);
|
||||
SIZE_T res = REAL(fwrite)(p, size, nmemb, file);
|
||||
if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);
|
||||
return res;
|
||||
}
|
||||
#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)
|
||||
#else
|
||||
#define INIT_FWRITE
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_PWRITE
|
||||
INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
|
||||
void *ctx;
|
||||
@ -3251,6 +3324,30 @@ INTERCEPTOR(char *, strerror, int errnum) {
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_STRERROR_R
|
||||
// There are 2 versions of strerror_r:
|
||||
// * POSIX version returns 0 on success, negative error code on failure,
|
||||
// writes message to buf.
|
||||
// * GNU version returns message pointer, which points to either buf or some
|
||||
// static storage.
|
||||
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
|
||||
SANITIZER_MAC
|
||||
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
|
||||
// At least on OSX, buf contents are valid even when the call fails.
|
||||
INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
|
||||
// FIXME: under ASan the call below may write to freed memory and corrupt
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
int res = REAL(strerror_r)(errnum, buf, buflen);
|
||||
|
||||
SIZE_T sz = internal_strnlen(buf, buflen);
|
||||
if (sz < buflen) ++sz;
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
|
||||
return res;
|
||||
}
|
||||
#else
|
||||
// GNU version.
|
||||
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
|
||||
@ -3258,24 +3355,11 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
char *res = REAL(strerror_r)(errnum, buf, buflen);
|
||||
// There are 2 versions of strerror_r:
|
||||
// * POSIX version returns 0 on success, negative error code on failure,
|
||||
// writes message to buf.
|
||||
// * GNU version returns message pointer, which points to either buf or some
|
||||
// static storage.
|
||||
SIZE_T posix_res = (SIZE_T)res;
|
||||
if (posix_res < 1024 || posix_res > (SIZE_T) - 1024) {
|
||||
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
|
||||
// At least on OSX, buf contents are valid even when the call fails.
|
||||
SIZE_T sz = internal_strnlen(buf, buflen);
|
||||
if (sz < buflen) ++sz;
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
|
||||
} else {
|
||||
// GNU version.
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
|
||||
}
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
|
||||
return res;
|
||||
}
|
||||
#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
|
||||
//SANITIZER_MAC
|
||||
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
|
||||
#else
|
||||
#define INIT_STRERROR_R
|
||||
@ -3414,7 +3498,8 @@ INTERCEPTOR(int, getgroups, int size, u32 *lst) {
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
int res = REAL(getgroups)(size, lst);
|
||||
if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
|
||||
if (res >= 0 && lst && size > 0)
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
|
||||
return res;
|
||||
}
|
||||
#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);
|
||||
@ -3669,11 +3754,12 @@ INTERCEPTOR(void, _exit, int status) {
|
||||
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
|
||||
COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
|
||||
int res = REAL(pthread_mutex_lock)(m);
|
||||
if (res == errno_EOWNERDEAD)
|
||||
COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
|
||||
if (res == 0 || res == errno_EOWNERDEAD)
|
||||
COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
|
||||
COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
|
||||
if (res == errno_EINVAL)
|
||||
COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
|
||||
return res;
|
||||
@ -4547,7 +4633,7 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
|
||||
// its metadata. See
|
||||
// https://github.com/google/sanitizers/issues/321.
|
||||
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
|
||||
if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
|
||||
if (outbuf && *outbuf > outbuf_orig) {
|
||||
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
|
||||
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);
|
||||
}
|
||||
@ -4614,11 +4700,15 @@ void *__tls_get_addr_opt(void *arg);
|
||||
// descriptor offset as an argument instead of a pointer. GOT address
|
||||
// is passed in r12, so it's necessary to write it in assembly. This is
|
||||
// the function used by the compiler.
|
||||
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr_internal)
|
||||
extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
|
||||
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
|
||||
DEFINE_REAL(uptr, __tls_get_offset, void *arg)
|
||||
extern "C" uptr __tls_get_offset(void *arg);
|
||||
extern "C" uptr __interceptor___tls_get_offset(void *arg);
|
||||
INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
|
||||
void *ctx;
|
||||
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
|
||||
uptr res = REAL(__tls_get_addr_internal)(arg);
|
||||
uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
|
||||
uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());
|
||||
void *ptr = reinterpret_cast<void *>(res + tp);
|
||||
uptr tls_begin, tls_end;
|
||||
@ -4630,32 +4720,43 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
|
||||
}
|
||||
return res;
|
||||
}
|
||||
// We need a protected symbol aliasing the above, so that we can jump
|
||||
// We need a hidden symbol aliasing the above, so that we can jump
|
||||
// directly to it from the assembly below.
|
||||
extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
|
||||
visibility("protected")))
|
||||
uptr __interceptor___tls_get_addr_internal_protected(void *arg);
|
||||
visibility("hidden")))
|
||||
uptr __tls_get_addr_hidden(void *arg);
|
||||
// Now carefully intercept __tls_get_offset.
|
||||
asm(
|
||||
".text\n"
|
||||
".global __tls_get_offset\n"
|
||||
"__tls_get_offset:\n"
|
||||
// The __intercept_ version has to exist, so that gen_dynamic_list.py
|
||||
// exports our symbol.
|
||||
".weak __tls_get_offset\n"
|
||||
".type __tls_get_offset, @function\n"
|
||||
"__tls_get_offset:\n"
|
||||
".global __interceptor___tls_get_offset\n"
|
||||
".type __interceptor___tls_get_offset, @function\n"
|
||||
"__interceptor___tls_get_offset:\n"
|
||||
#ifdef __s390x__
|
||||
"la %r2, 0(%r2,%r12)\n"
|
||||
"jg __interceptor___tls_get_addr_internal_protected\n"
|
||||
"jg __tls_get_addr_hidden\n"
|
||||
#else
|
||||
"basr %r3,0\n"
|
||||
"0: la %r2,0(%r2,%r12)\n"
|
||||
"l %r4,1f-0b(%r3)\n"
|
||||
"b 0(%r4,%r3)\n"
|
||||
"1: .long __interceptor___tls_get_addr_internal_protected - 0b\n"
|
||||
"1: .long __tls_get_addr_hidden - 0b\n"
|
||||
#endif
|
||||
".type __tls_get_offset, @function\n"
|
||||
".size __tls_get_offset, .-__tls_get_offset\n"
|
||||
".size __interceptor___tls_get_offset, .-__interceptor___tls_get_offset\n"
|
||||
// Assembly wrapper to call REAL(__tls_get_offset)(arg)
|
||||
".type __tls_get_offset_wrapper, @function\n"
|
||||
"__tls_get_offset_wrapper:\n"
|
||||
#ifdef __s390x__
|
||||
"sgr %r2,%r12\n"
|
||||
#else
|
||||
"sr %r2,%r12\n"
|
||||
#endif
|
||||
"br %r3\n"
|
||||
".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
|
||||
);
|
||||
#endif // SANITIZER_S390
|
||||
#else
|
||||
@ -6026,6 +6127,21 @@ INTERCEPTOR(void *, getutxline, void *ut) {
|
||||
#define INIT_UTMPX
|
||||
#endif
|
||||
|
||||
#if SANITIZER_INTERCEPT_GETLOADAVG
INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);
  int res = REAL(getloadavg)(loadavg, nelem);
  if (res > 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));
  return res;
}
#define INIT_GETLOADAVG \
  COMMON_INTERCEPT_FUNCTION(getloadavg);
#else
#define INIT_GETLOADAVG
#endif
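// Illustrative sketch, not part of the patch: getloadavg(3) fills at most
// nelem entries and returns how many it actually wrote (or -1 on failure),
// which is why the interceptor above marks exactly res * sizeof(double)
// bytes as initialized. A hypothetical caller that only touches the reported
// entries (on systems providing getloadavg) stays within that range:
#include <cstdio>
#include <cstdlib>
static void PrintLoadAverages() {
  double loads[3];
  int n = getloadavg(loads, 3);
  for (int i = 0; i < n; i++)  // reads only the entries the call wrote
    std::printf("load[%d] = %.2f\n", i, loads[i]);
}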
|
||||
|
||||
static void InitializeCommonInterceptors() {
|
||||
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
|
||||
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
|
||||
@ -6043,6 +6159,7 @@ static void InitializeCommonInterceptors() {
|
||||
INIT_STRCHRNUL;
|
||||
INIT_STRRCHR;
|
||||
INIT_STRSPN;
|
||||
INIT_STRTOK;
|
||||
INIT_STRPBRK;
|
||||
INIT_MEMSET;
|
||||
INIT_MEMMOVE;
|
||||
@ -6052,12 +6169,14 @@ static void InitializeCommonInterceptors() {
|
||||
INIT_MEMRCHR;
|
||||
INIT_MEMMEM;
|
||||
INIT_READ;
|
||||
INIT_FREAD;
|
||||
INIT_PREAD;
|
||||
INIT_PREAD64;
|
||||
INIT_READV;
|
||||
INIT_PREADV;
|
||||
INIT_PREADV64;
|
||||
INIT_WRITE;
|
||||
INIT_FWRITE;
|
||||
INIT_PWRITE;
|
||||
INIT_PWRITE64;
|
||||
INIT_WRITEV;
|
||||
@ -6224,4 +6343,5 @@ static void InitializeCommonInterceptors() {
|
||||
// FIXME: add other *stat interceptors.
|
||||
INIT_UTMP;
|
||||
INIT_UTMPX;
|
||||
INIT_GETLOADAVG;
|
||||
}
|
||||
|
@ -0,0 +1,39 @@
|
||||
//===-- sanitizer_common_interface.inc ------------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Sanitizer Common interface list.
|
||||
//===----------------------------------------------------------------------===//
|
||||
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
|
||||
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_path)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
|
||||
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
|
||||
// Sanitizer weak hooks
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
|
||||
// Stacktrace interface.
|
||||
INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
|
||||
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
|
||||
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
|
||||
// Allocator interface.
|
||||
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_ownership)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
|
||||
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
|
||||
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
|
@ -0,0 +1,14 @@
|
||||
//===-- sanitizer_common_interface_posix.inc ------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Sanitizer Common interface list only available for Posix systems.
|
||||
//===----------------------------------------------------------------------===//
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
|
@ -47,7 +47,8 @@ void SetSandboxingCallback(void (*f)()) {
|
||||
sandboxing_callback = f;
|
||||
}
|
||||
|
||||
void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
|
||||
void ReportErrorSummary(const char *error_type, const StackTrace *stack,
|
||||
const char *alt_tool_name) {
|
||||
#if !SANITIZER_GO
|
||||
if (!common_flags()->print_summary)
|
||||
return;
|
||||
@ -59,7 +60,7 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
|
||||
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
|
||||
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
|
||||
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
|
||||
ReportErrorSummary(error_type, frame->info);
|
||||
ReportErrorSummary(error_type, frame->info, alt_tool_name);
|
||||
frame->ClearAll();
|
||||
#endif
|
||||
}
|
||||
@ -123,7 +124,7 @@ void BackgroundThread(void *arg) {
|
||||
if (heap_profile &&
|
||||
current_rss_mb > rss_during_last_reported_profile * 1.1) {
|
||||
Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
|
||||
__sanitizer_print_memory_profile(90);
|
||||
__sanitizer_print_memory_profile(90, 20);
|
||||
rss_during_last_reported_profile = current_rss_mb;
|
||||
}
|
||||
}
|
||||
@ -162,8 +163,8 @@ void MaybeStartBackgroudThread() {
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
void NOINLINE
|
||||
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
|
||||
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
|
||||
__sanitizer_sandbox_arguments *args) {
|
||||
__sanitizer::PrepareForSandboxing(args);
|
||||
if (__sanitizer::sandboxing_callback)
|
||||
__sanitizer::sandboxing_callback();
|
||||
|
@ -0,0 +1,32 @@
|
||||
//===-- sanitizer_coverage_interface.inc ----------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Sanitizer Coverage interface list.
|
||||
//===----------------------------------------------------------------------===//
|
||||
INTERFACE_FUNCTION(__sanitizer_cov)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_dump)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_init)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
|
||||
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
|
||||
INTERFACE_FUNCTION(__sanitizer_dump_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
|
||||
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
|
||||
INTERFACE_WEAK_FUNCTION(__sancov_default_options)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
|
||||
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
|
@ -57,12 +57,6 @@ static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
|
||||
static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
|
||||
|
||||
static atomic_uintptr_t coverage_counter;
|
||||
static atomic_uintptr_t caller_callee_counter;
|
||||
|
||||
static void ResetGlobalCounters() {
|
||||
return atomic_store(&coverage_counter, 0, memory_order_relaxed);
|
||||
return atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
|
||||
}
|
||||
|
||||
// pc_array is the array containing the covered PCs.
|
||||
// To make the pc_array thread- and async-signal-safe it has to be large enough.
|
||||
@ -90,25 +84,14 @@ class CoverageData {
|
||||
void AfterFork(int child_pid);
|
||||
void Extend(uptr npcs);
|
||||
void Add(uptr pc, u32 *guard);
|
||||
void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
|
||||
uptr cache_size);
|
||||
void DumpCallerCalleePairs();
|
||||
void DumpTrace();
|
||||
void DumpAsBitSet();
|
||||
void DumpCounters();
|
||||
void DumpOffsets();
|
||||
void DumpAll();
|
||||
|
||||
ALWAYS_INLINE
|
||||
void TraceBasicBlock(u32 *id);
|
||||
|
||||
void InitializeGuardArray(s32 *guards);
|
||||
void InitializeGuards(s32 *guards, uptr n, const char *module_name,
|
||||
uptr caller_pc);
|
||||
void InitializeCounters(u8 *counters, uptr n);
|
||||
void ReinitializeGuards();
|
||||
uptr GetNumberOf8bitCounters();
|
||||
uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
|
||||
|
||||
uptr *data();
|
||||
uptr size() const;
|
||||
@ -150,33 +133,6 @@ class CoverageData {
|
||||
InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
|
||||
InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
|
||||
|
||||
struct CounterAndSize {
|
||||
u8 *counters;
|
||||
uptr n;
|
||||
};
|
||||
|
||||
InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
|
||||
uptr num_8bit_counters;
|
||||
|
||||
// Caller-Callee (cc) array, size and current index.
|
||||
static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
|
||||
uptr **cc_array;
|
||||
atomic_uintptr_t cc_array_index;
|
||||
atomic_uintptr_t cc_array_size;
|
||||
|
||||
// Tracing event array, size and current pointer.
|
||||
// We record all events (basic block entries) in a global buffer of u32
|
||||
// values. Each such value is the index in pc_array.
|
||||
// So far the tracing is highly experimental:
|
||||
// - not thread-safe;
|
||||
// - does not support long traces;
|
||||
// - not tuned for performance.
|
||||
static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
|
||||
u32 *tr_event_array;
|
||||
uptr tr_event_array_size;
|
||||
u32 *tr_event_pointer;
|
||||
static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
|
||||
|
||||
StaticSpinMutex mu;
|
||||
};
|
||||
|
||||
@ -213,23 +169,6 @@ void CoverageData::Enable() {
|
||||
} else {
|
||||
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
|
||||
}
|
||||
|
||||
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
|
||||
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
|
||||
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
|
||||
atomic_store(&cc_array_index, 0, memory_order_relaxed);
|
||||
|
||||
// Allocate tr_event_array with a guard page at the end.
|
||||
tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
|
||||
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
|
||||
"CovInit::tr_event_array"));
|
||||
MprotectNoAccess(
|
||||
reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
|
||||
GetMmapGranularity());
|
||||
tr_event_array_size = kTrEventArrayMaxSize;
|
||||
tr_event_pointer = tr_event_array;
|
||||
|
||||
num_8bit_counters = 0;
|
||||
}
|
||||
|
||||
void CoverageData::InitializeGuardArray(s32 *guards) {
|
||||
@ -247,17 +186,6 @@ void CoverageData::Disable() {
|
||||
UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
|
||||
pc_array = nullptr;
|
||||
}
|
||||
if (cc_array) {
|
||||
UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
|
||||
cc_array = nullptr;
|
||||
}
|
||||
if (tr_event_array) {
|
||||
UnmapOrDie(tr_event_array,
|
||||
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
|
||||
GetMmapGranularity());
|
||||
tr_event_array = nullptr;
|
||||
tr_event_pointer = nullptr;
|
||||
}
|
||||
if (pc_fd != kInvalidFd) {
|
||||
CloseFile(pc_fd);
|
||||
pc_fd = kInvalidFd;
|
||||
@ -337,15 +265,6 @@ void CoverageData::Extend(uptr npcs) {
|
||||
atomic_store(&pc_array_size, size, memory_order_release);
|
||||
}
|
||||
|
||||
void CoverageData::InitializeCounters(u8 *counters, uptr n) {
|
||||
if (!counters) return;
|
||||
CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
|
||||
n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
|
||||
SpinMutexLock l(&mu);
|
||||
counters_vec.push_back({counters, n});
|
||||
num_8bit_counters += n;
|
||||
}
|
||||
|
||||
void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
|
||||
uptr range_end) {
|
||||
auto sym = Symbolizer::GetOrInit();
|
||||
@ -415,104 +334,11 @@ void CoverageData::Add(uptr pc, u32 *guard) {
|
||||
uptr idx = -guard_value - 1;
|
||||
if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
|
||||
return; // May happen after fork when pc_array_index becomes 0.
|
||||
CHECK_LT(idx * sizeof(uptr),
|
||||
atomic_load(&pc_array_size, memory_order_acquire));
|
||||
CHECK_LT(idx, atomic_load(&pc_array_size, memory_order_acquire));
|
||||
uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
|
||||
pc_array[idx] = BundlePcAndCounter(pc, counter);
|
||||
}
|
||||
|
||||
// Registers a pair caller=>callee.
|
||||
// When a given caller is seen for the first time, the callee_cache is added
|
||||
// to the global array cc_array, callee_cache[0] is set to caller and
|
||||
// callee_cache[1] is set to cache_size.
|
||||
// Then we are trying to add callee to callee_cache [2,cache_size) if it is
|
||||
// not there yet.
|
||||
// If the cache is full we drop the callee (may want to fix this later).
|
||||
void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
|
||||
uptr cache_size) {
|
||||
if (!cc_array) return;
|
||||
atomic_uintptr_t *atomic_callee_cache =
|
||||
reinterpret_cast<atomic_uintptr_t *>(callee_cache);
|
||||
uptr zero = 0;
|
||||
if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
|
||||
memory_order_seq_cst)) {
|
||||
uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
|
||||
CHECK_LT(idx * sizeof(uptr),
|
||||
atomic_load(&cc_array_size, memory_order_acquire));
|
||||
callee_cache[1] = cache_size;
|
||||
cc_array[idx] = callee_cache;
|
||||
}
|
||||
CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
|
||||
for (uptr i = 2; i < cache_size; i++) {
|
||||
uptr was = 0;
|
||||
if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
|
||||
memory_order_seq_cst)) {
|
||||
atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
|
||||
return;
|
||||
}
|
||||
if (was == callee) // Already have this callee.
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
uptr CoverageData::GetNumberOf8bitCounters() {
|
||||
return num_8bit_counters;
|
||||
}
|
||||
|
||||
// Map every 8bit counter to a 8-bit bitset and clear the counter.
|
||||
uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
|
||||
uptr num_new_bits = 0;
|
||||
uptr cur = 0;
|
||||
// For better speed we map 8 counters to 8 bytes of bitset at once.
|
||||
static const uptr kBatchSize = 8;
|
||||
CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
|
||||
for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
|
||||
u8 *c = counters_vec[i].counters;
|
||||
uptr n = counters_vec[i].n;
|
||||
CHECK_EQ(n % 16, 0);
|
||||
CHECK_EQ(cur % kBatchSize, 0);
|
||||
CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
|
||||
if (!bitset) {
|
||||
internal_bzero_aligned16(c, n);
|
||||
cur += n;
|
||||
continue;
|
||||
}
|
||||
for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
|
||||
CHECK_LT(cur, num_8bit_counters);
|
||||
u64 *pc64 = reinterpret_cast<u64*>(c + j);
|
||||
u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
|
||||
u64 c64 = *pc64;
|
||||
u64 old_bits_64 = *pb64;
|
||||
u64 new_bits_64 = old_bits_64;
|
||||
if (c64) {
|
||||
*pc64 = 0;
|
||||
for (uptr k = 0; k < kBatchSize; k++) {
|
||||
u64 x = (c64 >> (8 * k)) & 0xff;
|
||||
if (x) {
|
||||
u64 bit = 0;
|
||||
/**/ if (x >= 128) bit = 128;
|
||||
else if (x >= 32) bit = 64;
|
||||
else if (x >= 16) bit = 32;
|
||||
else if (x >= 8) bit = 16;
|
||||
else if (x >= 4) bit = 8;
|
||||
else if (x >= 3) bit = 4;
|
||||
else if (x >= 2) bit = 2;
|
||||
else if (x >= 1) bit = 1;
|
||||
u64 mask = bit << (8 * k);
|
||||
if (!(new_bits_64 & mask)) {
|
||||
num_new_bits++;
|
||||
new_bits_64 |= mask;
|
||||
}
|
||||
}
|
||||
}
|
||||
*pb64 = new_bits_64;
|
||||
}
|
||||
}
|
||||
}
|
||||
CHECK_EQ(cur, num_8bit_counters);
|
||||
return num_new_bits;
|
||||
}

uptr *CoverageData::data() {
  return pc_array;
}

@@ -593,132 +419,6 @@ static fd_t CovOpenFile(InternalScopedString *path, bool packed,
  return fd;
}

// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
  uptr max_idx = tr_event_pointer - tr_event_array;
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  for (uptr i = 0, n = size(); i < n; i++) {
    const char *module_name = "<unknown>";
    uptr module_address = 0;
    sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
                                     &module_address);
    out.append("%s 0x%zx\n", module_name, module_address);
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "trace-points");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-compunits");
  if (fd == kInvalidFd) return;
  out.clear();
  for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
    out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);

  fd = CovOpenFile(&path, false, "trace-events");
  if (fd == kInvalidFd) return;
  uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
  u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
  // The trace file could be huge, and may not be written with a single syscall.
  while (bytes_to_write) {
    uptr actually_written;
    if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
        actually_written <= bytes_to_write) {
      bytes_to_write -= actually_written;
      event_bytes += actually_written;
    } else {
      break;
    }
  }
  CloseFile(fd);
  VReport(1, " CovDump: Trace: %zd PCs written\n", size());
  VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}

// This function dumps the caller=>callee pairs into a file as a sequence of
// lines like "module_name offset".
void CoverageData::DumpCallerCalleePairs() {
  uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
  if (!max_idx) return;
  auto sym = Symbolizer::GetOrInit();
  if (!sym)
    return;
  InternalScopedString out(32 << 20);
  uptr total = 0;
  for (uptr i = 0; i < max_idx; i++) {
    uptr *cc_cache = cc_array[i];
    CHECK(cc_cache);
    uptr caller = cc_cache[0];
    uptr n_callees = cc_cache[1];
    const char *caller_module_name = "<unknown>";
    uptr caller_module_address = 0;
    sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
                                     &caller_module_address);
    for (uptr j = 2; j < n_callees; j++) {
      uptr callee = cc_cache[j];
      if (!callee) break;
      total++;
      const char *callee_module_name = "<unknown>";
      uptr callee_module_address = 0;
      sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
                                       &callee_module_address);
      out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
                 caller_module_address, callee_module_name,
                 callee_module_address);
    }
  }
  InternalScopedString path(kMaxPathLength);
  fd_t fd = CovOpenFile(&path, false, "caller-callee");
  if (fd == kInvalidFd) return;
  WriteToFile(fd, out.data(), out.length());
  CloseFile(fd);
  VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
}
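Concretely, each recorded pair becomes two consecutive lines in the caller-callee file, caller first and callee second, in the "module_name offset" form noted above. For illustration only (the module names and offsets below are made up):

  libfoo.so 0x21f40
  libbar.so 0x00c10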

// Record the current PC into the event buffer.
// Every event is a u32 value (index in tr_pc_array_index) so we compute
// it once and then cache in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(u32 *id) {
  // Will trap here if
  //  1. coverage is not enabled at run-time.
  //  2. The array tr_event_array is full.
  *tr_event_pointer = *id - 1;
  tr_event_pointer++;
}

void CoverageData::DumpCounters() {
  if (!common_flags()->coverage_counters) return;
  uptr n = coverage_data.GetNumberOf8bitCounters();
  if (!n) return;
  InternalScopedBuffer<u8> bitset(n);
  coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
  InternalScopedString path(kMaxPathLength);

  for (uptr m = 0; m < module_name_vec.size(); m++) {
    auto r = module_name_vec[m];
    CHECK(r.copied_module_name);
    CHECK_LE(r.beg, r.end);
    CHECK_LE(r.end, size());
    const char *base_name = StripModuleName(r.copied_module_name);
    fd_t fd =
        CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
    if (fd == kInvalidFd) return;
    WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
    CloseFile(fd);
    VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
            base_name);
  }
}

void CoverageData::DumpAsBitSet() {
  if (!common_flags()->coverage_bitset) return;
  if (!size()) return;
@@ -866,10 +566,7 @@ void CoverageData::DumpAll() {
  if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
    return;
  DumpAsBitSet();
  DumpCounters();
  DumpTrace();
  DumpOffsets();
  DumpCallerCalleePairs();
}

void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
@@ -940,12 +637,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
  atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
  if (static_cast<s32>(
          __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
    __sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
  coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                          callee, callee_cache16, 16);
  coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
                    guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
  coverage_enabled = true;
@@ -954,15 +647,12 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
  coverage_data.DumpAll();
#if SANITIZER_LINUX
  __sanitizer_dump_trace_pc_guard_coverage();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
                            const char *comp_unit_name) {
  coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
  coverage_data.InitializeCounters(counters, npcs);
  if (!common_flags()->coverage_direct) return;
  if (SANITIZER_ANDROID && coverage_enabled) {
    // dlopen/dlclose interceptors do not work on Android, so we rely on
@@ -980,65 +670,15 @@ uptr __sanitizer_get_total_unique_coverage() {
  return atomic_load(&coverage_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_total_unique_caller_callee_pairs() {
  return atomic_load(&caller_callee_counter, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(u32 *id) {
  __sanitizer_cov_with_check(id);
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(u32 *id) {
  __sanitizer_cov_with_check(id);
  coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
  ResetGlobalCounters();
  coverage_data.ReinitializeGuards();
  internal_bzero_aligned16(
      coverage_data.data(),
      RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
  *data = coverage_data.data();
  return coverage_data.size();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
  return coverage_data.GetNumberOf8bitCounters();
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
  return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
#if !SANITIZER_WINDOWS // weak does not work on Windows.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp1() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp2() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp4() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp8() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div4() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div8() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_gep() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_indir() {}
#endif // !SANITIZER_WINDOWS
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
} // extern "C"

@@ -156,14 +156,13 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
  return __sancov::SanitizerDumpCoverage(pcs, len);
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard(u32* guard) {
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
  if (!*guard) return;
  __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_cov_trace_pc_guard_init(u32* start, u32* end) {
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
                             u32* start, u32* end) {
  if (start == end || *start) return;
  __sancov::pc_guard_controller.InitTracePcGuard(start, end);
}

@@ -0,0 +1,21 @@
//===-- sanitizer_coverage_win_dll_thunk.cc -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a family of thunks that should be statically linked into
// the DLLs that have instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DLL_THUNK
#include "sanitizer_win_dll_thunk.h"
// Sanitizer Coverage interface functions.
#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DLL_THUNK

@@ -0,0 +1,21 @@
//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cc -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines things that need to be present in the application modules
// to interact with Sanitizer Coverage, when it is included in a dll.
//
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
#define SANITIZER_IMPORT_INTERFACE 1
#include "sanitizer_win_defs.h"
// Define weak alias for all weak functions imported from sanitizer coverage.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK

@@ -0,0 +1,22 @@
//===-- sanitizer_coverage_win_sections.cc --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines delimiters for Sanitizer Coverage's section.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include <stdint.h>
#pragma section(".SCOV$A", read, write) // NOLINT
#pragma section(".SCOV$Z", read, write) // NOLINT
extern "C" {
__declspec(allocate(".SCOV$A")) uint32_t __start___sancov_guards = 0;
__declspec(allocate(".SCOV$Z")) uint32_t __stop___sancov_guards = 0;
}
#endif // SANITIZER_WINDOWS
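The $A/$Z suffixes make the Windows linker sort these two dummy guards to the very start and end of the merged .SCOV section, so together they bracket every coverage guard emitted into that section. A rough sketch of how a runtime could walk the bracketed range follows; the helper is hypothetical, not part of this change, and whether the delimiter slots themselves are skipped is left to the consumer:

  // Visit every 32-bit guard the instrumentation placed between the
  // two section delimiters defined above.
  static void VisitSancovGuards(void (*visit)(uint32_t *)) {
    for (uint32_t *g = &__start___sancov_guards; g < &__stop___sancov_guards; ++g)
      visit(g);
  }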

@@ -0,0 +1,24 @@
//===-- sanitizer_coverage_win_weak_interception.cc -----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This module should be included in Sanitizer Coverage when it implemented as a
// shared library on Windows (dll), in order to delegate the calls of weak
// functions to the implementation in the main executable when a strong
// definition is provided.
//===----------------------------------------------------------------------===//
#ifdef SANITIZER_DYNAMIC
#include "sanitizer_win_weak_interception.h"
#include "sanitizer_interface_internal.h"
#include "sancov_flags.h"
// Check if strong definitions for weak functions are present in the main
// executable. If that is the case, override dll functions to point to strong
// implementations.
#define INTERFACE_FUNCTION(Name)
#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
#include "sanitizer_coverage_interface.inc"
#endif // SANITIZER_DYNAMIC

@@ -62,7 +62,7 @@ COMMON_FLAG(
COMMON_FLAG(
    int, verbosity, 0,
    "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
COMMON_FLAG(
    bool, leak_check_at_exit, true,
    "Invoke leak checking in an atexit handler. Has no effect if "
@@ -79,7 +79,9 @@ COMMON_FLAG(int, print_module_map, 0,
            "exits, 2 = print after each report.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, true,
            "If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
            "If set, registers the tool's custom SIGSEGV handler.")
COMMON_FLAG(bool, handle_sigbus, true,
            "If set, registers the tool's custom SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
            "If set, registers the tool's custom SIGABRT handler.")
COMMON_FLAG(bool, handle_sigill, false,
@@ -190,6 +192,9 @@ COMMON_FLAG(bool, intercept_strstr, true,
COMMON_FLAG(bool, intercept_strspn, true,
            "If set, uses custom wrappers for strspn and strcspn function "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strtok, true,
            "If set, uses a custom wrapper for the strtok function "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
            "If set, uses custom wrappers for strpbrk function "
            "to find more errors.")

@@ -69,6 +69,32 @@ extern "C" {
  int __sanitizer_get_module_and_offset_for_pc(
      __sanitizer::uptr pc, char *module_path,
      __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
} // extern "C"

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp1();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp2();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div4();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_div8();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_gep();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_indir();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
                                         __sanitizer::u32*);
} // extern "C"

#endif // SANITIZER_INTERFACE_INTERNAL_H

@@ -21,8 +21,11 @@

// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
#if SANITIZER_IMPORT_INTERFACE
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
#else
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
#endif
# define SANITIZER_WEAK_ATTRIBUTE
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
@@ -32,11 +35,46 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif

#if (SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_WINDOWS) && !SANITIZER_GO
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
//
// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
//
// For example:
//   SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
//
#if SANITIZER_WINDOWS
#include "sanitizer_win_defs.h"
# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
    WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
#else
# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
    extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
    ReturnType Name(__VA_ARGS__)
#endif

// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
// will evaluate to a null pointer when not defined.
#if (SANITIZER_LINUX || SANITIZER_MAC) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
// For some weak hooks that will be called very often and we want to avoid the
// overhead of executing the default implementation when it is not necessary,
// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to only define the default
// implementation for platforms that doesn't support weak symbols. For example:
//
// #if !SANITIZER_SUPPORT_WEAK_HOOKS
//   SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
//     return a > b;
//   }
// #endif
//
// And then use it as: if (compare_hook) compare_hook(a, b);
//----------------------------------------------------------------------------//
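Putting the two idioms above together, a minimal sketch, reusing the hypothetical compare_hook from the comment rather than any real interface function:

  // Default is only compiled in where real weak hooks are unavailable.
  #if !SANITIZER_SUPPORTS_WEAK_HOOKS
  SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) { return a > b; }
  #endif
  // A strong compare_hook linked into the main binary overrides the default;
  // on platforms with real weak hooks, callers guard the call:
  //   if (compare_hook) compare_hook(a, b);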

// We can use .preinit_array section on Linux to call sanitizer initialization
// functions very early in the process startup (unless PIC macro is defined).
@@ -114,6 +152,12 @@ typedef u32 operator_new_size_type;
# endif
#endif

#if SANITIZER_MAC
// On Darwin, thread IDs are 64-bit even on 32-bit systems.
typedef u64 tid_t;
#else
typedef uptr tid_t;
#endif

// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
@@ -289,7 +333,12 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
enum LinkerInitialized { LINKER_INITIALIZED = 0 };

#if !defined(_MSC_VER) || defined(__clang__)
#if SANITIZER_S390_31
#define GET_CALLER_PC() \
  (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
#else
#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
#endif
#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
inline void Trap() {
  __builtin_trap();

@@ -77,6 +77,20 @@ extern char **environ; // provided by crt1
#include <sys/signal.h>
#endif

#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16)
# define SANITIZER_USE_GETAUXVAL 1
#else
# define SANITIZER_USE_GETAUXVAL 0
#endif

#if SANITIZER_USE_GETAUXVAL
#include <sys/auxv.h>
#endif

#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
@@ -370,7 +384,7 @@ bool FileExists(const char *filename) {
  return S_ISREG(st.st_mode);
}

uptr GetTid() {
tid_t GetTid() {
#if SANITIZER_FREEBSD
  return (uptr)pthread_self();
#else
@@ -805,6 +819,8 @@ uptr GetPageSize() {
  return 4096;
#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
  return EXEC_PAGESIZE;
#elif SANITIZER_USE_GETAUXVAL
  return getauxval(AT_PAGESZ);
#else
  return sysconf(_SC_PAGESIZE);  // EXEC_PAGESIZE may not be trustworthy.
#endif
@@ -1097,36 +1113,50 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  long long res;
  /* Stack frame offsets.  */
#if _CALL_ELF != 2
#define FRAME_MIN_SIZE 112
#define FRAME_TOC_SAVE 40
  // Stack frame structure.
#if SANITIZER_PPC64V1
  // Back chain == 0        (SP + 112)
  // Frame (112 bytes):
  //   Parameter save area  (SP + 48), 8 doublewords
  //   TOC save area        (SP + 40)
  //   Link editor doubleword (SP + 32)
  //   Compiler doubleword  (SP + 24)
  //   LR save area         (SP + 16)
  //   CR save area         (SP + 8)
  //   Back chain           (SP + 0)
# define FRAME_SIZE 112
# define FRAME_TOC_SAVE_OFFSET 40
#elif SANITIZER_PPC64V2
  // Back chain == 0        (SP + 32)
  // Frame (32 bytes):
  //   TOC save area        (SP + 24)
  //   LR save area         (SP + 16)
  //   CR save area         (SP + 8)
  //   Back chain           (SP + 0)
# define FRAME_SIZE 32
# define FRAME_TOC_SAVE_OFFSET 24
#else
#define FRAME_MIN_SIZE 32
#define FRAME_TOC_SAVE 24
# error "Unsupported PPC64 ABI"
#endif
  if (!fn || !child_stack)
    return -EINVAL;
  CHECK_EQ(0, (uptr)child_stack % 16);
  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
  ((unsigned long long *)child_stack)[0] = (uptr)fn;
  ((unsigned long long *)child_stack)[1] = (uptr)arg;

  register int (*__fn)(void *) __asm__("r3") = fn;
  register void *__cstack __asm__("r4") = child_stack;
  register int __flags __asm__("r5") = flags;
  register void * __arg __asm__("r6") = arg;
  register int * __ptidptr __asm__("r7") = parent_tidptr;
  register void * __newtls __asm__("r8") = newtls;
  register int * __ctidptr __asm__("r9") = child_tidptr;
  register void *__arg __asm__("r6") = arg;
  register int *__ptidptr __asm__("r7") = parent_tidptr;
  register void *__newtls __asm__("r8") = newtls;
  register int *__ctidptr __asm__("r9") = child_tidptr;

  __asm__ __volatile__(
      /* fn, arg, child_stack are saved acrVoss the syscall */
      /* fn and arg are saved across the syscall */
      "mr 28, %5\n\t"
      "mr 29, %6\n\t"
      "mr 27, %8\n\t"

      /* syscall
        r0 == __NR_clone
        r3 == flags
        r4 == child_stack
        r5 == parent_tidptr
@@ -1144,15 +1174,21 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
      "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
      "bne- cr1, 1f\n\t"

      /* Set up stack frame */
      "li    29, 0\n\t"
      "stdu  29, -8(1)\n\t"
      "stdu  1, -%12(1)\n\t"
      /* Do the function call */
      "std   2, %13(1)\n\t"
#if _CALL_ELF != 2
#if SANITIZER_PPC64V1
      "ld    0, 0(28)\n\t"
      "ld    2, 8(28)\n\t"
      "mtctr 0\n\t"
#else
#elif SANITIZER_PPC64V2
      "mr    12, 28\n\t"
      "mtctr 12\n\t"
#else
# error "Unsupported PPC64 ABI"
#endif
      "mr    3, 27\n\t"
      "bctrl\n\t"
@@ -1166,13 +1202,151 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
      "1:\n\t"
      "mr %0, 3\n\t"
      : "=r" (res)
      : "0" (-1), "i" (EINVAL),
        "i" (__NR_clone), "i" (__NR_exit),
        "r" (__fn), "r" (__cstack), "r" (__flags),
        "r" (__arg), "r" (__ptidptr), "r" (__newtls),
        "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
      : "cr0", "cr1", "memory", "ctr",
        "r0", "r29", "r27", "r28");
      : "0" (-1),
        "i" (EINVAL),
        "i" (__NR_clone),
        "i" (__NR_exit),
        "r" (__fn),
        "r" (__cstack),
        "r" (__flags),
        "r" (__arg),
        "r" (__ptidptr),
        "r" (__newtls),
        "r" (__ctidptr),
        "i" (FRAME_SIZE),
        "i" (FRAME_TOC_SAVE_OFFSET)
      : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
  return res;
}
#elif defined(__i386__) && SANITIZER_LINUX
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  int res;
  if (!fn || !child_stack)
    return -EINVAL;
  CHECK_EQ(0, (uptr)child_stack % 16);
  child_stack = (char *)child_stack - 7 * sizeof(unsigned int);
  ((unsigned int *)child_stack)[0] = (uptr)flags;
  ((unsigned int *)child_stack)[1] = (uptr)0;
  ((unsigned int *)child_stack)[2] = (uptr)fn;
  ((unsigned int *)child_stack)[3] = (uptr)arg;
  __asm__ __volatile__(
      /* %eax = syscall(%eax = SYSCALL(clone),
       * %ebx = flags,
       * %ecx = child_stack,
       * %edx = parent_tidptr,
       * %esi = new_tls,
       * %edi = child_tidptr)
       */

      /* Obtain flags */
      "movl (%%ecx), %%ebx\n"
      /* Do the system call */
      "pushl %%ebx\n"
      "pushl %%esi\n"
      "pushl %%edi\n"
      /* Remember the flag value.  */
      "movl %%ebx, (%%ecx)\n"
      "int $0x80\n"
      "popl %%edi\n"
      "popl %%esi\n"
      "popl %%ebx\n"

      /* if (%eax != 0)
       *   return;
       */

      "test %%eax,%%eax\n"
      "jnz 1f\n"

      /* terminate the stack frame */
      "xorl %%ebp,%%ebp\n"
      /* Call FN. */
      "call *%%ebx\n"
#ifdef PIC
      "call here\n"
      "here:\n"
      "popl %%ebx\n"
      "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
#endif
      /* Call exit */
      "movl %%eax, %%ebx\n"
      "movl %2, %%eax\n"
      "int $0x80\n"
      "1:\n"
      : "=a" (res)
      : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
        "c"(child_stack),
        "d"(parent_tidptr),
        "S"(newtls),
        "D"(child_tidptr)
      : "memory");
  return res;
}
#elif defined(__arm__) && SANITIZER_LINUX
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  unsigned int res;
  if (!fn || !child_stack)
    return -EINVAL;
  child_stack = (char *)child_stack - 2 * sizeof(unsigned int);
  ((unsigned int *)child_stack)[0] = (uptr)fn;
  ((unsigned int *)child_stack)[1] = (uptr)arg;
  register int r0 __asm__("r0") = flags;
  register void *r1 __asm__("r1") = child_stack;
  register int *r2 __asm__("r2") = parent_tidptr;
  register void *r3 __asm__("r3") = newtls;
  register int *r4 __asm__("r4") = child_tidptr;
  register int r7 __asm__("r7") = __NR_clone;

#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
# define ARCH_HAS_BX
#endif
#if __ARM_ARCH > 4
# define ARCH_HAS_BLX
#endif

#ifdef ARCH_HAS_BX
# ifdef ARCH_HAS_BLX
#  define BLX(R) "blx "  #R "\n"
# else
#  define BLX(R) "mov lr, pc; bx " #R "\n"
# endif
#else
# define BLX(R)  "mov lr, pc; mov pc," #R "\n"
#endif

  __asm__ __volatile__(
      /* %r0 = syscall(%r7 = SYSCALL(clone),
       * %r0 = flags,
       * %r1 = child_stack,
       * %r2 = parent_tidptr,
       * %r3 = new_tls,
       * %r4 = child_tidptr)
       */

      /* Do the system call */
      "swi 0x0\n"

      /* if (%r0 != 0)
       *   return %r0;
       */
      "cmp r0, #0\n"
      "bne 1f\n"

      /* In the child, now. Call "fn(arg)". */
      "ldr r0, [sp, #4]\n"
      "ldr ip, [sp], #8\n"
      BLX(ip)
      /* Call _exit(%r0). */
      "mov r7, %7\n"
      "swi 0x0\n"
      "1:\n"
      "mov %0, r0\n"
      : "=r"(res)
      : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
        "i"(__NR_exit)
      : "memory");
  return res;
}
#endif  // defined(__x86_64__) && SANITIZER_LINUX
@@ -1227,7 +1401,9 @@ bool IsHandledDeadlySignal(int signum) {
    return true;
  if (common_flags()->handle_sigfpe && signum == SIGFPE)
    return true;
  return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
  if (common_flags()->handle_segv && signum == SIGSEGV)
    return true;
  return common_flags()->handle_sigbus && signum == SIGBUS;
}

#if !SANITIZER_GO
@@ -1395,6 +1571,21 @@ void MaybeReexec() {

void PrintModuleMap() { }

void CheckNoDeepBind(const char *filename, int flag) {
#ifdef RTLD_DEEPBIND
  if (flag & RTLD_DEEPBIND) {
    Report(
        "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
        " which is incompatibe with sanitizer runtime "
        "(see https://github.com/google/sanitizers/issues/611 for details"
        "). If you want to run %s library under sanitizers please remove "
        "RTLD_DEEPBIND from dlopen flags.\n",
        filename, filename);
    Die();
  }
#endif
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
  UNREACHABLE("FindAvailableMemoryRange is not available");
  return 0;

@@ -48,7 +48,8 @@ int internal_sigaction_syscall(int signum, const void *act, void *oldact);
#endif
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
    || defined(__powerpc64__) || defined(__s390__)
    || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
    || defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr);
#endif

@@ -183,8 +183,8 @@ void InitTlsSize() { }
#endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO

#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \
    || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__)) \
    && SANITIZER_LINUX && !SANITIZER_ANDROID
    || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \
    || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;

@@ -192,14 +192,14 @@ uptr ThreadDescriptorSize() {
  uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
  if (val)
    return val;
#if defined(__x86_64__) || defined(__i386__)
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
    char *end;
    int minor = internal_simple_strtoll(buf + 8, &end, 10);
    if (end != buf + 8 && (*end == '\0' || *end == '.')) {
    if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) {
      int patch = 0;
      if (*end == '.')
        // strtoll will return 0 if no valid conversion could be performed
@@ -208,6 +208,9 @@ uptr ThreadDescriptorSize() {
      /* sizeof(struct pthread) values from various glibc versions.  */
      if (SANITIZER_X32)
        val = 1728; // Assume only one particular version for x32.
      // For ARM sizeof(struct pthread) changed in Glibc 2.23.
      else if (SANITIZER_ARM)
        val = minor <= 22 ? 1120 : 1216;
      else if (minor <= 3)
        val = FIRST_32_SECOND_64(1104, 1696);
      else if (minor == 4)
@@ -270,9 +273,7 @@ static uptr TlsPreTcbSize() {
# endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
  InitTlsSize();
  g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#endif
@@ -295,7 +296,7 @@ uptr ThreadSelf() {
                rdhwr %0,$29;\
                .set pop" : "=r" (thread_pointer));
  descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__)
# elif defined(__aarch64__) || defined(__arm__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
      ThreadDescriptorSize();
# elif defined(__s390__)
@@ -344,7 +345,8 @@ static void GetTls(uptr *addr, uptr *size) {
  *size = GetTlsSize();
  *addr -= *size;
  *addr += ThreadDescriptorSize();
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__)
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
    || defined(__arm__)
  *addr = ThreadSelf();
  *size = GetTlsSize();
# else
@@ -379,6 +381,8 @@ uptr GetTlsSize() {
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#elif defined(__mips__) || defined(__powerpc64__)
  return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
  return g_tls_size;
#endif
@@ -443,7 +447,9 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      cur_module.addAddressRange(cur_beg, cur_end, executable);
      bool readable = phdr->p_flags & PF_R;
      cur_module.addAddressRange(cur_beg, cur_end, executable,
                                 readable);
    }
  }
  data->modules->push_back(cur_module);

@@ -136,6 +136,18 @@ static bool FixedCVE_2016_2143() {
  if (ptr[0] == '.')
    patch = internal_simple_strtoll(ptr+1, &ptr, 10);
  if (major < 3) {
    if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
        internal_strstr(ptr, ".el6")) {
      // Check RHEL6
      int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
      if (r1 >= 657) // 2.6.32-657.el6 or later
        return true;
      if (r1 == 642 && ptr[0] == '.') {
        int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
        if (r2 >= 9)  // 2.6.32-642.9.1.el6 or later
          return true;
      }
    }
    // <3.0 is bad.
    return false;
  } else if (major == 3) {
@@ -145,6 +157,18 @@ static bool FixedCVE_2016_2143() {
    // 3.12.58+ is OK.
    if (minor == 12 && patch >= 58)
      return true;
    if (minor == 10 && patch == 0 && ptr[0] == '-' &&
        internal_strstr(ptr, ".el7")) {
      // Check RHEL7
      int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
      if (r1 >= 426) // 3.10.0-426.el7 or later
        return true;
      if (r1 == 327 && ptr[0] == '.') {
        int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
        if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
          return true;
      }
    }
    // Otherwise, bad.
    return false;
  } else if (major == 4) {