Update compiler-rt to 3.9.0 release, and update the build glue for
libcompiler_rt and libclang_rt.
commit 53b715b5ba
@@ -14,7 +14,7 @@ Full text of the relevant licenses is included below.
University of Illinois/NCSA
Open Source License

Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT

All rights reserved.
@@ -59,6 +59,23 @@ extern "C" {
deallocation of "ptr". */
void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
void __sanitizer_free_hook(const volatile void *ptr);

/* Installs a pair of hooks for malloc/free.
Several (currently, 5) hook pairs may be installed, they are executed
in the order they were installed and after calling
__sanitizer_malloc_hook/__sanitizer_free_hook.
Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
chained and do not rely on weak symbols working on the platform, but
require __sanitizer_install_malloc_and_free_hooks to be called at startup
and thus will not be called on malloc/free very early in the process.
Returns the number of hooks currently installed or 0 on failure.
Not thread-safe, should be called in the main thread before starting
other threads.
*/
int __sanitizer_install_malloc_and_free_hooks(
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));

#ifdef __cplusplus
} // extern "C"
#endif
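For reference, a minimal sketch of a client installing such a hook pair; only the declarations above come from the header, the hook bodies and counters are illustrative:

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>
    #include <stdlib.h>

    static size_t g_allocs, g_frees;

    static void my_malloc_hook(const volatile void *ptr, size_t size) {
      (void)ptr; (void)size;
      ++g_allocs;   // keep hooks trivial; they run on every allocation
    }
    static void my_free_hook(const volatile void *ptr) {
      (void)ptr;
      ++g_frees;
    }

    int main(void) {
      // Not thread-safe: install in the main thread before starting others.
      if (!__sanitizer_install_malloc_and_free_hooks(my_malloc_hook, my_free_hook))
        fprintf(stderr, "no hook slot available\n");
      free(malloc(16));
      printf("allocs=%zu frees=%zu\n", g_allocs, g_frees);
      return 0;
    }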
@@ -41,6 +41,9 @@ extern "C" {

// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (casted to void *).
void __sanitizer_set_report_fd(void *fd);

// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
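A small example of redirecting reports with the call declared above; the file name is illustrative:

    #include <sanitizer/common_interface_defs.h>

    int main(void) {
      // Reports from this process go to "asan-report.<pid>" instead of stderr.
      __sanitizer_set_report_path("asan-report");
      // ... rest of the program ...
      return 0;
    }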
@@ -128,8 +131,45 @@ extern "C" {
const void *s2, size_t n, int result);
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
const char *s2, size_t n, int result);
void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
const char *s2, int result);
void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
const char *s2, char *result);
void __sanitizer_weak_hook_memmem(void *called_pc,
const void *s1, size_t len1,
const void *s2, size_t len2, void *result);

// Prints stack traces for all live heap allocations ordered by total
// allocation size until `top_percent` of total live heap is shown.
// `top_percent` should be between 1 and 100.
// Experimental feature currently available only with asan on Linux/x86_64.
void __sanitizer_print_memory_profile(size_t top_percent);

// Fiber annotation interface.
// Before switching to a different stack, one must call
// __sanitizer_start_switch_fiber with a pointer to the bottom of the
// destination stack and its size. When code starts running on the new stack,
// it must call __sanitizer_finish_switch_fiber to finalize the switch.
// The start_switch function takes a void** to store the current fake stack if
// there is one (it is needed when detect_stack_use_after_return is enabled).
// When restoring a stack, this pointer must be given to the finish_switch
// function. In most cases, this void* can be stored on the stack just before
// switching. When leaving a fiber definitely, null must be passed as first
// argument to the start_switch function so that the fake stack is destroyed.
// If you do not want support for stack use-after-return detection, you can
// always pass null to these two functions.
// Note that the fake stack mechanism is disabled during fiber switch, so if a
// signal callback runs during the switch, it will not benefit from the stack
// use-after-return detection.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
const void *bottom, size_t size);
void __sanitizer_finish_switch_fiber(void *fake_stack_save);
#ifdef __cplusplus
} // extern "C"
#endif
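A rough sketch of how a ucontext-based coroutine library's switch primitive might apply these annotations; the Fiber struct and SwitchTo are illustrative, only the two __sanitizer_* calls come from the interface above:

    #include <sanitizer/common_interface_defs.h>
    #include <ucontext.h>
    #include <stddef.h>

    struct Fiber {
      ucontext_t ctx;
      void *stack_bottom;   // bottom of this fiber's stack
      size_t stack_size;
      void *fake_stack;     // ASan bookkeeping, one slot per suspended stack
    };

    void SwitchTo(Fiber *from, Fiber *to) {
      // Save 'from's fake stack and announce the destination stack to ASan.
      __sanitizer_start_switch_fiber(&from->fake_stack,
                                     to->stack_bottom, to->stack_size);
      swapcontext(&from->ctx, &to->ctx);
      // Someone switched back to us; finalize on 'from's stack.
      __sanitizer_finish_switch_fiber(from->fake_stack);
    }

Per the comments above, a brand-new fiber's entry function would call __sanitizer_finish_switch_fiber(nullptr) once before doing any work, and a fiber that exits for good passes null as the first argument to __sanitizer_start_switch_fiber so its fake stack is destroyed.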
50 contrib/compiler-rt/include/sanitizer/esan_interface.h Normal file
@@ -0,0 +1,50 @@
//===-- sanitizer/esan_interface.h ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of EfficiencySanitizer, a family of performance tuners.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ESAN_INTERFACE_H
#define SANITIZER_ESAN_INTERFACE_H

#include <sanitizer/common_interface_defs.h>

// We declare our interface routines as weak to allow the user to avoid
// ifdefs and instead use this pattern to allow building the same sources
// with and without our runtime library:
// if (__esan_report)
// __esan_report();
#ifdef _MSC_VER
/* selectany is as close to weak as we'll get. */
#define COMPILER_RT_WEAK __declspec(selectany)
#elif __GNUC__
#define COMPILER_RT_WEAK __attribute__((weak))
#else
#define COMPILER_RT_WEAK
#endif

#ifdef __cplusplus
extern "C" {
#endif

// This function can be called mid-run (or at the end of a run for
// a server process that doesn't shut down normally) to request that
// data for that point in the run be reported from the tool.
void COMPILER_RT_WEAK __esan_report();

// This function returns the number of samples that the esan tool has collected
// to this point. This is useful for testing.
unsigned int COMPILER_RT_WEAK __esan_get_sample_count();

#ifdef __cplusplus
} // extern "C"
#endif

#endif // SANITIZER_ESAN_INTERFACE_H
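Following the pattern the header itself documents, a minimal sketch of a program that works whether or not the EfficiencySanitizer runtime is linked in:

    #include <sanitizer/esan_interface.h>
    #include <stdio.h>

    int main(void) {
      // The interface symbols are weak, so they are null when the tool is absent.
      if (__esan_get_sample_count)
        printf("esan samples so far: %u\n", __esan_get_sample_count());
      if (__esan_report)
        __esan_report();
      return 0;
    }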
@@ -1835,6 +1835,17 @@
__sanitizer_syscall_pre_impl_vfork()
#define __sanitizer_syscall_post_vfork(res) \
__sanitizer_syscall_post_impl_vfork(res)
#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) \
__sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) \
__sanitizer_syscall_post_impl_sigaction(res, (long)signum, (long)act, \
(long)oldact)
#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) \
__sanitizer_syscall_pre_impl_rt_sigaction((long)signum, (long)act, \
(long)oldact, (long)sz)
#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \
__sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, \
(long)oldact, (long)sz)

// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
@@ -1889,7 +1900,6 @@
#define __sanitizer_syscall_pre_query_module(...)
#define __sanitizer_syscall_pre_readahead(...)
#define __sanitizer_syscall_pre_readdir(...)
#define __sanitizer_syscall_pre_rt_sigaction(...)
#define __sanitizer_syscall_pre_rt_sigreturn(...)
#define __sanitizer_syscall_pre_rt_sigsuspend(...)
#define __sanitizer_syscall_pre_security(...)
@@ -1903,7 +1913,6 @@
#define __sanitizer_syscall_pre_setreuid32(...)
#define __sanitizer_syscall_pre_set_thread_area(...)
#define __sanitizer_syscall_pre_setuid32(...)
#define __sanitizer_syscall_pre_sigaction(...)
#define __sanitizer_syscall_pre_sigaltstack(...)
#define __sanitizer_syscall_pre_sigreturn(...)
#define __sanitizer_syscall_pre_sigsuspend(...)
@@ -1971,7 +1980,6 @@
#define __sanitizer_syscall_post_query_module(res, ...)
#define __sanitizer_syscall_post_readahead(res, ...)
#define __sanitizer_syscall_post_readdir(res, ...)
#define __sanitizer_syscall_post_rt_sigaction(res, ...)
#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
#define __sanitizer_syscall_post_security(res, ...)
@@ -1985,7 +1993,6 @@
#define __sanitizer_syscall_post_setreuid32(res, ...)
#define __sanitizer_syscall_post_set_thread_area(res, ...)
#define __sanitizer_syscall_post_setuid32(res, ...)
#define __sanitizer_syscall_post_sigaction(res, ...)
#define __sanitizer_syscall_post_sigaltstack(res, ...)
#define __sanitizer_syscall_post_sigreturn(res, ...)
#define __sanitizer_syscall_post_sigsuspend(res, ...)
@@ -3062,7 +3069,13 @@ void __sanitizer_syscall_pre_impl_fork();
void __sanitizer_syscall_post_impl_fork(long res);
void __sanitizer_syscall_pre_impl_vfork();
void __sanitizer_syscall_post_impl_vfork(long res);

void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
long oldact);
void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
long oldact, long sz);
void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
long oldact, long sz);
#ifdef __cplusplus
} // extern "C"
#endif
@@ -47,6 +47,7 @@ static struct AsanDeactivatedFlags {
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);

cf.SetDefaults();
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
@@ -61,7 +62,7 @@ static struct AsanDeactivatedFlags {
parser.ParseString(env);
}

SetVerbosity(cf.verbosity);
InitializeCommonFlags(&cf);

if (Verbosity()) ReportUnrecognizedFlags();
@@ -223,7 +223,7 @@ void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {

struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);

@@ -457,29 +457,28 @@ struct Allocator {
return res;
}

void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
// It's not safe to push a chunk in quarantine on invalid free.
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
return true;
}

// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);

if (m->alloc_type != alloc_type) {
if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}

CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
@@ -516,13 +515,24 @@ struct Allocator {

uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}

ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;

if (m->alloc_type != alloc_type) {
if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}

if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
}

QuarantineChunk(m, ptr, stack, alloc_type);
}

@@ -655,6 +665,9 @@ static AsanAllocator &get_allocator() {
bool AsanChunkView::IsValid() {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
@@ -668,12 +681,15 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}

u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }

StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
return GetStackTraceFromId(GetAllocStackId());
}

StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
return GetStackTraceFromId(GetFreeStackId());
}

void InitializeAllocator(const AllocatorOptions &options) {
@@ -754,7 +770,7 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}

uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr) return 0;
uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
@@ -49,14 +49,17 @@ void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
bool IsAllocated(); // Checks if the memory is currently allocated.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId();
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
@@ -171,7 +174,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack);

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack);
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);

uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
@@ -31,7 +31,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
for (uptr i = 0; i < (1U << class_id); i++) {
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
@@ -121,7 +121,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
CHECK_LT(ptr, base + (1UL << stack_size_log));
CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
@@ -69,12 +69,12 @@ class FakeStack {

// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}

// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
return (1ULL << stack_size_log) * kNumberOfSizeClasses;
return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
}

// Number of bytes requires for the whole object.
@@ -91,12 +91,12 @@ class FakeStack {
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}

static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}

// Divide n by the numbe of frames in size class.
@@ -114,7 +114,8 @@ class FakeStack {
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
(1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
(((uptr)1) << stack_size_log) * class_id +
BytesInSizeClass(class_id) * pos;
}

// Allocate the fake frame.
@@ -137,7 +138,7 @@ class FakeStack {

// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
return 1UL << (class_id + kMinStackFrameSizeLog);
return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
}

// The fake frame is guaranteed to have a right redzone.
@@ -159,7 +160,7 @@ class FakeStack {
static const uptr kFlagsOffset = 4096; // This is were the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;

uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
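These shift changes matter mainly on LLP64 targets such as 64-bit Windows, where unsigned long is only 32 bits wide; a small illustrative sketch of the difference (not part of the diff):

    #include <cstdint>

    // On LLP64, 1UL is a 32-bit value, so shifting it by 40 is undefined and in
    // practice truncates; widening the operand first keeps the full 64-bit value.
    std::uint64_t narrow_shift(unsigned s) { return 1UL << s; }              // wrong for s >= 32 on LLP64
    std::uint64_t wide_shift(unsigned s)   { return std::uint64_t(1) << s; } // correct up to s == 63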
@@ -116,7 +116,7 @@ void InitializeFlags() {
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif

SetVerbosity(common_flags()->verbosity);
InitializeCommonFlags();

// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
@@ -159,6 +159,14 @@ void InitializeFlags() {
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
}
if (!f->replace_str && common_flags()->intercept_strchr) {
Report("WARNING: strchr* interceptors are enabled even though "
"replace_str=0. Use intercept_strchr=0 to disable them.");
}
}

} // namespace __asan
@@ -43,7 +43,7 @@ ASAN_FLAG(
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
"If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
@@ -77,6 +77,8 @@ ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, print_scariness, false,
"Print the scariness score. Experimental.")
ASAN_FLAG(bool, atexit, false,
"If set, prints ASan exit stats even after program terminates "
"successfully.")
@@ -104,7 +106,7 @@ ASAN_FLAG(bool, alloc_dealloc_mismatch,
"Report errors on malloc/delete, new/free, new/delete[], etc.")

ASAN_FLAG(bool, new_delete_type_mismatch, true,
"Report errors on mismatch betwen size of new and delete.")
"Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
bool, strict_init_order, false,
"If true, assume that dynamic initializers can never access globals from "
@@ -135,3 +137,5 @@ ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
"(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
"Use special ODR indicator symbol for ODR violation detection")
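These flags, like the other ASAN_FLAG entries, are set through ASAN_OPTIONS at run time. A hedged illustration of the kind of cross-module clash the ODR machinery targets; the file names and shared-library setup are made up, and use_odr_indicator additionally assumes matching compiler support:

    // libplugin.cpp, built into libplugin.so:
    int shared_table[16];
    // main.cpp, built into the executable that links or dlopens libplugin.so:
    int shared_table[32];   // same name, different size: an ODR violation

    // Enable detection (and the new indicator-based check) when running:
    //   ASAN_OPTIONS=detect_odr_violation=2:use_odr_indicator=1 ./main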
@@ -135,6 +135,70 @@ bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
return false;
}

enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
};

// Check ODR violation for given global G via special ODR indicator. We use
// this method in case compiler instruments global variables through their
// local aliases.
static void CheckODRViolationViaIndicator(const Global *g) {
u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
if (*odr_indicator == UNREGISTERED) {
*odr_indicator = REGISTERED;
return;
}
// If *odr_indicator is DEFINED, some module have already registered
// externally visible symbol with the same name. This is an ODR violation.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->odr_indicator == l->g->odr_indicator &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}

// Check ODR violation for given global G by checking if it's already poisoned.
// We use this method in case compiler doesn't use private aliases for global
// variables.
static void CheckODRViolationViaPoisoning(const Global *g) {
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
}

// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// cheaply detect ODR violation: if we try to poison an already poisoned
// global, we have ODR violation error.
// In latter case, we poison each symbol exactly once, so we use special
// indicator symbol to perform similar check.
// In either case, compiler provides a special odr_indicator field to Global
// structure, that can contain two kinds of values:
// 1) Non-zero value. In this case, odr_indicator is an address of
// corresponding indicator variable for given global.
// 2) Zero. This means that we don't use private aliases for global variables
// and can freely check ODR violation with the first method.
//
// This routine chooses between two different methods of ODR violation
// detection.
static inline bool UseODRIndicator(const Global *g) {
// Use ODR indicator method iff use_odr_indicator flag is set and
// indicator symbol address is not 0.
return flags()->use_odr_indicator && g->odr_indicator > 0;
}

// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
@@ -144,22 +208,24 @@ static void RegisterGlobal(const Global *g) {
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
if (!AddrIsAlignedByGranularity(g->beg)) {
Report("The following global variable is not properly aligned.\n");
Report("This may happen if another global with the same name\n");
Report("resides in another non-instrumented module.\n");
Report("Or the global comes from a C file built w/o -fno-common.\n");
Report("In either case this is likely an ODR violation bug,\n");
Report("but AddressSanitizer can not provide more details.\n");
ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
CHECK(AddrIsAlignedByGranularity(g->beg));
}
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules.
if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
// This check may not be enough: if the first global is much larger
// the entire redzone of the second global may be within the first global.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
!IsODRViolationSuppressed(g->name))
ReportODRViolation(g, FindRegistrationSite(g),
l->g, FindRegistrationSite(l->g));
}
}
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
else
CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
@@ -190,6 +256,12 @@ static void UnregisterGlobal(const Global *g) {
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
// implementation. It might not be worth doing anyway.

// Release ODR indicator.
if (UseODRIndicator(g)) {
u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
*odr_indicator = UNREGISTERED;
}
}

void StopInitOrderChecking() {
@@ -212,6 +284,25 @@ void StopInitOrderChecking() {
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT

// Apply __asan_register_globals to all globals found in the same loaded
// executable or shared library as `flag'. The flag tracks whether globals have
// already been registered or not for this image.
void __asan_register_image_globals(uptr *flag) {
if (*flag)
return;
AsanApplyToGlobals(__asan_register_globals, flag);
*flag = 1;
}

// This mirrors __asan_register_image_globals.
void __asan_unregister_image_globals(uptr *flag) {
if (!*flag)
return;
AsanApplyToGlobals(__asan_unregister_globals, flag);
*flag = 0;
}

// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
@@ -19,16 +19,20 @@ extern "C" {
// Every time the ASan ABI changes we also change the version number in the
// __asan_init function name. Objects built with incompatible ASan ABI
// versions will not link with run-time.
//
// Changes between ABI versions:
// v1=>v2: added 'module_name' to __asan_global
// v2=>v3: stack frame description (created by the compiler)
// contains the function PC as the 3-rd field (see
// DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global.
// contains the function PC as the 3rd field (see
// DescribeAddressIfStack)
// v3=>v4: added '__asan_global_source_location' to __asan_global
// v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
// __asan_stack_free_ functions.
// __asan_stack_free_ functions
// v5=>v6: changed the name of the version check symbol
#define __asan_version_mismatch_check __asan_version_mismatch_check_v6
// v6=>v7: added 'odr_indicator' to __asan_global
// v7=>v8: added '__asan_(un)register_image_globals' functions for dead
// stripping support on Mach-O platforms
#define __asan_version_mismatch_check __asan_version_mismatch_check_v8
}

#endif // ASAN_INIT_VERSION_H
@@ -21,6 +21,7 @@
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"

#if SANITIZER_POSIX
@@ -110,7 +111,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} while (0)

static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if ASAN_INTERCEPT_STRNLEN
#if SANITIZER_INTERCEPT_STRNLEN
if (REAL(strnlen)) {
return REAL(strnlen)(s, maxlen);
}
@@ -143,6 +144,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
(void) ctx; \

#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
@@ -195,6 +198,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
// Asan needs custom handling of these:
#undef SANITIZER_INTERCEPT_MEMSET
#undef SANITIZER_INTERCEPT_MEMMOVE
#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"

// Syscall interceptors don't have contexts, we don't support suppressions
@@ -218,6 +225,7 @@ struct ThreadStartParam {
atomic_uintptr_t is_registered;
};

#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
@@ -228,7 +236,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
return t->ThreadStart(GetTid(), &param->is_registered);
}

#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
@@ -242,7 +249,17 @@ INTERCEPTOR(int, pthread_create, void *thread,
ThreadStartParam param;
atomic_store(&param.t, 0, memory_order_relaxed);
atomic_store(&param.is_registered, 0, memory_order_relaxed);
int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
#if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
}
if (result == 0) {
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
@@ -271,7 +288,8 @@ DEFINE_REAL_PTHREAD_FUNCTIONS

#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
@@ -279,7 +297,8 @@ INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
#endif

INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return nullptr;
@@ -287,7 +306,8 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {

INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
if (!IsHandledDeadlySignal(signum) ||
common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
@@ -453,25 +473,6 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(char*, strchr, const char *str, int c) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strchr);
if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
// strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
// used.
if (asan_init_is_running) {
return REAL(strchr)(str, c);
}
ENSURE_ASAN_INITED();
char *result = REAL(strchr)(str, c);
if (flags()->replace_str) {
uptr len = REAL(strlen)(str);
uptr bytes_read = (result ? result - str : len) + 1;
ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
}
return result;
}

#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -549,7 +550,6 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
return REAL(strcpy)(to, from); // NOLINT
}

#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
@@ -564,29 +564,28 @@ INTERCEPTOR(char*, strdup, const char *s) {
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
#endif

INTERCEPTOR(SIZE_T, strlen, const char *s) {
#if ASAN_INTERCEPT___STRDUP
INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strlen);
if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
if (asan_init_is_running) {
return REAL(strlen)(s);
}
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
SIZE_T length = REAL(strlen)(s);
uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
}
return length;
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
#endif // ASAN_INTERCEPT___STRDUP

INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
SIZE_T length = REAL(wcslen)(s);
SIZE_T length = internal_wcslen(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
@@ -607,19 +606,6 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}

#if ASAN_INTERCEPT_STRNLEN
INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
ENSURE_ASAN_INITED();
uptr length = REAL(strnlen)(s, maxlen);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
}
return length;
}
#endif // ASAN_INTERCEPT_STRNLEN

INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) {
void *ctx;
@@ -702,12 +688,12 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL

#if ASAN_INTERCEPT___CXA_ATEXIT
static void AtCxaAtexit(void *unused) {
(void)unused;
StopInitOrderChecking();
}

#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
@@ -739,25 +725,23 @@ void InitializeAsanInterceptors() {
InitializeCommonInterceptors();

// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memmove);
ASAN_INTERCEPT_FUNC(memcpy);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
ASAN_INTERCEPT_FUNC(memcpy);
// In asan, REAL(memmove) is not used, but it is used in msan.
ASAN_INTERCEPT_FUNC(memmove);
}
CHECK(REAL(memcpy));

// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strchr);
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
ASAN_INTERCEPT_FUNC(strlen);
ASAN_INTERCEPT_FUNC(wcslen);
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
#if ASAN_INTERCEPT_STRDUP
ASAN_INTERCEPT_FUNC(strdup);
#endif
#if ASAN_INTERCEPT_STRNLEN
ASAN_INTERCEPT_FUNC(strnlen);
#if ASAN_INTERCEPT___STRDUP
ASAN_INTERCEPT_FUNC(__strdup);
#endif
#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
ASAN_INTERCEPT_FUNC(index);
@@ -23,14 +23,12 @@
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_FORK 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_FORK 0
@@ -42,12 +40,6 @@
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif

#if !SANITIZER_MAC
# define ASAN_INTERCEPT_STRNLEN 1
#else
# define ASAN_INTERCEPT_STRNLEN 0
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
@@ -80,6 +72,12 @@
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT___STRDUP 1
#else
# define ASAN_INTERCEPT___STRDUP 0
#endif

DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
@@ -54,8 +54,17 @@ extern "C" {
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
__asan_global_source_location *location; // Source location of a global,
// or NULL if it is unknown.
uptr odr_indicator; // The address of the ODR indicator symbol.
};

// These functions can be called on some platforms to find globals in the same
// loaded image as `flag' and apply __asan_(un)register_globals to them,
// filtering out redundant calls.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_register_image_globals(uptr *flag);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unregister_image_globals(uptr *flag);

// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
SANITIZER_INTERFACE_ATTRIBUTE
@@ -36,9 +36,9 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
#if SANITIZER_WORDSIZE == 32
# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
# define ASAN_LOW_MEMORY 1
#else
# else
# define ASAN_LOW_MEMORY 0
# endif
#endif
@@ -62,6 +62,9 @@ using __sanitizer::StackTrace;

void AsanInitFromRtl();

// asan_win.cc
void InitializePlatformExceptionHandlers();

// asan_rtl.cc
void NORETURN ShowStatsAndAbort();

@@ -73,6 +76,13 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();

// Support function for __asan_(un)register_image_globals. Searches for the
// loaded image containing `needle' and then enumerates all global metadata
// structures declared in that image, applying `op' (e.g.,
// __asan_(un)register_globals) to them.
typedef void (*globals_op_fptr)(__asan_global *, uptr);
void AsanApplyToGlobals(globals_op_fptr op, const void *needle);

void AsanOnDeadlySignal(int, void *siginfo, void *context);

void ReadContextStack(void *context, uptr *stack, uptr *ssize);
@@ -95,16 +105,24 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#elif SANITIZER_WINDOWS64
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC

// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_MALLOC_HOOK(ptr, size) \
do { \
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
RunMallocHooks(ptr, size); \
} while (false)
#define ASAN_FREE_HOOK(ptr) \
do { \
if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
RunFreeHooks(ptr); \
} while (false)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()

@@ -112,7 +130,6 @@ extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
extern void (*death_callback)(void);

// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapRightRedzoneMagic = 0xfb;
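The do { ... } while (false) wrapper used for the hook macros above is the usual trick for making a multi-statement macro expand to exactly one statement; a small illustration with made-up names:

    void record(const char *msg);   // illustrative logging primitive

    #define RECORD_TWICE_BAD(msg)  record(msg); record(msg)
    #define RECORD_TWICE_OK(msg)   do { record(msg); record(msg); } while (false)

    void maybe_record(bool verbose, const char *msg) {
      if (verbose)
        RECORD_TWICE_OK(msg);   // expands to one statement; both calls stay under the if
      // RECORD_TWICE_BAD(msg) here would leave the second record() unconditional.
    }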
@@ -69,12 +69,17 @@ asan_rt_version_t __asan_rt_version;
namespace __asan {

void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}

void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC; // defined in link.h
}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}

#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
@@ -24,9 +24,11 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"

#include <dlfcn.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <stdlib.h> // for free()
@@ -36,9 +38,16 @@
#include <sys/ucontext.h>
#include <unistd.h>

// from <crt_externs.h>, but we don't have that file on iOS
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}

namespace __asan {

void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}

bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
@@ -60,6 +69,30 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
// Find the Mach-O header for the image containing the needle
Dl_info info;
int err = dladdr(needle, &info);
if (err == 0) return;

#if __LP64__
const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase;
#else
const struct mach_header *mh = (struct mach_header *)info.dli_fbase;
#endif

// Look up the __asan_globals section in that image and register its globals
unsigned long size = 0;
__asan_global *globals = (__asan_global *)getsectiondata(
mh,
"__DATA", "__asan_globals",
&size);

if (!globals) return;
if (size % sizeof(__asan_global) != 0) return;
op(globals, size / sizeof(__asan_global));
}

void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
@@ -26,52 +26,58 @@
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT

static const uptr kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static uptr allocated_for_dlsym;
static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];

static bool IsInCallocPool(const void *ptr) {
sptr off = (sptr)ptr - (sptr)calloc_memory_for_dlsym;
return 0 <= off && off < (sptr)kCallocPoolSize;
static bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < sizeof(alloc_memory_for_dlsym);
}

static void *AllocateFromLocalPool(uptr size_in_bytes) {
uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
allocated_for_dlsym += size_in_words;
CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
return mem;
}

INTERCEPTOR(void, free, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInCallocPool(ptr)))
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
asan_free(ptr, &stack, FROM_MALLOC);
}

INTERCEPTOR(void, cfree, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInCallocPool(ptr)))
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
asan_free(ptr, &stack, FROM_MALLOC);
}

INTERCEPTOR(void*, malloc, uptr size) {
if (UNLIKELY(!asan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}

INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (UNLIKELY(!asan_inited)) {
if (UNLIKELY(!asan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
static uptr allocated;
uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
return AllocateFromLocalPool(nmemb * size);
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}

INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
GET_STACK_TRACE_MALLOC;
if (UNLIKELY(IsInCallocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)calloc_memory_for_dlsym;
uptr copy_size = Min(size, kCallocPoolSize - offset);
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr = asan_malloc(size, &stack);
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
@@ -92,7 +98,7 @@ INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
DTLS_on_libc_memalign(res, size * boundary);
DTLS_on_libc_memalign(res, size);
return res;
}
@@ -14,6 +14,8 @@

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#include "asan_allocator.h"
#include "asan_interceptors.h"
@@ -48,6 +50,11 @@ void _free_dbg(void *ptr, int) {
free(ptr);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void _free_base(void *ptr) {
free(ptr);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
CHECK(!"cfree() should not be used on Windows");
@@ -59,6 +66,11 @@ void *malloc(size_t size) {
return asan_malloc(size, &stack);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_base(size_t size) {
return malloc(size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
@@ -70,6 +82,11 @@ void *calloc(size_t nmemb, size_t size) {
return asan_calloc(nmemb, size, &stack);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_base(size_t nmemb, size_t size) {
return calloc(nmemb, size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
return calloc(nmemb, size);
@@ -92,6 +109,11 @@ void *_realloc_dbg(void *ptr, size_t size, int) {
return 0;
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_base(void *ptr, size_t size) {
return realloc(ptr, size);
}

ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
@@ -103,7 +125,7 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}

ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
@@ -139,38 +161,89 @@ int _CrtSetReportMode(int, int) {
}
} // extern "C"

INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
SIZE_T dwBytes) {
GET_STACK_TRACE_MALLOC;
void *p = asan_malloc(dwBytes, &stack);
// Reading MSDN suggests that the *entire* usable allocation is zeroed out.
// Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
// https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
if (dwFlags == HEAP_ZERO_MEMORY)
internal_memset(p, 0, asan_mz_size(p));
else
CHECK(dwFlags == 0 && "unsupported heap flags");
return p;
}

INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
CHECK(dwFlags == 0 && "unsupported heap flags");
GET_STACK_TRACE_FREE;
asan_free(lpMem, &stack, FROM_MALLOC);
return true;
}

INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
LPVOID lpMem, SIZE_T dwBytes) {
GET_STACK_TRACE_MALLOC;
// Realloc should never reallocate in place.
if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
return nullptr;
CHECK(dwFlags == 0 && "unsupported heap flags");
return asan_realloc(lpMem, dwBytes, &stack);
}

INTERCEPTOR_WINAPI(SIZE_T, HeapSize, HANDLE hHeap, DWORD dwFlags,
LPCVOID lpMem) {
CHECK(dwFlags == 0 && "unsupported heap flags");
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(lpMem, pc, bp);
}

namespace __asan {

static void TryToOverrideFunction(const char *fname, uptr new_func) {
// Failure here is not fatal. The CRT may not be present, and different CRT
// versions use different symbols.
if (!__interception::OverrideFunction(fname, new_func))
VPrintf(2, "Failed to override function %s\n", fname);
}

void ReplaceSystemMalloc() {
#if defined(ASAN_DYNAMIC)
// We don't check the result because CRT might not be used in the process.
__interception::OverrideFunction("free", (uptr)free);
__interception::OverrideFunction("malloc", (uptr)malloc);
__interception::OverrideFunction("_malloc_crt", (uptr)malloc);
__interception::OverrideFunction("calloc", (uptr)calloc);
__interception::OverrideFunction("_calloc_crt", (uptr)calloc);
__interception::OverrideFunction("realloc", (uptr)realloc);
__interception::OverrideFunction("_realloc_crt", (uptr)realloc);
__interception::OverrideFunction("_recalloc", (uptr)_recalloc);
__interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
__interception::OverrideFunction("_msize", (uptr)_msize);
__interception::OverrideFunction("_expand", (uptr)_expand);
TryToOverrideFunction("free", (uptr)free);
TryToOverrideFunction("_free_base", (uptr)free);
TryToOverrideFunction("malloc", (uptr)malloc);
TryToOverrideFunction("_malloc_base", (uptr)malloc);
TryToOverrideFunction("_malloc_crt", (uptr)malloc);
TryToOverrideFunction("calloc", (uptr)calloc);
TryToOverrideFunction("_calloc_base", (uptr)calloc);
TryToOverrideFunction("_calloc_crt", (uptr)calloc);
TryToOverrideFunction("realloc", (uptr)realloc);
TryToOverrideFunction("_realloc_base", (uptr)realloc);
TryToOverrideFunction("_realloc_crt", (uptr)realloc);
TryToOverrideFunction("_recalloc", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
TryToOverrideFunction("_msize", (uptr)_msize);
TryToOverrideFunction("_expand", (uptr)_expand);
TryToOverrideFunction("_expand_base", (uptr)_expand);

// Override different versions of 'operator new' and 'operator delete'.
// No need to override the nothrow versions as they just wrap the throw
// versions.
// FIXME: Unfortunately, MSVC miscompiles the statements that take the
// addresses of the array versions of these operators,
// see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
// We might want to try to work around this by [inline] assembly or compiling
// parts of the RTL with Clang.
void *(*op_new)(size_t sz) = operator new;
void (*op_delete)(void *p) = operator delete;
void *(*op_array_new)(size_t sz) = operator new[];
void (*op_array_delete)(void *p) = operator delete[];
__interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
__interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
__interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
__interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
// Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which
// enable cross-module inlining. This means our _malloc_base hook won't catch
// all CRT allocations. This code here patches the import table of
// ucrtbase.dll so that all attempts to use the lower-level win32 heap
|
||||
// allocation API will be directed to ASan's heap. We don't currently
|
||||
// intercept all calls to HeapAlloc. If we did, we would have to check on
|
||||
// HeapFree whether the pointer came from ASan or from the system.
|
||||
#define INTERCEPT_UCRT_FUNCTION(func) \
|
||||
if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", \
|
||||
"api-ms-win-core-heap-l1-1-0.dll", func)) \
|
||||
VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func);
|
||||
INTERCEPT_UCRT_FUNCTION(HeapAlloc);
|
||||
INTERCEPT_UCRT_FUNCTION(HeapFree);
|
||||
INTERCEPT_UCRT_FUNCTION(HeapReAlloc);
|
||||
INTERCEPT_UCRT_FUNCTION(HeapSize);
|
||||
#undef INTERCEPT_UCRT_FUNCTION
|
||||
#endif
|
||||
}
|
||||
} // namespace __asan
|
||||
|
@ -87,6 +87,20 @@
|
||||
// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
|
||||
// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
|
||||
//
|
||||
// Default Linux/S390 mapping:
|
||||
// || `[0x30000000, 0x7fffffff]` || HighMem ||
|
||||
// || `[0x26000000, 0x2fffffff]` || HighShadow ||
|
||||
// || `[0x24000000, 0x25ffffff]` || ShadowGap ||
|
||||
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
|
||||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
|
||||
//
|
||||
// Default Linux/SystemZ mapping:
|
||||
// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem ||
|
||||
// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow ||
|
||||
// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap ||
|
||||
// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow ||
|
||||
// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem ||
|
||||
//
|
||||
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
|
||||
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
|
||||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
|
||||
@ -115,16 +129,18 @@ static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
|
||||
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
|
||||
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
|
||||
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
|
||||
static const u64 kIosShadowOffset64 = 0x130000000;
|
||||
static const u64 kIosShadowOffset64 = 0x120200000;
|
||||
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
|
||||
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
|
||||
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
|
||||
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
|
||||
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
|
||||
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
|
||||
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
|
||||
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
|
||||
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
|
||||
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
|
||||
|
||||
#define SHADOW_SCALE kDefaultShadowScale
|
||||
|
||||
@ -138,28 +154,36 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
|
||||
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
|
||||
# elif SANITIZER_WINDOWS
|
||||
# define SHADOW_OFFSET kWindowsShadowOffset32
|
||||
# elif SANITIZER_IOSSIM
|
||||
# define SHADOW_OFFSET kIosSimShadowOffset32
|
||||
# elif SANITIZER_IOS
|
||||
# define SHADOW_OFFSET kIosShadowOffset32
|
||||
# if SANITIZER_IOSSIM
|
||||
# define SHADOW_OFFSET kIosSimShadowOffset32
|
||||
# else
|
||||
# define SHADOW_OFFSET kIosShadowOffset32
|
||||
# endif
|
||||
# else
|
||||
# define SHADOW_OFFSET kDefaultShadowOffset32
|
||||
# endif
|
||||
#else
|
||||
# if defined(__aarch64__)
|
||||
# if SANITIZER_IOS
|
||||
# if SANITIZER_IOSSIM
|
||||
# define SHADOW_OFFSET kIosSimShadowOffset64
|
||||
# else
|
||||
# define SHADOW_OFFSET kIosShadowOffset64
|
||||
# endif
|
||||
# elif defined(__aarch64__)
|
||||
# define SHADOW_OFFSET kAArch64_ShadowOffset64
|
||||
# elif defined(__powerpc64__)
|
||||
# define SHADOW_OFFSET kPPC64_ShadowOffset64
|
||||
# elif defined(__s390x__)
|
||||
# define SHADOW_OFFSET kSystemZ_ShadowOffset64
|
||||
# elif SANITIZER_FREEBSD
|
||||
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
|
||||
# elif SANITIZER_MAC
|
||||
# define SHADOW_OFFSET kDefaultShadowOffset64
|
||||
# elif defined(__mips64)
|
||||
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
|
||||
# elif SANITIZER_IOSSIM
|
||||
# define SHADOW_OFFSET kIosSimShadowOffset64
|
||||
# elif SANITIZER_IOS
|
||||
# define SHADOW_OFFSET kIosShadowOffset64
|
||||
# elif SANITIZER_WINDOWS64
|
||||
# define SHADOW_OFFSET kWindowsShadowOffset64
|
||||
# else
|
||||
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
|
||||
# endif
|
||||
|
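These offsets feed ASan's address-to-shadow translation, which (with the default scale of 3, i.e. one shadow byte per eight application bytes) is roughly shadow = (addr >> 3) + SHADOW_OFFSET. A standalone sketch of that computation, assuming the default scale and the short 64-bit offset listed above; it is an illustration only, not part of the imported sources:

// Illustration of the shadow address computation under assumed defaults.
#include <cstdint>
#include <cstdio>

int main() {
  const unsigned kScale = 3;               // assumed default shadow scale
  const uint64_t kOffset = 0x7fff8000ULL;  // kDefaultShort64bitShadowOffset
  const uint64_t addr = 0x602000000010ULL; // an arbitrary application address
  const uint64_t shadow = (addr >> kScale) + kOffset;
  std::printf("app %#llx -> shadow %#llx\n",
              (unsigned long long)addr, (unsigned long long)shadow);
  return 0;
}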
contrib/compiler-rt/lib/asan/asan_memory_profile.cc (new file)
@ -0,0 +1,100 @@
|
||||
//===-- asan_memory_profile.cc --------------------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// This file implements __sanitizer_print_memory_profile.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
#include "sanitizer_common/sanitizer_stacktrace.h"
|
||||
#include "sanitizer_common/sanitizer_stoptheworld.h"
|
||||
#include "lsan/lsan_common.h"
|
||||
#include "asan/asan_allocator.h"
|
||||
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
|
||||
namespace __asan {
|
||||
|
||||
struct AllocationSite {
|
||||
u32 id;
|
||||
uptr total_size;
|
||||
uptr count;
|
||||
};
|
||||
|
||||
class HeapProfile {
|
||||
public:
|
||||
HeapProfile() : allocations_(1024) {}
|
||||
void Insert(u32 id, uptr size) {
|
||||
total_allocated_ += size;
|
||||
total_count_++;
|
||||
// Linear lookup will be good enough for most cases (although not all).
|
||||
for (uptr i = 0; i < allocations_.size(); i++) {
|
||||
if (allocations_[i].id == id) {
|
||||
allocations_[i].total_size += size;
|
||||
allocations_[i].count++;
|
||||
return;
|
||||
}
|
||||
}
|
||||
allocations_.push_back({id, size, 1});
|
||||
}
|
||||
|
||||
void Print(uptr top_percent) {
|
||||
InternalSort(&allocations_, allocations_.size(),
|
||||
[](const AllocationSite &a, const AllocationSite &b) {
|
||||
return a.total_size > b.total_size;
|
||||
});
|
||||
CHECK(total_allocated_);
|
||||
uptr total_shown = 0;
|
||||
Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
|
||||
"showing top %zd%%\n", total_allocated_, total_count_, top_percent);
|
||||
for (uptr i = 0; i < allocations_.size(); i++) {
|
||||
auto &a = allocations_[i];
|
||||
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
|
||||
a.total_size * 100 / total_allocated_, a.count);
|
||||
StackDepotGet(a.id).Print();
|
||||
total_shown += a.total_size;
|
||||
if (total_shown * 100 / total_allocated_ > top_percent)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
uptr total_allocated_ = 0;
|
||||
uptr total_count_ = 0;
|
||||
InternalMmapVector<AllocationSite> allocations_;
|
||||
};
|
||||
|
||||
static void ChunkCallback(uptr chunk, void *arg) {
|
||||
HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
|
||||
AsanChunkView cv = FindHeapChunkByAddress(chunk);
|
||||
if (!cv.IsAllocated()) return;
|
||||
u32 id = cv.GetAllocStackId();
|
||||
if (!id) return;
|
||||
hp->Insert(id, cv.UsedSize());
|
||||
}
|
||||
|
||||
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
|
||||
void *argument) {
|
||||
HeapProfile hp;
|
||||
__lsan::ForEachChunk(ChunkCallback, &hp);
|
||||
hp.Print(reinterpret_cast<uptr>(argument));
|
||||
}
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
extern "C" {
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_print_memory_profile(uptr top_percent) {
|
||||
__sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
|
||||
}
|
||||
} // extern "C"
|
||||
|
||||
#endif // CAN_SANITIZE_LEAKS
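A minimal caller sketch for the entry point implemented above (not part of the imported sources). It assumes an ASan-instrumented build on a platform where the profile is available (e.g. Linux/x86_64) and that the declaration is reachable through the public sanitizer header:

// Build with: clang++ -fsanitize=address example.cc
#include <sanitizer/common_interface_defs.h>  // assumed header for the declaration
#include <cstdlib>

int main() {
  // Keep a couple of heap allocations live so there is something to report.
  void *big = std::malloc(1 << 20);
  void *small = std::malloc(64);
  // Print allocation stacks covering the top 90% of the live heap.
  __sanitizer_print_memory_profile(90);
  std::free(big);
  std::free(small);
  return 0;
}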
|
@ -20,9 +20,25 @@
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
// C++ operators can't have visibility attributes on Windows.
|
||||
// C++ operators can't have dllexport attributes on Windows. We export them
|
||||
// anyway by passing extra -export flags to the linker, which is exactly what
|
||||
// dllexport would normally do. We need to export them in order to make the
|
||||
// VS2015 dynamic CRT (MD) work.
|
||||
#if SANITIZER_WINDOWS
|
||||
# define CXX_OPERATOR_ATTRIBUTE
|
||||
# ifdef _WIN64
|
||||
# pragma comment(linker, "/export:??2@YAPEAX_K@Z") // operator new
|
||||
# pragma comment(linker, "/export:??3@YAXPEAX@Z") // operator delete
|
||||
# pragma comment(linker, "/export:??3@YAXPEAX_K@Z") // sized operator delete
|
||||
# pragma comment(linker, "/export:??_U@YAPEAX_K@Z") // operator new[]
|
||||
# pragma comment(linker, "/export:??_V@YAXPEAX@Z") // operator delete[]
|
||||
# else
|
||||
# pragma comment(linker, "/export:??2@YAPAXI@Z") // operator new
|
||||
# pragma comment(linker, "/export:??3@YAXPAX@Z") // operator delete
|
||||
# pragma comment(linker, "/export:??3@YAXPAXI@Z") // sized operator delete
|
||||
# pragma comment(linker, "/export:??_U@YAPAXI@Z") // operator new[]
|
||||
# pragma comment(linker, "/export:??_V@YAXPAX@Z") // operator delete[]
|
||||
# endif
|
||||
#else
|
||||
# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
|
||||
#endif
|
||||
|
@ -343,7 +343,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
|
||||
&stack);
|
||||
}
|
||||
CHECK_LE(end - beg,
|
||||
FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
|
||||
FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
|
||||
|
||||
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
|
||||
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
|
||||
|
@ -36,14 +36,23 @@ namespace __asan {
|
||||
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
|
||||
ScopedDeadlySignal signal_scope(GetCurrentThread());
|
||||
int code = (int)((siginfo_t*)siginfo)->si_code;
|
||||
// Write the first message using the bullet-proof write.
|
||||
if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
|
||||
// Write the first message using fd=2, just in case.
|
||||
// It may actually fail to write in case stderr is closed.
|
||||
internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
|
||||
SignalContext sig = SignalContext::Create(siginfo, context);
|
||||
|
||||
// Access at a reasonable offset above SP, or slightly below it (to account
|
||||
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
|
||||
// probably a stack overflow.
|
||||
#ifdef __s390__
|
||||
// On s390, the fault address in siginfo points to the start of the page, not
|
||||
// to the precise word that was accessed. Mask off the low bits of sp to
|
||||
// take it into account.
|
||||
bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
|
||||
sig.addr < sig.sp + 0xFFFF;
|
||||
#else
|
||||
bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
|
||||
#endif
|
||||
|
||||
#if __powerpc__
|
||||
// Large stack frames can be allocated with e.g.
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include "asan_internal.h"
|
||||
#include "asan_mapping.h"
|
||||
#include "asan_report.h"
|
||||
#include "asan_scariness_score.h"
|
||||
#include "asan_stack.h"
|
||||
#include "asan_thread.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
@ -470,7 +471,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
|
||||
// previously. That's unfortunate, but I have no better solution,
|
||||
// especially given that the alloca may be from an entirely different place
|
||||
// (e.g. use-after-scope, or different thread's stack).
|
||||
#if defined(__powerpc64__) && defined(__BIG_ENDIAN__)
|
||||
#if SANITIZER_PPC64V1
|
||||
// On PowerPC64 ELFv1, the address of a function actually points to a
|
||||
// three-doubleword data structure with the first field containing
|
||||
// the address of the function's code.
|
||||
@ -687,6 +688,9 @@ class ScopedInErrorReport {
|
||||
if (flags()->print_stats)
|
||||
__asan_print_accumulated_stats();
|
||||
|
||||
if (common_flags()->print_cmdline)
|
||||
PrintCmdline();
|
||||
|
||||
// Copy the message buffer so that we could start logging without holding a
|
||||
// lock that gets acquired during printing.
|
||||
InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize);
|
||||
@ -732,10 +736,10 @@ class ScopedInErrorReport {
|
||||
};
|
||||
|
||||
StaticSpinMutex ScopedInErrorReport::lock_;
|
||||
u32 ScopedInErrorReport::reporting_thread_tid_;
|
||||
u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
|
||||
|
||||
void ReportStackOverflow(const SignalContext &sig) {
|
||||
ScopedInErrorReport in_report;
|
||||
ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Report(
|
||||
@ -744,13 +748,14 @@ void ReportStackOverflow(const SignalContext &sig) {
|
||||
(void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
|
||||
GetCurrentTidOrInvalid());
|
||||
Printf("%s", d.EndWarning());
|
||||
ScarinessScore::PrintSimple(10, "stack-overflow");
|
||||
GET_STACK_TRACE_SIGNAL(sig);
|
||||
stack.Print();
|
||||
ReportErrorSummary("stack-overflow", &stack);
|
||||
}
|
||||
|
||||
void ReportDeadlySignal(const char *description, const SignalContext &sig) {
|
||||
ScopedInErrorReport in_report(/*report*/nullptr, /*fatal*/true);
|
||||
ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
|
||||
Decorator d;
|
||||
Printf("%s", d.Warning());
|
||||
Report(
|
||||
@ -758,10 +763,32 @@ void ReportDeadlySignal(const char *description, const SignalContext &sig) {
|
||||
" (pc %p bp %p sp %p T%d)\n",
|
||||
description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
|
||||
(void *)sig.sp, GetCurrentTidOrInvalid());
|
||||
if (sig.pc < GetPageSizeCached()) {
|
||||
Report("Hint: pc points to the zero page.\n");
|
||||
}
|
||||
Printf("%s", d.EndWarning());
|
||||
ScarinessScore SS;
|
||||
if (sig.pc < GetPageSizeCached())
|
||||
Report("Hint: pc points to the zero page.\n");
|
||||
if (sig.is_memory_access) {
|
||||
const char *access_type =
|
||||
sig.write_flag == SignalContext::WRITE
|
||||
? "WRITE"
|
||||
: (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
|
||||
Report("The signal is caused by a %s memory access.\n", access_type);
|
||||
if (sig.addr < GetPageSizeCached()) {
|
||||
Report("Hint: address points to the zero page.\n");
|
||||
SS.Scare(10, "null-deref");
|
||||
} else if (sig.addr == sig.pc) {
|
||||
SS.Scare(60, "wild-jump");
|
||||
} else if (sig.write_flag == SignalContext::WRITE) {
|
||||
SS.Scare(30, "wild-addr-write");
|
||||
} else if (sig.write_flag == SignalContext::READ) {
|
||||
SS.Scare(20, "wild-addr-read");
|
||||
} else {
|
||||
SS.Scare(25, "wild-addr");
|
||||
}
|
||||
} else {
|
||||
SS.Scare(10, "signal");
|
||||
}
|
||||
SS.Print();
|
||||
GET_STACK_TRACE_SIGNAL(sig);
|
||||
stack.Print();
|
||||
MaybeDumpInstructionBytes(sig.pc);
|
||||
@ -781,13 +808,14 @@ void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
|
||||
ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
|
||||
Printf("%s", d.EndWarning());
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
ScarinessScore::PrintSimple(42, "double-free");
|
||||
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
|
||||
stack.Print();
|
||||
DescribeHeapAddress(addr, 1);
|
||||
ReportErrorSummary("double-free", &stack);
|
||||
}
|
||||
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
|
||||
BufferedStackTrace *free_stack) {
|
||||
ScopedInErrorReport in_report;
|
||||
Decorator d;
|
||||
@ -801,8 +829,9 @@ void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
|
||||
Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
|
||||
Printf(" size of the allocated type: %zd bytes;\n"
|
||||
" size of the deallocated type: %zd bytes.\n",
|
||||
asan_mz_size(reinterpret_cast<void*>(addr)), delete_size);
|
||||
alloc_size, delete_size);
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
ScarinessScore::PrintSimple(10, "new-delete-type-mismatch");
|
||||
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
|
||||
stack.Print();
|
||||
DescribeHeapAddress(addr, 1);
|
||||
@ -822,6 +851,7 @@ void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
|
||||
curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
|
||||
Printf("%s", d.EndWarning());
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
ScarinessScore::PrintSimple(40, "bad-free");
|
||||
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
|
||||
stack.Print();
|
||||
DescribeHeapAddress(addr, 1);
|
||||
@ -843,6 +873,7 @@ void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
|
||||
alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
|
||||
Printf("%s", d.EndWarning());
|
||||
CHECK_GT(free_stack->size, 0);
|
||||
ScarinessScore::PrintSimple(10, "alloc-dealloc-mismatch");
|
||||
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
|
||||
stack.Print();
|
||||
DescribeHeapAddress(addr, 1);
|
||||
@ -891,6 +922,7 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
|
||||
"memory ranges [%p,%p) and [%p, %p) overlap\n", \
|
||||
bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
|
||||
Printf("%s", d.EndWarning());
|
||||
ScarinessScore::PrintSimple(10, bug_type);
|
||||
stack->Print();
|
||||
DescribeAddress((uptr)offset1, length1, bug_type);
|
||||
DescribeAddress((uptr)offset2, length2, bug_type);
|
||||
@ -905,6 +937,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
|
||||
Printf("%s", d.Warning());
|
||||
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
|
||||
Printf("%s", d.EndWarning());
|
||||
ScarinessScore::PrintSimple(10, bug_type);
|
||||
stack->Print();
|
||||
DescribeAddress(offset, size, bug_type);
|
||||
ReportErrorSummary(bug_type, stack);
|
||||
@ -979,10 +1012,10 @@ static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
|
||||
uptr a2 = reinterpret_cast<uptr>(p2);
|
||||
AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
|
||||
AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
|
||||
bool valid1 = chunk1.IsValid();
|
||||
bool valid2 = chunk2.IsValid();
|
||||
if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
|
||||
GET_CALLER_PC_BP_SP; \
|
||||
bool valid1 = chunk1.IsAllocated();
|
||||
bool valid2 = chunk2.IsAllocated();
|
||||
if (!valid1 || !valid2 || !chunk1.Eq(chunk2)) {
|
||||
GET_CALLER_PC_BP_SP;
|
||||
return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
|
||||
}
|
||||
}
|
||||
@ -1013,10 +1046,34 @@ static bool SuppressErrorReport(uptr pc) {
|
||||
Die();
|
||||
}
|
||||
|
||||
static void PrintContainerOverflowHint() {
|
||||
Printf("HINT: if you don't care about these errors you may set "
|
||||
"ASAN_OPTIONS=detect_container_overflow=0.\n"
|
||||
"If you suspect a false positive see also: "
|
||||
"https://github.com/google/sanitizers/wiki/"
|
||||
"AddressSanitizerContainerOverflow.\n");
|
||||
}
|
||||
|
||||
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
|
||||
return s[-1] > 127 && s[1] > 127;
|
||||
}
|
||||
|
||||
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
uptr access_size, u32 exp, bool fatal) {
|
||||
if (!fatal && SuppressErrorReport(pc)) return;
|
||||
ENABLE_FRAME_POINTER;
|
||||
ScarinessScore SS;
|
||||
|
||||
if (access_size) {
|
||||
if (access_size <= 9) {
|
||||
char desr[] = "?-byte";
|
||||
desr[0] = '0' + access_size;
|
||||
SS.Scare(access_size + access_size / 2, desr);
|
||||
} else if (access_size >= 10) {
|
||||
SS.Scare(15, "multi-byte");
|
||||
}
|
||||
is_write ? SS.Scare(20, "write") : SS.Scare(1, "read");
|
||||
}
|
||||
|
||||
// Optimization experiments.
|
||||
// The experiments can be used to evaluate potential optimizations that remove
|
||||
@ -1029,6 +1086,7 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
|
||||
// Determine the error type.
|
||||
const char *bug_descr = "unknown-crash";
|
||||
u8 shadow_val = 0;
|
||||
if (AddrIsInMem(addr)) {
|
||||
u8 *shadow_addr = (u8*)MemToShadow(addr);
|
||||
// If we are accessing 16 bytes, look at the second shadow byte.
|
||||
@ -1037,49 +1095,76 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
// If we are in the partial right redzone, look at the next shadow byte.
|
||||
if (*shadow_addr > 0 && *shadow_addr < 128)
|
||||
shadow_addr++;
|
||||
switch (*shadow_addr) {
|
||||
bool far_from_bounds = false;
|
||||
shadow_val = *shadow_addr;
|
||||
int bug_type_score = 0;
|
||||
// For use-after-frees reads are almost as bad as writes.
|
||||
int read_after_free_bonus = 0;
|
||||
switch (shadow_val) {
|
||||
case kAsanHeapLeftRedzoneMagic:
|
||||
case kAsanHeapRightRedzoneMagic:
|
||||
case kAsanArrayCookieMagic:
|
||||
bug_descr = "heap-buffer-overflow";
|
||||
bug_type_score = 10;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanHeapFreeMagic:
|
||||
bug_descr = "heap-use-after-free";
|
||||
bug_type_score = 20;
|
||||
if (!is_write) read_after_free_bonus = 18;
|
||||
break;
|
||||
case kAsanStackLeftRedzoneMagic:
|
||||
bug_descr = "stack-buffer-underflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanInitializationOrderMagic:
|
||||
bug_descr = "initialization-order-fiasco";
|
||||
bug_type_score = 1;
|
||||
break;
|
||||
case kAsanStackMidRedzoneMagic:
|
||||
case kAsanStackRightRedzoneMagic:
|
||||
case kAsanStackPartialRedzoneMagic:
|
||||
bug_descr = "stack-buffer-overflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanStackAfterReturnMagic:
|
||||
bug_descr = "stack-use-after-return";
|
||||
bug_type_score = 30;
|
||||
if (!is_write) read_after_free_bonus = 18;
|
||||
break;
|
||||
case kAsanUserPoisonedMemoryMagic:
|
||||
bug_descr = "use-after-poison";
|
||||
bug_type_score = 20;
|
||||
break;
|
||||
case kAsanContiguousContainerOOBMagic:
|
||||
bug_descr = "container-overflow";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanStackUseAfterScopeMagic:
|
||||
bug_descr = "stack-use-after-scope";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanGlobalRedzoneMagic:
|
||||
bug_descr = "global-buffer-overflow";
|
||||
bug_type_score = 10;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
case kAsanIntraObjectRedzone:
|
||||
bug_descr = "intra-object-overflow";
|
||||
bug_type_score = 10;
|
||||
break;
|
||||
case kAsanAllocaLeftMagic:
|
||||
case kAsanAllocaRightMagic:
|
||||
bug_descr = "dynamic-stack-buffer-overflow";
|
||||
bug_type_score = 25;
|
||||
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
|
||||
break;
|
||||
}
|
||||
SS.Scare(bug_type_score + read_after_free_bonus, bug_descr);
|
||||
if (far_from_bounds)
|
||||
SS.Scare(10, "far-from-bounds");
|
||||
}
|
||||
|
||||
ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
|
||||
@ -1102,10 +1187,13 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
|
||||
d.EndAccess());
|
||||
|
||||
SS.Print();
|
||||
GET_STACK_TRACE_FATAL(pc, bp);
|
||||
stack.Print();
|
||||
|
||||
DescribeAddress(addr, access_size, bug_descr);
|
||||
if (shadow_val == kAsanContiguousContainerOOBMagic)
|
||||
PrintContainerOverflowHint();
|
||||
ReportErrorSummary(bug_descr, &stack);
|
||||
PrintShadowMemoryForAddress(addr);
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
|
||||
uptr access_size, u32 exp, bool fatal);
|
||||
void ReportStackOverflow(const SignalContext &sig);
|
||||
void ReportDeadlySignal(const char *description, const SignalContext &sig);
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
|
||||
void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
|
||||
BufferedStackTrace *free_stack);
|
||||
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
|
||||
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
|
||||
|
@ -86,8 +86,8 @@ void ShowStatsAndAbort() {
|
||||
// Reserve memory range [beg, end].
|
||||
// We need to use inclusive range because end+1 may not be representable.
|
||||
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
|
||||
CHECK_EQ((beg % GetPageSizeCached()), 0);
|
||||
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
|
||||
CHECK_EQ((beg % GetMmapGranularity()), 0);
|
||||
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
|
||||
uptr size = end - beg + 1;
|
||||
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
|
||||
void *res = MmapFixedNoReserve(beg, size, name);
|
||||
@ -320,26 +320,26 @@ static void InitializeHighMemEnd() {
|
||||
kHighMemEnd = GetMaxVirtualAddress();
|
||||
// Increase kHighMemEnd to make sure it's properly
|
||||
// aligned together with kHighMemBeg:
|
||||
kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
|
||||
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
|
||||
#endif // !ASAN_FIXED_MAPPING
|
||||
CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
|
||||
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
|
||||
}
|
||||
|
||||
static void ProtectGap(uptr addr, uptr size) {
|
||||
if (!flags()->protect_shadow_gap)
|
||||
return;
|
||||
void *res = MmapNoAccess(addr, size, "shadow gap");
|
||||
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
|
||||
if (addr == (uptr)res)
|
||||
return;
|
||||
// A few pages at the start of the address space can not be protected.
|
||||
// But we really want to protect as much as possible, to prevent this memory
|
||||
// being returned as a result of a non-FIXED mmap().
|
||||
if (addr == kZeroBaseShadowStart) {
|
||||
uptr step = GetPageSizeCached();
|
||||
uptr step = GetMmapGranularity();
|
||||
while (size > step && addr < kZeroBaseMaxShadowStart) {
|
||||
addr += step;
|
||||
size -= step;
|
||||
void *res = MmapNoAccess(addr, size, "shadow gap");
|
||||
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
|
||||
if (addr == (uptr)res)
|
||||
return;
|
||||
}
|
||||
@ -415,10 +415,13 @@ static void AsanInitInternal() {
|
||||
|
||||
AsanCheckIncompatibleRT();
|
||||
AsanCheckDynamicRTPrereqs();
|
||||
AvoidCVE_2016_2143();
|
||||
|
||||
SetCanPoisonMemory(flags()->poison_heap);
|
||||
SetMallocContextSize(common_flags()->malloc_context_size);
|
||||
|
||||
InitializePlatformExceptionHandlers();
|
||||
|
||||
InitializeHighMemEnd();
|
||||
|
||||
// Make sure we are not statically linked.
|
||||
@ -462,6 +465,12 @@ static void AsanInitInternal() {
|
||||
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
|
||||
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
|
||||
}
|
||||
#elif SANITIZER_WINDOWS64
|
||||
// Disable the "mid mem" shadow layout.
|
||||
if (!full_shadow_is_available) {
|
||||
kMidMemBeg = 0;
|
||||
kMidMemEnd = 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (Verbosity()) PrintAddressSpaceLayout();
|
||||
@ -539,12 +548,12 @@ static void AsanInitInternal() {
|
||||
force_interface_symbols(); // no-op.
|
||||
SanitizerInitializeUnwinder();
|
||||
|
||||
#if CAN_SANITIZE_LEAKS
|
||||
__lsan::InitCommonLsan();
|
||||
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
|
||||
Atexit(__lsan::DoLeakCheck);
|
||||
if (CAN_SANITIZE_LEAKS) {
|
||||
__lsan::InitCommonLsan();
|
||||
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
|
||||
Atexit(__lsan::DoLeakCheck);
|
||||
}
|
||||
}
|
||||
#endif // CAN_SANITIZE_LEAKS
|
||||
|
||||
#if CAN_SANITIZE_UB
|
||||
__ubsan::InitAsPlugin();
|
||||
@ -552,6 +561,15 @@ static void AsanInitInternal() {
|
||||
|
||||
InitializeSuppressions();
|
||||
|
||||
if (CAN_SANITIZE_LEAKS) {
|
||||
// LateInitialize() calls dlsym, which can allocate an error string buffer
|
||||
// in the TLS. Let's ignore the allocation to avoid reporting a leak.
|
||||
__lsan::ScopedInterceptorDisabler disabler;
|
||||
Symbolizer::LateInitialize();
|
||||
} else {
|
||||
Symbolizer::LateInitialize();
|
||||
}
|
||||
|
||||
VReport(1, "AddressSanitizer Init done\n");
|
||||
}
|
||||
|
||||
|
contrib/compiler-rt/lib/asan/asan_scariness_score.h (new file)
@ -0,0 +1,67 @@
|
||||
//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is a part of AddressSanitizer, an address sanity checker.
|
||||
//
|
||||
// Compute the level of scariness of the error message.
|
||||
// Don't expect any deep science here, just a set of heuristics that suggest
|
||||
// that e.g. 1-byte-read-global-buffer-overflow is less scary than
|
||||
// 8-byte-write-stack-use-after-return.
|
||||
//
|
||||
// Every error report has one or more features, such as memory access size,
|
||||
// type (read or write), type of accessed memory (e.g. free-d heap, or a global
|
||||
// redzone), etc. Every such feature has an int score and a string description.
|
||||
// The overall score is the sum of all feature scores and the description
|
||||
// is a concatenation of feature descriptions.
|
||||
// Examples:
|
||||
// 17 (4-byte-read-heap-buffer-overflow)
|
||||
// 65 (multi-byte-write-stack-use-after-return)
|
||||
// 10 (null-deref)
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef ASAN_SCARINESS_SCORE_H
|
||||
#define ASAN_SCARINESS_SCORE_H
|
||||
|
||||
#include "asan_flags.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
|
||||
namespace __asan {
|
||||
class ScarinessScore {
|
||||
public:
|
||||
ScarinessScore() {
|
||||
descr[0] = 0;
|
||||
}
|
||||
void Scare(int add_to_score, const char *reason) {
|
||||
if (descr[0])
|
||||
internal_strlcat(descr, "-", sizeof(descr));
|
||||
internal_strlcat(descr, reason, sizeof(descr));
|
||||
score += add_to_score;
|
||||
};
|
||||
int GetScore() const { return score; }
|
||||
const char *GetDescription() const { return descr; }
|
||||
void Print() {
|
||||
if (score && flags()->print_scariness)
|
||||
Printf("SCARINESS: %d (%s)\n", score, descr);
|
||||
}
|
||||
static void PrintSimple(int score, const char *descr) {
|
||||
ScarinessScore SS;
|
||||
SS.Scare(score, descr);
|
||||
SS.Print();
|
||||
}
|
||||
|
||||
private:
|
||||
int score = 0;
|
||||
char descr[1024];
|
||||
};
|
||||
|
||||
} // namespace __asan
|
||||
|
||||
#endif // ASAN_SCARINESS_SCORE_H
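To make the scoring concrete, here is a standalone sketch of the model the class implements: each feature contributes an integer and a hyphen-joined tag, and the sum is what gets reported (the real class only prints when the print_scariness flag is enabled). The mimic uses std::string in place of the runtime's internal string helpers and is not part of the imported sources:

#include <cstdio>
#include <string>

struct Scariness {
  int score = 0;
  std::string descr;
  void Scare(int add, const char *reason) {
    if (!descr.empty()) descr += "-";
    descr += reason;
    score += add;
  }
};

int main() {
  Scariness s;
  s.Scare(4 + 4 / 2, "4-byte");          // access-size feature
  s.Scare(1, "read");                    // access-type feature
  s.Scare(10, "heap-buffer-overflow");   // memory-kind feature
  // Prints: SCARINESS: 17 (4-byte-read-heap-buffer-overflow)
  std::printf("SCARINESS: %d (%s)\n", s.score, s.descr.c_str());
  return 0;
}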
|
@ -48,7 +48,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
|
||||
uptr stack_top = t->stack_top();
|
||||
uptr stack_bottom = t->stack_bottom();
|
||||
ScopedUnwinding unwind_scope(t);
|
||||
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
|
||||
if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
|
||||
stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom,
|
||||
fast);
|
||||
}
|
||||
} else if (!t && !fast) {
|
||||
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
|
||||
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
|
||||
|
@ -89,6 +89,7 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
|
||||
|
||||
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
|
||||
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
|
||||
CHECK(frames);
|
||||
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
|
||||
const char *function_name = cur->info.function;
|
||||
if (!function_name) {
|
||||
|
@ -120,6 +120,71 @@ void AsanThread::Destroy() {
|
||||
DTLS_Destroy();
|
||||
}
|
||||
|
||||
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
|
||||
uptr size) {
|
||||
if (atomic_load(&stack_switching_, memory_order_relaxed)) {
|
||||
Report("ERROR: starting fiber switch while in fiber switch\n");
|
||||
Die();
|
||||
}
|
||||
|
||||
next_stack_bottom_ = bottom;
|
||||
next_stack_top_ = bottom + size;
|
||||
atomic_store(&stack_switching_, 1, memory_order_release);
|
||||
|
||||
FakeStack *current_fake_stack = fake_stack_;
|
||||
if (fake_stack_save)
|
||||
*fake_stack_save = fake_stack_;
|
||||
fake_stack_ = nullptr;
|
||||
SetTLSFakeStack(nullptr);
|
||||
// If fake_stack_save is null, the fiber will die; delete its FakeStack.
|
||||
if (!fake_stack_save && current_fake_stack)
|
||||
current_fake_stack->Destroy(this->tid());
|
||||
}
|
||||
|
||||
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
|
||||
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
|
||||
Report("ERROR: finishing a fiber switch that has not started\n");
|
||||
Die();
|
||||
}
|
||||
|
||||
if (fake_stack_save) {
|
||||
SetTLSFakeStack(fake_stack_save);
|
||||
fake_stack_ = fake_stack_save;
|
||||
}
|
||||
|
||||
stack_bottom_ = next_stack_bottom_;
|
||||
stack_top_ = next_stack_top_;
|
||||
atomic_store(&stack_switching_, 0, memory_order_release);
|
||||
next_stack_top_ = 0;
|
||||
next_stack_bottom_ = 0;
|
||||
}
|
||||
|
||||
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
|
||||
if (!atomic_load(&stack_switching_, memory_order_acquire))
|
||||
return StackBounds{stack_bottom_, stack_top_}; // NOLINT
|
||||
char local;
|
||||
const uptr cur_stack = (uptr)&local;
|
||||
// Note: need to check next stack first, because FinishSwitchFiber
|
||||
// may be in the process of overwriting stack_top_/bottom_. But in that case
|
||||
// we are already on the next stack.
|
||||
if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
|
||||
return StackBounds{next_stack_bottom_, next_stack_top_}; // NOLINT
|
||||
return StackBounds{stack_bottom_, stack_top_}; // NOLINT
|
||||
}
|
||||
|
||||
uptr AsanThread::stack_top() {
|
||||
return GetStackBounds().top;
|
||||
}
|
||||
|
||||
uptr AsanThread::stack_bottom() {
|
||||
return GetStackBounds().bottom;
|
||||
}
|
||||
|
||||
uptr AsanThread::stack_size() {
|
||||
const auto bounds = GetStackBounds();
|
||||
return bounds.top - bounds.bottom;
|
||||
}
|
||||
|
||||
// We want to create the FakeStack lazily on the first use, but not earlier
|
||||
// than the stack size is known; the procedure also has to be async-signal safe.
|
||||
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
|
||||
@ -150,6 +215,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
|
||||
}
|
||||
|
||||
void AsanThread::Init() {
|
||||
next_stack_top_ = next_stack_bottom_ = 0;
|
||||
atomic_store(&stack_switching_, false, memory_order_release);
|
||||
fake_stack_ = nullptr; // Will be initialized lazily if needed.
|
||||
CHECK_EQ(this->stack_size(), 0U);
|
||||
SetThreadStackAndTls();
|
||||
@ -195,10 +262,12 @@ thread_return_t AsanThread::ThreadStart(
|
||||
|
||||
void AsanThread::SetThreadStackAndTls() {
|
||||
uptr tls_size = 0;
|
||||
GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
|
||||
&tls_size);
|
||||
stack_top_ = stack_bottom_ + stack_size_;
|
||||
uptr stack_size = 0;
|
||||
GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
|
||||
const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
|
||||
stack_top_ = stack_bottom_ + stack_size;
|
||||
tls_end_ = tls_begin_ + tls_size;
|
||||
dtls_ = DTLS_Get();
|
||||
|
||||
int local;
|
||||
CHECK(AddrIsInStack((uptr)&local));
|
||||
@ -249,6 +318,11 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool AsanThread::AddrIsInStack(uptr addr) {
|
||||
const auto bounds = GetStackBounds();
|
||||
return addr >= bounds.bottom && addr < bounds.top;
|
||||
}
|
||||
|
||||
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
|
||||
void *addr) {
|
||||
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
|
||||
@ -322,8 +396,8 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
|
||||
// --- Implementation of LSan-specific functions --- {{{1
|
||||
namespace __lsan {
|
||||
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
|
||||
uptr *tls_begin, uptr *tls_end,
|
||||
uptr *cache_begin, uptr *cache_end) {
|
||||
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
|
||||
uptr *cache_end, DTLS **dtls) {
|
||||
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
|
||||
if (!t) return false;
|
||||
*stack_begin = t->stack_bottom();
|
||||
@ -333,6 +407,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
|
||||
// ASan doesn't keep allocator caches in TLS, so these are unused.
|
||||
*cache_begin = 0;
|
||||
*cache_end = 0;
|
||||
*dtls = t->dtls();
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -355,3 +430,29 @@ void EnsureMainThreadIDIsCorrect() {
|
||||
__asan::EnsureMainThreadIDIsCorrect();
|
||||
}
|
||||
} // namespace __lsan
|
||||
|
||||
// ---------------------- Interface ---------------- {{{1
|
||||
using namespace __asan; // NOLINT
|
||||
|
||||
extern "C" {
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
|
||||
uptr size) {
|
||||
AsanThread *t = GetCurrentThread();
|
||||
if (!t) {
|
||||
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
|
||||
return;
|
||||
}
|
||||
t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
|
||||
}
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_finish_switch_fiber(void* fakestack) {
|
||||
AsanThread *t = GetCurrentThread();
|
||||
if (!t) {
|
||||
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
|
||||
return;
|
||||
}
|
||||
t->FinishSwitchFiber((FakeStack*)fakestack);
|
||||
}
|
||||
}
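For context, a hypothetical sketch of how a fiber or coroutine library would use the two entry points above around a ucontext switch (not part of the imported sources; it assumes Linux/glibc for pthread_getattr_np, an ASan-instrumented build, and the prototypes exposed by the public sanitizer header in this import):

// Build with: clang++ -fsanitize=address fibers.cc -lpthread
#include <sanitizer/common_interface_defs.h>
#include <pthread.h>
#include <ucontext.h>
#include <cstdio>

static ucontext_t main_ctx, fiber_ctx;
static char fiber_stack[64 * 1024];
static void *main_stack_bottom;
static size_t main_stack_size;

static void fiber_func() {
  // We just arrived on a brand-new stack: there is no fake stack to restore.
  __sanitizer_finish_switch_fiber(nullptr);
  std::puts("running on the fiber stack");
  // The fiber is about to die: pass nullptr so its fake stack is destroyed,
  // and announce the stack we are switching back to.
  __sanitizer_start_switch_fiber(nullptr, main_stack_bottom, main_stack_size);
  swapcontext(&fiber_ctx, &main_ctx);
}

int main() {
  // Record the main thread's stack bounds so the fiber can switch back.
  pthread_attr_t attr;
  pthread_getattr_np(pthread_self(), &attr);
  pthread_attr_getstack(&attr, &main_stack_bottom, &main_stack_size);
  pthread_attr_destroy(&attr);

  getcontext(&fiber_ctx);
  fiber_ctx.uc_stack.ss_sp = fiber_stack;
  fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
  fiber_ctx.uc_link = &main_ctx;
  makecontext(&fiber_ctx, fiber_func, 0);

  // Save this stack's fake stack and declare the stack we are switching to.
  void *fake_stack_save = nullptr;
  __sanitizer_start_switch_fiber(&fake_stack_save, fiber_stack,
                                 sizeof(fiber_stack));
  swapcontext(&main_ctx, &fiber_ctx);
  // Back on the original stack: restore the fake stack saved above.
  __sanitizer_finish_switch_fiber(fake_stack_save);
  return 0;
}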
|
||||
|
@ -23,6 +23,10 @@
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#include "sanitizer_common/sanitizer_thread_registry.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
struct DTLS;
|
||||
} // namespace __sanitizer
|
||||
|
||||
namespace __asan {
|
||||
|
||||
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
|
||||
@ -62,11 +66,12 @@ class AsanThread {
|
||||
thread_return_t ThreadStart(uptr os_id,
|
||||
atomic_uintptr_t *signal_thread_is_registered);
|
||||
|
||||
uptr stack_top() { return stack_top_; }
|
||||
uptr stack_bottom() { return stack_bottom_; }
|
||||
uptr stack_size() { return stack_size_; }
|
||||
uptr stack_top();
|
||||
uptr stack_bottom();
|
||||
uptr stack_size();
|
||||
uptr tls_begin() { return tls_begin_; }
|
||||
uptr tls_end() { return tls_end_; }
|
||||
DTLS *dtls() { return dtls_; }
|
||||
u32 tid() { return context_->tid; }
|
||||
AsanThreadContext *context() { return context_; }
|
||||
void set_context(AsanThreadContext *context) { context_ = context; }
|
||||
@ -78,9 +83,7 @@ class AsanThread {
|
||||
};
|
||||
bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
|
||||
|
||||
bool AddrIsInStack(uptr addr) {
|
||||
return addr >= stack_bottom_ && addr < stack_top_;
|
||||
}
|
||||
bool AddrIsInStack(uptr addr);
|
||||
|
||||
void DeleteFakeStack(int tid) {
|
||||
if (!fake_stack_) return;
|
||||
@ -90,13 +93,19 @@ class AsanThread {
|
||||
t->Destroy(tid);
|
||||
}
|
||||
|
||||
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
|
||||
void FinishSwitchFiber(FakeStack *fake_stack_save);
|
||||
|
||||
bool has_fake_stack() {
|
||||
return (reinterpret_cast<uptr>(fake_stack_) > 1);
|
||||
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
|
||||
(reinterpret_cast<uptr>(fake_stack_) > 1);
|
||||
}
|
||||
|
||||
FakeStack *fake_stack() {
|
||||
if (!__asan_option_detect_stack_use_after_return)
|
||||
return nullptr;
|
||||
if (atomic_load(&stack_switching_, memory_order_relaxed))
|
||||
return nullptr;
|
||||
if (!has_fake_stack())
|
||||
return AsyncSignalSafeLazyInitFakeStack();
|
||||
return fake_stack_;
|
||||
@ -122,16 +131,27 @@ class AsanThread {
|
||||
void ClearShadowForThreadStackAndTLS();
|
||||
FakeStack *AsyncSignalSafeLazyInitFakeStack();
|
||||
|
||||
struct StackBounds {
|
||||
uptr bottom;
|
||||
uptr top;
|
||||
};
|
||||
StackBounds GetStackBounds() const;
|
||||
|
||||
AsanThreadContext *context_;
|
||||
thread_callback_t start_routine_;
|
||||
void *arg_;
|
||||
|
||||
uptr stack_top_;
|
||||
uptr stack_bottom_;
|
||||
// stack_size_ == stack_top_ - stack_bottom_;
|
||||
// It needs to be set in an async-signal-safe manner.
|
||||
uptr stack_size_;
|
||||
// These variables are used when the thread is about to switch stacks.
|
||||
uptr next_stack_top_;
|
||||
uptr next_stack_bottom_;
|
||||
// true if switching is in progress
|
||||
atomic_uint8_t stack_switching_;
|
||||
|
||||
uptr tls_begin_;
|
||||
uptr tls_end_;
|
||||
DTLS *dtls_;
|
||||
|
||||
FakeStack *fake_stack_;
|
||||
AsanThreadLocalMallocStorage malloc_storage_;
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include "asan_report.h"
|
||||
#include "asan_stack.h"
|
||||
#include "asan_thread.h"
|
||||
#include "asan_mapping.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#include "sanitizer_common/sanitizer_mutex.h"
|
||||
|
||||
@ -46,11 +47,20 @@ void __sanitizer_default_free_hook(void *ptr) { }
|
||||
const char* __asan_default_default_options() { return ""; }
|
||||
const char* __asan_default_default_suppressions() { return ""; }
|
||||
void __asan_default_on_error() {}
|
||||
// 64-bit MSVC does not prepend an underscore to symbols.
|
||||
#ifdef _WIN64
|
||||
#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error") // NOLINT
|
||||
#else
|
||||
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
|
||||
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
|
||||
#endif
|
||||
// }}}
|
||||
} // extern "C"
|
||||
|
||||
@ -61,6 +71,17 @@ INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
|
||||
REAL(RaiseException)(a, b, c, d);
|
||||
}
|
||||
|
||||
|
||||
#ifdef _WIN64
|
||||
|
||||
INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
|
||||
CHECK(REAL(__C_specific_handler));
|
||||
__asan_handle_no_return();
|
||||
return REAL(__C_specific_handler)(a, b, c, d);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
|
||||
CHECK(REAL(_except_handler3));
|
||||
__asan_handle_no_return();
|
||||
@ -76,6 +97,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
||||
__asan_handle_no_return();
|
||||
return REAL(_except_handler4)(a, b, c, d);
|
||||
}
|
||||
#endif
|
||||
|
||||
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
|
||||
AsanThread *t = (AsanThread*)arg;
|
||||
@ -139,8 +161,13 @@ namespace __asan {
|
||||
void InitializePlatformInterceptors() {
|
||||
ASAN_INTERCEPT_FUNC(CreateThread);
|
||||
ASAN_INTERCEPT_FUNC(RaiseException);
|
||||
|
||||
#ifdef _WIN64
|
||||
ASAN_INTERCEPT_FUNC(__C_specific_handler);
|
||||
#else
|
||||
ASAN_INTERCEPT_FUNC(_except_handler3);
|
||||
ASAN_INTERCEPT_FUNC(_except_handler4);
|
||||
#endif
|
||||
|
||||
// NtWaitForWorkViaWorkerFactory is always linked dynamically.
|
||||
CHECK(::__interception::OverrideFunction(
|
||||
@ -149,6 +176,10 @@ void InitializePlatformInterceptors() {
|
||||
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
|
||||
}
|
||||
|
||||
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
// ---------------------- TSD ---------------- {{{
|
||||
static bool tsd_key_inited = false;
|
||||
|
||||
@ -194,6 +225,55 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
#if SANITIZER_WINDOWS64
|
||||
// Exception handler for dealing with shadow memory.
|
||||
static LONG CALLBACK
|
||||
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
|
||||
static uptr page_size = GetPageSizeCached();
|
||||
static uptr alloc_granularity = GetMmapGranularity();
|
||||
// Only handle access violations.
|
||||
if (exception_pointers->ExceptionRecord->ExceptionCode !=
|
||||
EXCEPTION_ACCESS_VIOLATION) {
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
}
|
||||
|
||||
// Only handle access violations that land within the shadow memory.
|
||||
uptr addr =
|
||||
(uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);
|
||||
|
||||
// Check valid shadow range.
|
||||
if (!AddrIsInShadow(addr)) return EXCEPTION_CONTINUE_SEARCH;
|
||||
|
||||
// This is an access violation while trying to read from the shadow. Commit
|
||||
// the relevant page and let execution continue.
|
||||
|
||||
// Determine the address of the page that is being accessed.
|
||||
uptr page = RoundDownTo(addr, page_size);
|
||||
|
||||
// Query the existing page.
|
||||
MEMORY_BASIC_INFORMATION mem_info = {};
|
||||
if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
|
||||
return EXCEPTION_CONTINUE_SEARCH;
|
||||
|
||||
// Commit the page.
|
||||
uptr result =
|
||||
(uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
|
||||
if (result != page) return EXCEPTION_CONTINUE_SEARCH;
|
||||
|
||||
// The page mapping succeeded, so continue execution as usual.
|
||||
return EXCEPTION_CONTINUE_EXECUTION;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void InitializePlatformExceptionHandlers() {
|
||||
#if SANITIZER_WINDOWS64
|
||||
// On Win64, we map memory on demand with an access violation handler.
|
||||
// Install our exception handler.
|
||||
CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
|
||||
#endif
|
||||
}
|
||||
|
||||
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
|
||||
|
||||
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
|
||||
@ -242,10 +322,16 @@ int __asan_set_seh_filter() {
|
||||
}
|
||||
|
||||
#if !ASAN_DYNAMIC
|
||||
// Put a pointer to __asan_set_seh_filter at the end of the global list
|
||||
// of C initializers, after the default EH is set by the CRT.
|
||||
#pragma section(".CRT$XIZ", long, read) // NOLINT
|
||||
__declspec(allocate(".CRT$XIZ"))
|
||||
// The CRT runs initializers in this order:
|
||||
// - C initializers, from XIA to XIZ
|
||||
// - C++ initializers, from XCA to XCZ
|
||||
// Prior to 2015, the CRT set the unhandled exception filter at priority XIY,
|
||||
// near the end of C initialization. Starting in 2015, it was moved to the
|
||||
// beginning of C++ initialization. We set our priority to XCAB to run
|
||||
// immediately after the CRT runs. This way, our exception filter is called
|
||||
// first and we can delegate to their filter if appropriate.
|
||||
#pragma section(".CRT$XCAB", long, read) // NOLINT
|
||||
__declspec(allocate(".CRT$XCAB"))
|
||||
int (*__intercept_seh)() = __asan_set_seh_filter;
|
||||
#endif
|
||||
// }}}
|
||||
|
@ -21,6 +21,7 @@
|
||||
#ifdef ASAN_DLL_THUNK
|
||||
#include "asan_init_version.h"
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_common/sanitizer_platform_interceptors.h"
|
||||
|
||||
// ---------- Function interception helper functions and macros ----------- {{{1
|
||||
extern "C" {
|
||||
@ -335,6 +336,7 @@ INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
|
||||
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_path)
|
||||
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
|
||||
@ -342,21 +344,28 @@ INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
|
||||
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
|
||||
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
|
||||
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
|
||||
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
|
||||
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
|
||||
|
||||
// TODO(timurrrr): Add more interface functions on the as-needed basis.
|
||||
|
||||
// ----------------- Memory allocation functions ---------------------
|
||||
WRAP_V_W(free)
|
||||
WRAP_V_W(_free_base)
|
||||
WRAP_V_WW(_free_dbg)
|
||||
|
||||
WRAP_W_W(malloc)
|
||||
WRAP_W_W(_malloc_base)
|
||||
WRAP_W_WWWW(_malloc_dbg)
|
||||
|
||||
WRAP_W_WW(calloc)
|
||||
WRAP_W_WW(_calloc_base)
|
||||
WRAP_W_WWWWW(_calloc_dbg)
|
||||
WRAP_W_WWW(_calloc_impl)
|
||||
|
||||
WRAP_W_WW(realloc)
|
||||
WRAP_W_WW(_realloc_base)
|
||||
WRAP_W_WWW(_realloc_dbg)
|
||||
WRAP_W_WWW(_recalloc)
|
||||
|
||||
@ -371,6 +380,10 @@ WRAP_W_W(_expand_dbg)
|
||||
|
||||
INTERCEPT_LIBRARY_FUNCTION(atoi);
|
||||
INTERCEPT_LIBRARY_FUNCTION(atol);
|
||||
|
||||
#ifdef _WIN64
|
||||
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
|
||||
#else
|
||||
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
|
||||
|
||||
// _except_handler4 checks -GS cookie which is different for each module, so we
|
||||
@ -379,10 +392,13 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
|
||||
__asan_handle_no_return();
|
||||
return REAL(_except_handler4)(a, b, c, d);
|
||||
}
|
||||
#endif
|
||||
|
||||
INTERCEPT_LIBRARY_FUNCTION(frexp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(longjmp);
|
||||
#if SANITIZER_INTERCEPT_MEMCHR
|
||||
INTERCEPT_LIBRARY_FUNCTION(memchr);
|
||||
#endif
|
||||
INTERCEPT_LIBRARY_FUNCTION(memcmp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(memcpy);
|
||||
INTERCEPT_LIBRARY_FUNCTION(memmove);
|
||||
@ -392,12 +408,14 @@ INTERCEPT_LIBRARY_FUNCTION(strchr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strcmp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
|
||||
INTERCEPT_LIBRARY_FUNCTION(strcspn);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strdup);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strlen);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strncat);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strncmp);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strncpy);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strnlen);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strrchr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strspn);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strstr);
|
||||
INTERCEPT_LIBRARY_FUNCTION(strtol);
|
||||
@ -407,7 +425,9 @@ INTERCEPT_LIBRARY_FUNCTION(wcslen);
|
||||
// is defined.
|
||||
void InterceptHooks() {
|
||||
INTERCEPT_HOOKS();
|
||||
#ifndef _WIN64
|
||||
INTERCEPT_FUNCTION(_except_handler4);
|
||||
#endif
|
||||
}
|
||||
|
||||
// We want to call __asan_init before C/C++ initializers/constructors are
|
||||
|
@ -29,7 +29,7 @@
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XIZ", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT

@ -93,7 +93,8 @@ static int SetSEHFilter() { return __asan_set_seh_filter(); }

// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
    SetSEHFilter;
}

#endif // ASAN_DYNAMIC_RUNTIME_THUNK

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
vmov r0, r1, d6 // move result back to r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__adddf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
vmov r0, s14 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__addsf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -94,3 +94,5 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdrcmple)
b __aeabi_cdcmple
END_COMPILERRT_FUNCTION(__aeabi_cdrcmple)

NO_EXEC_STACK_DIRECTIVE

@ -89,3 +89,5 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfrcmple)
b __aeabi_cfcmple
END_COMPILERRT_FUNCTION(__aeabi_cfrcmple)

NO_EXEC_STACK_DIRECTIVE

@ -38,3 +38,6 @@ DEFINE_AEABI_DCMP(lt)
DEFINE_AEABI_DCMP(le)
DEFINE_AEABI_DCMP(ge)
DEFINE_AEABI_DCMP(gt)

NO_EXEC_STACK_DIRECTIVE

@ -38,3 +38,6 @@ DEFINE_AEABI_FCMP(lt)
DEFINE_AEABI_FCMP(le)
DEFINE_AEABI_FCMP(ge)
DEFINE_AEABI_FCMP(gt)

NO_EXEC_STACK_DIRECTIVE

@ -26,3 +26,6 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
add sp, sp, #4
pop { pc }
END_COMPILERRT_FUNCTION(__aeabi_idivmod)

NO_EXEC_STACK_DIRECTIVE

@ -29,3 +29,6 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
add sp, sp, #16
pop {r11, pc}
END_COMPILERRT_FUNCTION(__aeabi_ldivmod)

NO_EXEC_STACK_DIRECTIVE

@ -11,6 +11,7 @@

// void __aeabi_memcmp(void *dest, void *src, size_t n) { memcmp(dest, src, n); }

.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
b memcmp
@ -19,4 +20,5 @@ END_COMPILERRT_FUNCTION(__aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp8, __aeabi_memcmp)

.section .note.GNU-stack,"",%progbits
NO_EXEC_STACK_DIRECTIVE

@ -11,6 +11,7 @@

// void __aeabi_memcpy(void *dest, void *src, size_t n) { memcpy(dest, src, n); }

.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
b memcpy
@ -19,4 +20,5 @@ END_COMPILERRT_FUNCTION(__aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy8, __aeabi_memcpy)

.section .note.GNU-stack,"",%progbits
NO_EXEC_STACK_DIRECTIVE

@ -19,4 +19,5 @@ END_COMPILERRT_FUNCTION(__aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove8, __aeabi_memmove)

.section .note.GNU-stack,"",%progbits
NO_EXEC_STACK_DIRECTIVE

@ -12,6 +12,7 @@
// void __aeabi_memset(void *dest, size_t n, int c) { memset(dest, c, n); }
// void __aeabi_memclr(void *dest, size_t n) { __aeabi_memset(dest, n, 0); }

.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memset)
mov r3, r1
@ -32,4 +33,5 @@ END_COMPILERRT_FUNCTION(__aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr8, __aeabi_memclr)

.section .note.GNU-stack,"",%progbits
NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
add sp, sp, #4
pop { pc }
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)

NO_EXEC_STACK_DIRECTIVE

@ -29,3 +29,6 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
add sp, sp, #16
pop {r11, pc}
END_COMPILERRT_FUNCTION(__aeabi_uldivmod)

NO_EXEC_STACK_DIRECTIVE

@ -45,3 +45,6 @@ DEFINE_COMPILERRT_FUNCTION(__bswapdi2)
mov r1, r2 // r1 = r2 = rev(r0)
JMP(lr)
END_COMPILERRT_FUNCTION(__bswapdi2)

NO_EXEC_STACK_DIRECTIVE

@ -37,3 +37,6 @@ DEFINE_COMPILERRT_FUNCTION(__bswapsi2)
#endif
JMP(lr)
END_COMPILERRT_FUNCTION(__bswapsi2)

NO_EXEC_STACK_DIRECTIVE

@ -95,3 +95,6 @@ DEFINE_COMPILERRT_FUNCTION(__clzdi2)
JMP(lr)
#endif // __ARM_FEATURE_CLZ
END_COMPILERRT_FUNCTION(__clzdi2)

NO_EXEC_STACK_DIRECTIVE

@ -74,3 +74,6 @@ DEFINE_COMPILERRT_FUNCTION(__clzsi2)
JMP(lr)
#endif // __ARM_FEATURE_CLZ
END_COMPILERRT_FUNCTION(__clzsi2)

NO_EXEC_STACK_DIRECTIVE

@ -146,3 +146,6 @@ DEFINE_COMPILERRT_FUNCTION(__unordsf2)
END_COMPILERRT_FUNCTION(__unordsf2)

DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fcmpun, __unordsf2)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
vmov r0, r1, d5 // move result back to r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__divdf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -72,3 +72,6 @@ LOCAL_LABEL(divzero):
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divmodsi4)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
vmov r0, s13 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__divsf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -63,3 +63,6 @@ ESTABLISH_FRAME
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divsi3)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
movne r0, #0
bx lr
END_COMPILERRT_FUNCTION(__eqdf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
movne r0, #0
bx lr
END_COMPILERRT_FUNCTION(__eqsf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
vmov r0, r1, d7 // return result in r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__extendsfdf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__fixdfsivfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__fixsfsivfp)

NO_EXEC_STACK_DIRECTIVE

@ -25,3 +25,6 @@ DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__fixunsdfsivfp)

NO_EXEC_STACK_DIRECTIVE

@ -25,3 +25,6 @@ DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__fixunssfsivfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
vmov r0, r1, d7 // move d7 to result register pair r0/r1
bx lr
END_COMPILERRT_FUNCTION(__floatsidfvfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__floatsisfvfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
vmov r0, r1, d7 // move d7 to result register pair r0/r1
bx lr
END_COMPILERRT_FUNCTION(__floatunssidfvfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
vmov r0, s15 // move s15 to result register
bx lr
END_COMPILERRT_FUNCTION(__floatunssisfvfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
movlt r0, #0
bx lr
END_COMPILERRT_FUNCTION(__gedf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
movlt r0, #0
bx lr
END_COMPILERRT_FUNCTION(__gesf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
movle r0, #0
bx lr
END_COMPILERRT_FUNCTION(__gtdf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
movle r0, #0
bx lr
END_COMPILERRT_FUNCTION(__gtsf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
movhi r0, #0
bx lr
END_COMPILERRT_FUNCTION(__ledf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
movhi r0, #0
bx lr
END_COMPILERRT_FUNCTION(__lesf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
movpl r0, #0
bx lr
END_COMPILERRT_FUNCTION(__ltdf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
movpl r0, #0
bx lr
END_COMPILERRT_FUNCTION(__ltsf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -61,3 +61,6 @@ LOCAL_LABEL(divzero):
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__modsi3)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
vmov r0, r1, d6 // move result back to r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__muldf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
vmov r0, s13 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__mulsf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
moveq r0, #0
bx lr
END_COMPILERRT_FUNCTION(__nedf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -21,3 +21,6 @@ DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__negdf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -21,3 +21,6 @@ DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
eor r0, r0, #-2147483648 // flip sign bit on float in r0
bx lr
END_COMPILERRT_FUNCTION(__negsf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -27,3 +27,6 @@ DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
moveq r0, #0
bx lr
END_COMPILERRT_FUNCTION(__nesf2vfp)

NO_EXEC_STACK_DIRECTIVE

@ -31,3 +31,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__restore_vfp_d8_d15_regs)
bx lr // return to prolog
END_COMPILERRT_FUNCTION(__restore_vfp_d8_d15_regs)

NO_EXEC_STACK_DIRECTIVE

@ -31,3 +31,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__save_vfp_d8_d15_regs)
bx lr // return to prolog
END_COMPILERRT_FUNCTION(__save_vfp_d8_d15_regs)

NO_EXEC_STACK_DIRECTIVE

@ -24,3 +24,6 @@ DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
vmov r0, r1, d6 // move result back to r0/r1 pair
bx lr
END_COMPILERRT_FUNCTION(__subdf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -25,3 +25,6 @@ DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
vmov r0, s14 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__subsf3vfp)

NO_EXEC_STACK_DIRECTIVE

@ -42,3 +42,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch16)
bx ip // jump to computed label
END_COMPILERRT_FUNCTION(__switch16)

NO_EXEC_STACK_DIRECTIVE

@ -42,3 +42,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch32)
bx ip // jump to computed label
END_COMPILERRT_FUNCTION(__switch32)

NO_EXEC_STACK_DIRECTIVE

@ -40,3 +40,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch8)
bx ip // jump to computed label
END_COMPILERRT_FUNCTION(__switch8)

NO_EXEC_STACK_DIRECTIVE

@ -40,3 +40,5 @@ DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switchu8)
bx ip // jump to computed label
END_COMPILERRT_FUNCTION(__switchu8)

NO_EXEC_STACK_DIRECTIVE

@ -19,3 +19,5 @@

SYNC_OP_4(add_4)

NO_EXEC_STACK_DIRECTIVE

@ -22,3 +22,5 @@
SYNC_OP_8(add_8)
#endif

NO_EXEC_STACK_DIRECTIVE

@ -17,3 +17,6 @@
#define and_4(rD, rN, rM) and rD, rN, rM

SYNC_OP_4(and_4)

NO_EXEC_STACK_DIRECTIVE

@ -21,3 +21,6 @@

SYNC_OP_8(and_8)
#endif

NO_EXEC_STACK_DIRECTIVE

@ -18,3 +18,5 @@

SYNC_OP_4(max_4)

NO_EXEC_STACK_DIRECTIVE
Some files were not shown because too many files have changed in this diff.