Upgrade our copy of clang, llvm and lldb to 3.6.0 release.

Please note that from 3.5.0 onwards, clang/llvm/lldb require C++11
support to build; see UPDATING for more information.

Release notes for llvm and clang can be found here:
<http://llvm.org/releases/3.6.0/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.6.0/tools/clang/docs/ReleaseNotes.html>

Thanks to Ed Maste for the lldb part of this upgrade.

Exp-run:	antoine
dim 2015-03-15 13:31:13 +00:00
commit 2fdf486463
3566 changed files with 272534 additions and 150164 deletions

View File

@@ -38,6 +38,61 @@
# xargs -n1 | sort | uniq -d;
# done
# 20150315: new clang import which bumps version from 3.5.1 to 3.6.0.
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.1
OLD_DIRS+=usr/include/clang
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.1/lib
OLD_DIRS+=usr/lib/clang/3.5.1
# 20150302: binutils documentation distributed as a manpage
OLD_FILES+=usr/share/doc/binutils/as.txt
OLD_FILES+=usr/share/doc/binutils/ld.txt

View File

@@ -31,6 +31,11 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20150315:
Clang, llvm and lldb have been upgraded to 3.6.0 release. Please see
the 20141231 entry below for information about prerequisites and
upgrading, if you are not already using 3.5.0 or higher.
20150307:
The 32-bit PowerPC kernel has been changed to a position-independent
executable. This can only be booted with a version of loader(8)

View File

@@ -114,8 +114,7 @@ extern "C" {
// Returns the old value.
int __asan_set_error_exit_code(int exit_code);
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
// Deprecated. Call __sanitizer_set_death_callback instead.
void __asan_set_death_callback(void (*callback)(void));
void __asan_set_error_report_callback(void (*callback)(const char*));

View File

@@ -62,18 +62,6 @@ extern "C" {
void __sanitizer_unaligned_store32(void *p, uint32_t x);
void __sanitizer_unaligned_store64(void *p, uint64_t x);
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Get the number of total unique covered entities (blocks, edges, calls).
// This can be useful for coverage-directed in-process fuzzers.
uintptr_t __sanitizer_get_total_unique_coverage();
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
// A contiguous container is a container that keeps all of its elements
@@ -120,6 +108,9 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
#ifdef __cplusplus
} // extern "C"
#endif
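
A minimal usage sketch (not part of this diff, assuming only the declaration above) of the __sanitizer_set_death_callback entry point; FlushLogs is a hypothetical handler:

#include <sanitizer/common_interface_defs.h>
#include <cstdio>

// Hypothetical handler: runs right before the sanitizer aborts the process.
static void FlushLogs() {
  std::fputs("sanitizer error: flushing logs before abort\n", stderr);
  std::fflush(stderr);
}

int main() {
  __sanitizer_set_death_callback(FlushLogs);  // passing 0 would unset it
  // ... exercise instrumented code ...
}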

View File

@@ -0,0 +1,46 @@
//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Public interface for sanitizer coverage.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COVERAG_INTERFACE_H
#define SANITIZER_COVERAG_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// Initialize coverage.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Get the number of total unique covered entities (blocks, edges, calls).
// This can be useful for coverage-directed in-process fuzzers.
uintptr_t __sanitizer_get_total_unique_coverage();
// Reset the basic-block (edge) coverage to the initial state.
// Useful for in-process fuzzing to start collecting coverage from scratch.
// Experimental; will likely not work for multi-threaded processes.
void __sanitizer_reset_coverage();
// Set *data to the array of covered PCs and return the size of that array.
// Some of the entries in *data will be zero.
uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_COVERAG_INTERFACE_H
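
A minimal sketch (not part of this diff) of the in-process fuzzing loop this new header enables; RunOneInput is a hypothetical instrumented target:

#include <sanitizer/coverage_interface.h>

extern void RunOneInput(unsigned seed);  // hypothetical test driver

// Keep the best seed by unique-coverage count, measuring each run in isolation.
uintptr_t FuzzLoop(unsigned n) {
  uintptr_t best = 0;
  for (unsigned seed = 0; seed < n; seed++) {
    __sanitizer_reset_coverage();  // start counting from scratch (experimental)
    RunOneInput(seed);
    uintptr_t covered = __sanitizer_get_total_unique_coverage();
    if (covered > best) best = covered;
  }
  __sanitizer_cov_dump();  // write out coverage collected since the last reset
  return best;
}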

View File

@@ -38,7 +38,9 @@ extern "C" {
contents). */
void __msan_unpoison_string(const volatile char *a);
/* Make memory region fully uninitialized (without changing its contents). */
/* Make memory region fully uninitialized (without changing its contents).
This is a legacy interface that does not update origin information. Use
__msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);
/* Make memory region partially uninitialized (without changing its contents).
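
A minimal sketch (not part of this diff) contrasting the pair; under MemorySanitizer, reading buf after the __msan_poison call would be reported as a use of uninitialized memory:

#include <sanitizer/msan_interface.h>

void example(void) {
  char buf[16];
  __msan_unpoison(buf, sizeof(buf)); /* mark as fully initialized */
  __msan_poison(buf, sizeof(buf));   /* legacy: mark uninitialized again */
}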

View File

@@ -1,7 +1,6 @@
AddressSanitizer RT
================================
This directory contains sources of the AddressSanitizer (asan) runtime library.
We are in the process of integrating AddressSanitizer with LLVM, stay tuned.
This directory contains sources of the AddressSanitizer (ASan) runtime library.
Directory structure:
README.txt : This file.
@@ -13,14 +12,13 @@ tests/* : ASan unit tests.
Also ASan runtime needs the following libraries:
lib/interception/ : Machinery used to intercept function calls.
lib/sanitizer_common/ : Code shared between ASan and TSan.
lib/sanitizer_common/ : Code shared between various sanitizers.
Currently ASan runtime can be built by both make and cmake build systems.
(see compiler-rt/make and files Makefile.mk for make-based build and
files CMakeLists.txt for cmake-based build).
ASan runtime currently also embeds part of LeakSanitizer runtime for
leak detection (lib/lsan/lsan_common.{cc,h}).
ASan unit and output tests work only with cmake. You may run this
command from the root of your cmake build tree:
ASan runtime can only be built by CMake. You can run ASan tests
from the root of your CMake build tree:
make check-asan

View File

@@ -16,40 +16,106 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static struct AsanDeactivatedFlags {
int quarantine_size;
int max_redzone;
AllocatorOptions allocator_options;
int malloc_context_size;
bool poison_heap;
bool alloc_dealloc_mismatch;
bool allocator_may_return_null;
bool coverage;
const char *coverage_dir;
void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
#define ASAN_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &f->Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) \
RegisterFlag(parser, #Name, "", &cf->Name);
#include "asan_activation_flags.inc"
#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG
RegisterIncludeFlag(parser, cf);
}
void OverrideFromActivationFlags() {
Flags f;
CommonFlags cf;
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
f.poison_heap = poison_heap;
cf.coverage = coverage;
cf.coverage_dir = coverage_dir;
cf.verbosity = Verbosity();
cf.help = false; // this is activation-specific help
// Check if activation flags need to be overridden.
if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
parser.ParseString(env);
}
// Override from getprop asan.options.
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
parser.ParseString(buf);
SetVerbosity(cf.verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (cf.help) parser.PrintFlagDescriptions();
allocator_options.SetFrom(&f, &cf);
malloc_context_size = cf.malloc_context_size;
poison_heap = f.poison_heap;
coverage = cf.coverage;
coverage_dir = cf.coverage_dir;
}
void Print() {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir);
}
} asan_deactivated_flags;
static bool asan_is_deactivated;
void AsanStartDeactivated() {
void AsanDeactivate() {
CHECK(!asan_is_deactivated);
VReport(1, "Deactivating ASan\n");
// Save flag values.
asan_deactivated_flags.quarantine_size = flags()->quarantine_size;
asan_deactivated_flags.max_redzone = flags()->max_redzone;
asan_deactivated_flags.poison_heap = flags()->poison_heap;
asan_deactivated_flags.malloc_context_size =
common_flags()->malloc_context_size;
asan_deactivated_flags.alloc_dealloc_mismatch =
flags()->alloc_dealloc_mismatch;
asan_deactivated_flags.allocator_may_return_null =
common_flags()->allocator_may_return_null;
flags()->quarantine_size = 0;
flags()->max_redzone = 16;
flags()->poison_heap = false;
common_flags()->malloc_context_size = 0;
flags()->alloc_dealloc_mismatch = false;
common_flags()->allocator_may_return_null = true;
// Stash runtime state.
GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
asan_deactivated_flags.poison_heap = CanPoisonMemory();
asan_deactivated_flags.coverage = common_flags()->coverage;
asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
// Deactivate the runtime.
SetCanPoisonMemory(false);
SetMallocContextSize(1);
ReInitializeCoverage(false, nullptr);
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
ReInitializeAllocator(disabled);
asan_is_deactivated = true;
}
@@ -58,31 +124,19 @@ void AsanActivate() {
if (!asan_is_deactivated) return;
VReport(1, "Activating ASan\n");
// Restore flag values.
// FIXME: this is not atomic, and there may be other threads alive.
flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
common_flags()->malloc_context_size =
asan_deactivated_flags.malloc_context_size;
flags()->alloc_dealloc_mismatch =
asan_deactivated_flags.alloc_dealloc_mismatch;
common_flags()->allocator_may_return_null =
asan_deactivated_flags.allocator_may_return_null;
asan_deactivated_flags.OverrideFromActivationFlags();
ParseExtraActivationFlags();
ReInitializeAllocator();
SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
ReInitializeCoverage(asan_deactivated_flags.coverage,
asan_deactivated_flags.coverage_dir);
ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false;
VReport(
1,
"quarantine_size %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d\n",
flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
common_flags()->malloc_context_size, flags()->alloc_dealloc_mismatch,
common_flags()->allocator_may_return_null);
if (Verbosity()) {
Report("Activated with flags:\n");
asan_deactivated_flags.Print();
}
}
} // namespace __asan

View File

@@ -16,7 +16,7 @@
#define ASAN_ACTIVATION_H
namespace __asan {
void AsanStartDeactivated();
void AsanDeactivate();
void AsanActivate();
} // namespace __asan

View File

@@ -0,0 +1,35 @@
//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// A subset of ASan (and common) runtime flags supported at activation time.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_ACTIVATION_FLAG
# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
#endif
#ifndef COMMON_ACTIVATION_FLAG
# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
#endif
// ASAN_ACTIVATION_FLAG(Type, Name)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)
COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
COMMON_ACTIVATION_FLAG(int, malloc_context_size)
COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
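
The .inc file is consumed X-macro style: a translation unit defines both macros, includes the file, and each entry expands in place (see RegisterActivationFlags in asan_activation.cc above). A standalone sketch, outside the runtime, that merely prints the flag names:

#include <stdio.h>

#define ASAN_ACTIVATION_FLAG(Type, Name)   printf("asan flag:   %s\n", #Name);
#define COMMON_ACTIVATION_FLAG(Type, Name) printf("common flag: %s\n", #Name);

void PrintActivationFlagNames(void) {
#include "asan_activation_flags.inc"
}

#undef ASAN_ACTIVATION_FLAG
#undef COMMON_ACTIVATION_FLAG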

View File

@@ -0,0 +1,909 @@
//===-- asan_allocator.cc -------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
namespace __asan {
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
CHECK_LT(rz_log, 8);
return 16 << rz_log;
}
static u32 RZSize2Log(u32 rz_size) {
CHECK_GE(rz_size, 16);
CHECK_LE(rz_size, 2048);
CHECK(IsPowerOfTwo(rz_size));
u32 res = Log2(rz_size) - 4;
CHECK_EQ(rz_size, RZLog2Size(res));
return res;
}
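// For example: RZLog2Size(0) == 16, RZLog2Size(3) == 128, RZLog2Size(7) == 2048;
// RZSize2Log is the inverse mapping, e.g. RZSize2Log(32) == 1.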
static AsanAllocator &get_allocator();
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
// U -- user memory.
// R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L H H U U U U U U
// | ^
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
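// For example, a memalign() allocation whose alignment pushes the user region
// past the minimal left redzone leaves a gap larger than kChunkHeaderSize, so
// the magic/back-pointer pair is written at the allocation start and
// GetAsanChunk() can still locate the header.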
struct ChunkHeader {
// 1-st 8 bytes.
u32 chunk_state : 8; // Must be first.
u32 alloc_tid : 24;
u32 free_tid : 24;
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
// SecondaryAllocator's metadata.
u32 user_requested_size;
u32 alloc_context_id;
};
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
u32 free_context_id;
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
CHUNK_ALLOCATED = 2,
CHUNK_QUARANTINE = 3
};
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
get_allocator().GetMetaData(AllocBeg(locked_version)));
}
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
if (locked_version)
return get_allocator().GetBlockBeginFastLocked(
reinterpret_cast<void *>(this));
return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *cache)
: cache_(cache) {
}
void Recycle(AsanChunk *m) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
void *p = reinterpret_cast<void *>(m->AllocBeg());
if (p != m) {
uptr *alloc_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
alloc_magic[0] = 0;
CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
}
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
get_allocator().Deallocate(cache_, p);
}
void *Allocate(uptr size) {
return get_allocator().Allocate(cache_, size, 1, false);
}
void Deallocate(void *p) {
get_allocator().Deallocate(cache_, p);
}
AllocatorCache *cache_;
};
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededASanShadowMemory(p, size);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
return &ms->allocator_cache;
}
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
StaticSpinMutex fallback_mutex;
AllocatorCache fallback_allocator_cache;
QuarantineCache fallback_quarantine_cache;
// ------------------- Options --------------------------
atomic_uint16_t min_redzone;
atomic_uint16_t max_redzone;
atomic_uint8_t alloc_dealloc_mismatch;
// ------------------- Initialization ------------------------
explicit Allocator(LinkerInitialized)
: quarantine(LINKER_INITIALIZED),
fallback_quarantine_cache(LINKER_INITIALIZED) {}
void CheckOptions(const AllocatorOptions &options) const {
CHECK_GE(options.min_redzone, 16);
CHECK_GE(options.max_redzone, options.min_redzone);
CHECK_LE(options.max_redzone, 2048);
CHECK(IsPowerOfTwo(options.min_redzone));
CHECK(IsPowerOfTwo(options.max_redzone));
}
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
kMaxThreadLocalQuarantine);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
atomic_store(&max_redzone, options.max_redzone, memory_order_release);
}
void Initialize(const AllocatorOptions &options) {
allocator.Init(options.may_return_null);
SharedInitCode(options);
}
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
SharedInitCode(options);
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
}
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
user_requested_size <= 64 - 16 ? 0 :
user_requested_size <= 128 - 32 ? 1 :
user_requested_size <= 512 - 64 ? 2 :
user_requested_size <= 4096 - 128 ? 3 :
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
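// E.g. a 24-byte request lands in the first bucket (rz_log 0, 16-byte
// redzone) and a 1000-byte request in the fourth (rz_log 3, 128 bytes),
// before clamping to [min_redzone, max_redzone] below.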
u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
// Prefer an allocated chunk over freed chunk and freed chunk
// over available chunk.
if (left_chunk->chunk_state != right_chunk->chunk_state) {
if (left_chunk->chunk_state == CHUNK_ALLOCATED)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_ALLOCATED)
return right_chunk;
if (left_chunk->chunk_state == CHUNK_QUARANTINE)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
sptr l_offset = 0, r_offset = 0;
CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
return left_chunk;
return right_chunk;
}
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (alignment < min_alignment)
alignment = min_alignment;
if (size == 0) {
// We'd be happy to avoid allocating memory for zero-size requests, but
// some programs/tests depend on this behavior and assume that malloc
// would not return NULL even for zero-size allocations. Moreover, it
// looks like operator new should never return NULL, and results of
// consecutive "new" calls must be different even if the allocated size
// is zero.
size = 1;
}
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
uptr rz_size = RZLog2Size(rz_log);
uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
needed_size += rz_size;
using_primary_allocator = false;
}
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
return allocator.ReturnNullOrDie();
}
AsanThread *t = GetCurrentThread();
void *allocated;
bool check_rss_limit = true;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated =
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated =
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
if (!allocated)
return allocator.ReturnNullOrDie();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// chunk. This is possible if CanPoisonMemory() was false for some
// time, for example, due to flags()->start_disabled.
// Anyway, poison the block before using it for anything else.
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
CHECK_LE(user_end, alloc_end);
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
m->alloc_tid = alloc_tid;
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
if (using_primary_allocator) {
CHECK(size);
m->user_requested_size = size;
CHECK(allocator.FromPrimary(allocated));
} else {
CHECK(!allocator.FromPrimary(allocated));
m->user_requested_size = SizeClassMap::kMaxSize;
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
meta[0] = size;
meta[1] = chunk_beg;
}
m->alloc_context_id = StackDepotPut(*stack);
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
// Deal with the end of the region if size is not aligned to granularity.
if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
u8 *shadow =
(u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
}
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
uptr class_id =
Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
thread_stats.malloced_by_size[class_id]++;
if (needed_size > SizeClassMap::kMaxSize)
thread_stats.malloc_large++;
void *res = reinterpret_cast<void *>(user_beg);
if (can_fill && fl.max_malloc_fill_size) {
uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
REAL(memset)(res, fl.malloc_fill_byte, fill_size);
}
#if CAN_SANITIZE_LEAKS
m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
ASAN_MALLOC_HOOK(res, size);
return res;
}
void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
ReportInvalidFree(ptr, old_chunk_state, stack);
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type) {
if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
// Push into quarantine.
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m,
m->UsedSize());
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *ac = &fallback_allocator_cache;
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m,
m->UsedSize());
}
}
void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
u8 chunk_state = m->chunk_state;
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), (void*)0);
uptr memcpy_size = Min(new_size, m->UsedSize());
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
if (ptr && allocator.FromPrimary(ptr))
REAL(memset)(ptr, 0, nmemb * size);
return ptr;
}
void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
void CommitBack(AsanThreadLocalMallocStorage *ms) {
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac));
allocator.SwallowCache(ac);
}
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return 0;
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
}
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
if (alloc_magic[0] == kAllocBegMagic)
return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
return reinterpret_cast<AsanChunk *>(alloc_beg);
}
AsanChunk *GetAsanChunkByAddr(uptr p) {
void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
// Allocator must be locked when this function is called.
AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
void *alloc_beg =
allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
if (m->chunk_state != CHUNK_ALLOCATED) return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
if (!m1) return AsanChunkView(m1);
sptr offset = 0;
if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
// Search a bit to the left to see if there is another chunk.
AsanChunk *m2 = 0;
for (uptr l = 1; l < GetPageSizeCached(); l++) {
m2 = GetAsanChunkByAddr(addr - l);
if (m2 == m1) continue; // Still the same chunk.
break;
}
if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
m1 = ChooseChunk(addr, m2, m1);
}
return AsanChunkView(m1);
}
void PrintStats() {
allocator.PrintStats();
}
void ForceLock() {
allocator.ForceLock();
fallback_mutex.Lock();
}
void ForceUnlock() {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
};
static Allocator instance(LINKER_INITIALIZED);
static AsanAllocator &get_allocator() {
return instance.allocator;
}
bool AsanChunkView::IsValid() {
return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
}
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
}
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
instance.ReInitialize(options);
}
void GetAllocatorOptions(AllocatorOptions *options) {
instance.GetOptions(options);
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
}
void PrintInternalAllocatorStats() {
instance.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
return instance.Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, 0, stack, alloc_type);
}
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
instance.Deallocate(ptr, size, stack, alloc_type);
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
return instance.Calloc(nmemb, size, stack);
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (p == 0)
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
instance.Deallocate(p, 0, stack, FROM_MALLOC);
return 0;
}
return instance.Reallocate(p, size, stack);
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack) {
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
if (ptr == 0) return 0;
uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
GET_STACK_TRACE_FATAL(pc, bp);
ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
}
return usable_size;
}
uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() {
instance.ForceLock();
}
void asan_mz_force_unlock() {
instance.ForceUnlock();
}
void AsanSoftRssLimitExceededCallback(bool exceeded) {
instance.allocator.SetRssLimitIsExceeded(exceeded);
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
__asan::get_allocator().ForceLock();
}
void UnlockAllocator() {
__asan::get_allocator().ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&__asan::get_allocator();
*end = *begin + sizeof(__asan::get_allocator());
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if (m->chunk_state != __asan::CHUNK_ALLOCATED)
return 0;
if (m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
CHECK(m);
return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->chunk_state == __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return static_cast<ChunkTag>(m->lsan_tag);
}
void LsanMetadata::set_tag(ChunkTag value) {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
m->lsan_tag = value;
}
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->alloc_context_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::get_allocator().ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
if (!m) return kIgnoreObjectInvalid;
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
if (m->lsan_tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return instance.AllocationSize(ptr) > 0;
}
uptr __sanitizer_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = instance.AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
#endif
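
A minimal client-side sketch (not part of this diff) that provides strong definitions for the weak hooks above, assuming the public declarations in <sanitizer/allocator_interface.h>; it keeps a running count of live heap bytes under ASan:

#include <sanitizer/allocator_interface.h>
#include <atomic>

static std::atomic<size_t> g_live_bytes{0};

// Called by the runtime right after each malloc-family allocation.
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  (void)ptr;
  g_live_bytes += size;
}

// Called right before deallocation; the hook gets no size, so query it.
extern "C" void __sanitizer_free_hook(void *ptr) {
  g_live_bytes -= __sanitizer_get_allocated_size(ptr);
}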

View File

@@ -9,12 +9,13 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_allocator2.cc.
// ASan-private header for asan_allocator.cc.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ALLOCATOR_H
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "sanitizer_common/sanitizer_allocator.h"
@@ -31,8 +32,20 @@ enum AllocType {
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
void InitializeAllocator();
void ReInitializeAllocator();
struct AllocatorOptions {
u32 quarantine_size_mb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
};
void InitializeAllocator(const AllocatorOptions &options);
void ReInitializeAllocator(const AllocatorOptions &options);
void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
@@ -127,12 +140,12 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
SecondaryAllocator> AsanAllocator;
struct AsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
AllocatorCache allocator2_cache;
AllocatorCache allocator_cache;
void CommitBack();
private:
// These objects are allocated via mmap() and are zero-initialized.
@@ -160,6 +173,7 @@ void asan_mz_force_lock();
void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
} // namespace __asan
#endif // ASAN_ALLOCATOR_H

View File

@@ -1,792 +0,0 @@
//===-- asan_allocator2.cc ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator, 2-nd version.
// This variant uses the allocator from sanitizer_common, i.e. the one shared
// with ThreadSanitizer and MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include "lsan/lsan_common.h"
namespace __asan {
void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededASanShadowMemory(p, size);
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.munmaps++;
thread_stats.munmaped += size;
}
// We can not use THREADLOCAL because it is not supported on some of the
// platforms we care about (OSX 10.6, Android).
// static THREADLOCAL AllocatorCache cache;
AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
return &ms->allocator2_cache;
}
static Allocator allocator;
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 64UL << 30);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
CHUNK_ALLOCATED = 2,
CHUNK_QUARANTINE = 3
};
// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
// We use adaptive redzones: for larger allocation larger redzones are used.
static u32 RZLog2Size(u32 rz_log) {
CHECK_LT(rz_log, 8);
return 16 << rz_log;
}
static u32 RZSize2Log(u32 rz_size) {
CHECK_GE(rz_size, 16);
CHECK_LE(rz_size, 2048);
CHECK(IsPowerOfTwo(rz_size));
u32 res = Log2(rz_size) - 4;
CHECK_EQ(rz_size, RZLog2Size(res));
return res;
}
static uptr ComputeRZLog(uptr user_requested_size) {
u32 rz_log =
user_requested_size <= 64 - 16 ? 0 :
user_requested_size <= 128 - 32 ? 1 :
user_requested_size <= 512 - 64 ? 2 :
user_requested_size <= 4096 - 128 ? 3 :
user_requested_size <= (1 << 14) - 256 ? 4 :
user_requested_size <= (1 << 15) - 512 ? 5 :
user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
return Min(Max(rz_log, RZSize2Log(flags()->redzone)),
RZSize2Log(flags()->max_redzone));
}
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
// U -- user memory.
// R -- right redzone (0 or more bytes)
// ChunkBase consists of ChunkHeader and other bytes that overlap with user
// memory.
// If the left redzone is greater than the ChunkHeader size we store a magic
// value in the first uptr word of the memory block and store the address of
// ChunkBase in the next uptr.
// M B L L L L L L L L L H H U U U U U U
// | ^
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
static const uptr kAllocBegMagic = 0xCC6E96B9;
struct ChunkHeader {
// 1-st 8 bytes.
u32 chunk_state : 8; // Must be first.
u32 alloc_tid : 24;
u32 free_tid : 24;
u32 from_memalign : 1;
u32 alloc_type : 2;
u32 rz_log : 3;
u32 lsan_tag : 2;
// 2-nd 8 bytes
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
// SecondaryAllocator's metadata.
u32 user_requested_size;
u32 alloc_context_id;
};
struct ChunkBase : ChunkHeader {
// Header2, intersects with user memory.
u32 free_context_id;
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
struct AsanChunk: ChunkBase {
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
uptr UsedSize(bool locked_version = false) {
if (user_requested_size != SizeClassMap::kMaxSize)
return user_requested_size;
return *reinterpret_cast<uptr *>(
allocator.GetMetaData(AllocBeg(locked_version)));
}
void *AllocBeg(bool locked_version = false) {
if (from_memalign) {
if (locked_version)
return allocator.GetBlockBeginFastLocked(
reinterpret_cast<void *>(this));
return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
};
bool AsanChunkView::IsValid() {
return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
StackTrace AsanChunkView::GetAllocStack() {
return GetStackTraceFromId(chunk_->alloc_context_id);
}
StackTrace AsanChunkView::GetFreeStack() {
return GetStackTraceFromId(chunk_->free_context_id);
}
struct QuarantineCallback;
typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
typedef AsanQuarantine::Cache QuarantineCache;
static AsanQuarantine quarantine(LINKER_INITIALIZED);
static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED);
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
}
struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *cache)
: cache_(cache) {
}
void Recycle(AsanChunk *m) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
CHECK_NE(m->alloc_tid, kInvalidTid);
CHECK_NE(m->free_tid, kInvalidTid);
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapLeftRedzoneMagic);
void *p = reinterpret_cast<void *>(m->AllocBeg());
if (p != m) {
uptr *alloc_magic = reinterpret_cast<uptr *>(p);
CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
alloc_magic[0] = 0;
CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
}
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
thread_stats.really_freed += m->UsedSize();
allocator.Deallocate(cache_, p);
}
void *Allocate(uptr size) {
return allocator.Allocate(cache_, size, 1, false);
}
void Deallocate(void *p) {
allocator.Deallocate(cache_, p);
}
AllocatorCache *cache_;
};
void InitializeAllocator() {
allocator.Init();
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
void ReInitializeAllocator() {
quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (alignment < min_alignment)
alignment = min_alignment;
if (size == 0) {
// We'd be happy to avoid allocating memory for zero-size requests, but
// some programs/tests depend on this behavior and assume that malloc would
// not return NULL even for zero-size allocations. Moreover, it looks like
// operator new should never return NULL, and results of consecutive "new"
// calls must be different even if the allocated size is zero.
size = 1;
}
CHECK(IsPowerOfTwo(alignment));
uptr rz_log = ComputeRZLog(size);
uptr rz_size = RZLog2Size(rz_log);
uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
needed_size += rz_size;
using_primary_allocator = false;
}
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
return AllocatorReturnNull();
}
AsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated = allocator.Allocate(cache, needed_size, 8, false);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8, false);
}
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// chunk. This is possible if flags()->poison_heap was disabled for some
// time, for example, due to flags()->start_deactivated.
// Either way, poison the block before using it for anything else.
uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
}
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr beg_plus_redzone = alloc_beg + rz_size;
uptr user_beg = beg_plus_redzone;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
CHECK_LE(user_end, alloc_end);
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
m->rz_log = rz_log;
u32 alloc_tid = t ? t->tid() : 0;
m->alloc_tid = alloc_tid;
CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid;
m->from_memalign = user_beg != beg_plus_redzone;
if (alloc_beg != chunk_beg) {
CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
}
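// Resulting layout (illustrative, for the common case where
// user_beg == beg_plus_redzone):
//
//   alloc_beg            chunk_beg        user_beg             user_end
//   |<----- left redzone ----->|<-header->|<------ size ------>| ...
//
// chunk_beg == user_beg - kChunkHeaderSize, i.e. the header occupies the
// tail of the left redzone; the kAllocBegMagic pair above is only written
// when alignment padding separates alloc_beg from chunk_beg.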
if (using_primary_allocator) {
CHECK(size);
m->user_requested_size = size;
CHECK(allocator.FromPrimary(allocated));
} else {
CHECK(!allocator.FromPrimary(allocated));
m->user_requested_size = SizeClassMap::kMaxSize;
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
meta[0] = size;
meta[1] = chunk_beg;
}
m->alloc_context_id = StackDepotPut(*stack);
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
// Deal with the end of the region if size is not aligned to granularity.
if (size != size_rounded_down_to_granularity && fl.poison_heap) {
u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity);
*shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
}
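// Example (illustrative): with SHADOW_GRANULARITY == 8, a 13-byte
// allocation unpoisons one full granule and sets the next shadow byte to
// 13 & 7 == 5, meaning only the first 5 bytes of the last granule are
// addressable.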
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.mallocs++;
thread_stats.malloced += size;
thread_stats.malloced_redzones += needed_size - size;
uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size));
thread_stats.malloced_by_size[class_id]++;
if (needed_size > SizeClassMap::kMaxSize)
thread_stats.malloc_large++;
void *res = reinterpret_cast<void *>(user_beg);
if (can_fill && fl.max_malloc_fill_size) {
uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
REAL(memset)(res, fl.malloc_fill_byte, fill_size);
}
#if CAN_SANITIZE_LEAKS
m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
ASAN_MALLOC_HOOK(res, size);
return res;
}
static void ReportInvalidFree(void *ptr, u8 chunk_state,
BufferedStackTrace *stack) {
if (chunk_state == CHUNK_QUARANTINE)
ReportDoubleFree((uptr)ptr, stack);
else
ReportFreeNotMalloced((uptr)ptr, stack);
}
static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
CHUNK_QUARANTINE, memory_order_acquire))
ReportInvalidFree(ptr, old_chunk_state, stack);
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
}
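// Illustrative race: if two threads free() the same pointer, only one
// CAS succeeds; the loser sees old_chunk_state == CHUNK_QUARANTINE and
// ReportInvalidFree() diagnoses it as a double-free.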
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlag.
static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch)
ReportAllocTypeMismatch((uptr)ptr, stack,
(AllocType)m->alloc_type, (AllocType)alloc_type);
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
kAsanHeapFreeMagic);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.frees++;
thread_stats.freed += m->UsedSize();
// Push into quarantine.
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
AllocatorCache *ac = GetAllocatorCache(ms);
quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac),
m, m->UsedSize());
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *ac = &fallback_allocator_cache;
quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac),
m, m->UsedSize());
}
}
static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
QuarantineChunk(m, ptr, stack, alloc_type);
}
static void *Reallocate(void *old_ptr, uptr new_size,
BufferedStackTrace *stack) {
CHECK(old_ptr && new_size);
uptr p = reinterpret_cast<uptr>(old_ptr);
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.reallocs++;
thread_stats.realloced += new_size;
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
u8 chunk_state = m->chunk_state;
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), (void*)0);
uptr memcpy_size = Min(new_size, m->UsedSize());
// If realloc() races with free(), we may start copying freed memory.
// However, we will report the racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
static AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return 0;
if (!allocator.FromPrimary(alloc_beg)) {
uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
}
uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
if (alloc_magic[0] == kAllocBegMagic)
return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
return reinterpret_cast<AsanChunk *>(alloc_beg);
}
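// The three cases above, sketched: secondary allocations keep the chunk
// pointer in out-of-band metadata (meta[1]); memalign-style primary
// allocations leave the kAllocBegMagic/chunk-pointer pair at alloc_beg;
// otherwise the chunk header starts at alloc_beg itself.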
static AsanChunk *GetAsanChunkByAddr(uptr p) {
void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
// Allocator must be locked when this function is called.
static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
void *alloc_beg =
allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
return GetAsanChunk(alloc_beg);
}
static uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
if (m->chunk_state != CHUNK_ALLOCATED) return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr,
AsanChunk *left_chunk, AsanChunk *right_chunk) {
// Prefer an allocated chunk over a freed chunk, and a freed chunk over
// an available chunk.
if (left_chunk->chunk_state != right_chunk->chunk_state) {
if (left_chunk->chunk_state == CHUNK_ALLOCATED)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_ALLOCATED)
return right_chunk;
if (left_chunk->chunk_state == CHUNK_QUARANTINE)
return left_chunk;
if (right_chunk->chunk_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
sptr l_offset = 0, r_offset = 0;
CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
if (l_offset < r_offset)
return left_chunk;
return right_chunk;
}
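// Example (illustrative): for an address falling between a quarantined
// chunk on the left and an allocated chunk on the right, the allocated
// chunk is chosen, so the report blames the live allocation's redzone
// rather than the freed one.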
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
if (!m1) return AsanChunkView(m1);
sptr offset = 0;
if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so it may actually be an
// overflow to the right from the neighboring chunk on the left.
// Search a bit to the left to see if there is another chunk.
AsanChunk *m2 = 0;
for (uptr l = 1; l < GetPageSizeCached(); l++) {
m2 = GetAsanChunkByAddr(addr - l);
if (m2 == m1) continue; // Still the same chunk.
break;
}
if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
m1 = ChooseChunk(addr, m2, m1);
}
return AsanChunkView(m1);
}
void AsanThreadLocalMallocStorage::CommitBack() {
AllocatorCache *ac = GetAllocatorCache(this);
quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac));
allocator.SwallowCache(GetAllocatorCache(this));
}
void PrintInternalAllocatorStats() {
allocator.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
return Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
Deallocate(ptr, 0, stack, alloc_type);
}
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
Deallocate(ptr, size, stack, alloc_type);
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, 8, stack, FROM_MALLOC, true);
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return AllocatorReturnNull();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator, there is no need to
// clear it, as it comes directly from mmap.
if (ptr && allocator.FromPrimary(ptr))
REAL(memset)(ptr, 0, nmemb * size);
return ptr;
}
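// Illustrative overflow case: on a 32-bit target, calloc(1 << 30, 8)
// would wrap nmemb * size around to 0; CallocShouldReturnNullDueToOverflow
// catches this, and the call returns NULL instead of a tiny block.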
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
Deallocate(p, 0, stack, FROM_MALLOC);
return 0;
}
return Reallocate(p, size, stack);
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
return Allocate(size, PageSize, stack, FROM_MALLOC, true);
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack) {
void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true);
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
if (ptr == 0) return 0;
uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
GET_STACK_TRACE_FATAL(pc, bp);
ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
}
return usable_size;
}
uptr asan_mz_size(const void *ptr) {
return AllocationSize(reinterpret_cast<uptr>(ptr));
}
void asan_mz_force_lock() {
allocator.ForceLock();
fallback_mutex.Lock();
}
void asan_mz_force_unlock() {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
void LockAllocator() {
__asan::allocator.ForceLock();
}
void UnlockAllocator() {
__asan::allocator.ForceUnlock();
}
void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*begin = (uptr)&__asan::allocator;
*end = *begin + sizeof(__asan::allocator);
}
uptr PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if (m->chunk_state != __asan::CHUNK_ALLOCATED)
return 0;
if (m->AddrIsInside(addr, /*locked_version=*/true))
return chunk;
if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m =
__asan::GetAsanChunkByAddrFastLocked(chunk);
CHECK(m);
return m->Beg();
}
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
}
bool LsanMetadata::allocated() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->chunk_state == __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return static_cast<ChunkTag>(m->lsan_tag);
}
void LsanMetadata::set_tag(ChunkTag value) {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
m->lsan_tag = value;
}
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->UsedSize(/*locked_version=*/true);
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
return m->alloc_context_id;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::allocator.ForEachChunk(callback, arg);
}
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
if (!m) return kIgnoreObjectInvalid;
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
if (m->lsan_tag == kIgnored)
return kIgnoreObjectAlreadyIgnored;
m->lsan_tag = __lsan::kIgnored;
return kIgnoreObjectSuccess;
} else {
return kIgnoreObjectInvalid;
}
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
// The ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc. here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
uptr __sanitizer_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
#endif

@ -81,8 +81,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) {
GetInfoForHeapAddress(addr, descr);
}
uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
bool alloc_stack) {
static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;

@ -60,7 +60,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
void FakeStack::Destroy(int tid) {
PoisonAll(0);
if (common_flags()->verbosity >= 2) {
if (Verbosity() >= 2) {
InternalScopedString str(kNumberOfSizeClasses * 50);
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
@ -192,20 +192,19 @@ static FakeStack *GetFakeStackFast() {
return GetFakeStack();
}
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
if (!fs) return real_stack;
if (!fs) return 0;
uptr local_stack;
uptr real_stack = reinterpret_cast<uptr>(&local_stack);
FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
if (!ff)
return real_stack; // Out of fake stack, return the real one.
if (!ff) return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
}
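// Net effect of this change, sketched: __asan_stack_malloc_* now returns
// 0 rather than the caller-supplied real stack address when no fake frame
// is available, so instrumented code must check the result and fall back
// to its own frame (hence the v4 => v5 interface bump).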
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
if (ptr == real_stack)
return;
ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
@ -216,12 +215,12 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
__asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \
return OnMalloc(class_id, size, real_stack); \
__asan_stack_malloc_##class_id(uptr size) { \
return OnMalloc(class_id, size); \
} \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
uptr ptr, uptr size, uptr real_stack) { \
OnFree(ptr, class_id, size, real_stack); \
uptr ptr, uptr size) { \
OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)

@ -0,0 +1,160 @@
//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan flag parsing logic.
//===----------------------------------------------------------------------===//
#include "asan_activation.h"
#include "asan_flags.h"
#include "asan_interface_internal.h"
#include "asan_stack.h"
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
return "";
#endif
}
void Flags::SetDefaults() {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "asan_flags.inc"
#undef ASAN_FLAG
}
static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "asan_flags.inc"
#undef ASAN_FLAG
}
void InitializeFlags() {
// Set the default values and prepare for parsing ASan and common flags.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.detect_leaks = CAN_SANITIZE_LEAKS;
cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = kDefaultMallocContextSize;
cf.intercept_tls_get_addr = true;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser asan_parser;
RegisterAsanFlags(&asan_parser, f);
RegisterCommonFlags(&asan_parser);
// Set the default values and prepare for parsing LSan flags (which can also
// overwrite common flags).
#if CAN_SANITIZE_LEAKS
__lsan::Flags *lf = __lsan::flags();
lf->SetDefaults();
FlagParser lsan_parser;
__lsan::RegisterLsanFlags(&lsan_parser, lf);
RegisterCommonFlags(&lsan_parser);
#endif
// Override from ASan compile definition.
const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
const char *asan_default_options = MaybeCallAsanDefaultOptions();
asan_parser.ParseString(asan_default_options);
// Override from command line.
asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
#if CAN_SANITIZE_LEAKS
lsan_parser.ParseString(GetEnv("LSAN_OPTIONS"));
#endif
// Let activation flags override current settings. On Android they come
// from a system property. On other platforms this is a no-op.
if (!flags()->start_deactivated) {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
asan_parser.ParseString(buf);
}
SetVerbosity(common_flags()->verbosity);
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) {
// TODO(samsonov): print all of the flags (ASan, LSan, common).
asan_parser.PrintFlagDescriptions();
}
// Flag validation:
if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
Die();
}
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
f->check_initialization_order = true;
}
CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
// quarantine_size is deprecated but we still honor it.
// quarantine_size cannot be used together with quarantine_size_mb.
if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
Report("%s: please use either 'quarantine_size' (deprecated) or "
"quarantine_size_mb, but not both\n", SanitizerToolName);
Die();
}
if (f->quarantine_size >= 0)
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;  // 64 Mb or 256 Mb.
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
}
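// Resulting precedence, from lowest to highest (a summary, not extra
// logic): built-in defaults, the ASAN_DEFAULT_OPTIONS compile definition,
// __asan_default_options(), the ASAN_OPTIONS/LSAN_OPTIONS environment
// variables, and finally activation flags.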
} // namespace __asan
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif

@ -16,6 +16,7 @@
#define ASAN_FLAGS_H
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
// ASan flag values can be defined in four ways:
// 1) initialized with default values at startup.
@ -24,55 +25,24 @@
// 3) overridden from string returned by user-specified function
// __asan_default_options().
// 4) overridden from env variable ASAN_OPTIONS.
// 5) overridden during ASan activation (for now used on Android only).
namespace __asan {
struct Flags {
// Flag descriptions are in asan_rtl.cc.
int quarantine_size;
int redzone;
int max_redzone;
bool debug;
int report_globals;
bool check_initialization_order;
bool replace_str;
bool replace_intrin;
bool mac_ignore_invalid_free;
bool detect_stack_use_after_return;
int min_uar_stack_size_log;
int max_uar_stack_size_log;
bool uar_noreserve;
int max_malloc_fill_size, malloc_fill_byte;
int exitcode;
bool allow_user_poisoning;
int sleep_before_dying;
bool check_malloc_usable_size;
bool unmap_shadow_on_exit;
bool abort_on_error;
bool print_stats;
bool print_legend;
bool atexit;
bool allow_reexec;
bool print_full_thread_history;
bool poison_heap;
bool poison_partial;
bool poison_array_cookie;
bool alloc_dealloc_mismatch;
bool new_delete_type_mismatch;
bool strict_memcmp;
bool strict_init_order;
bool start_deactivated;
int detect_invalid_pointer_pairs;
bool detect_container_overflow;
int detect_odr_violation;
bool dump_instruction_bytes;
#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "asan_flags.inc"
#undef ASAN_FLAG
void SetDefaults();
};
extern Flags asan_flags_dont_use_directly;
inline Flags *flags() {
return &asan_flags_dont_use_directly;
}
void InitializeFlags(Flags *f, const char *env);
void InitializeFlags();
} // namespace __asan

@ -0,0 +1,145 @@
//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// ASan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_FLAG
# error "Define ASAN_FLAG prior to including this file!"
#endif
// ASAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
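// Expansion sketch (illustrative): a declaration such as
//   ASAN_FLAG(int, redzone, 16, "Minimal size ...")
// becomes `int redzone;` in struct Flags (asan_flags.h), `redzone = 16;`
// in Flags::SetDefaults(), and a RegisterFlag(parser, "redzone", ...)
// call in RegisterAsanFlags() (asan_flags.cc).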
ASAN_FLAG(int, quarantine_size, -1,
"Deprecated, please use quarantine_size_mb.")
ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
ASAN_FLAG(int, max_redzone, 2048,
"Maximal size (in bytes) of redzones around heap objects.")
ASAN_FLAG(
bool, debug, false,
"If set, prints some debugging information and does additional checks.")
ASAN_FLAG(
int, report_globals, 1,
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).")
ASAN_FLAG(bool, check_initialization_order, false,
"If set, attempts to catch initialization order issues.")
ASAN_FLAG(
bool, replace_str, true,
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
ASAN_FLAG(bool, mac_ignore_invalid_free, false,
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
"Minimum fake stack size log.")
ASAN_FLAG(int, max_uar_stack_size_log,
20, // 1Mb per size class, i.e. ~11Mb per thread
"Maximum fake stack size log.")
ASAN_FLAG(bool, uar_noreserve, false,
"Use mmap with 'noreserve' flag to allocate fake stack.")
ASAN_FLAG(
int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
"Value used to fill the newly allocated memory.")
ASAN_FLAG(int, exitcode, ASAN_DEFAULT_FAILURE_EXITCODE,
"Override the program exit status if the tool found an error.")
ASAN_FLAG(bool, allow_user_poisoning, true,
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.")
ASAN_FLAG(
int, sleep_before_dying, 0,
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.")
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
"If set, explicitly unmaps the (huge) shadow at exit.")
ASAN_FLAG(
bool, abort_on_error, false,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
ASAN_FLAG(bool, atexit, false,
"If set, prints ASan exit stats even after program terminates "
"successfully.")
ASAN_FLAG(
bool, print_full_thread_history, true,
"If set, prints thread creation stacks for the threads involved in the "
"report and their ancestors up to the main thread.")
ASAN_FLAG(
bool, poison_heap, true,
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.")
ASAN_FLAG(bool, poison_partial, true,
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
"stack buffers.")
ASAN_FLAG(bool, poison_array_cookie, true,
"Poison (or not) the array cookie after operator new[].")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
(SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
"Report errors on mismatch betwen size of new and delete.")
ASAN_FLAG(bool, strict_memcmp, true,
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.")
ASAN_FLAG(
bool, strict_init_order, false,
"If true, assume that dynamic initializers can never access globals from "
"other modules, even if the latter are already initialized.")
ASAN_FLAG(
bool, start_deactivated, false,
"If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
"poisoning) to reduce memory consumption as much as possible, and "
"restores them to original values when the first instrumented module is "
"loaded into the process. This is mainly intended to be used on "
"Android. ")
ASAN_FLAG(
int, detect_invalid_pointer_pairs, 0,
"If non-zero, try to detect operations like <, <=, >, >= and - on "
"invalid pointer pairs (e.g. when pointers belong to different objects). "
"The bigger the value the harder we try.")
ASAN_FLAG(
bool, detect_container_overflow, true,
"If true, honor the container overflow annotations. "
"See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")

@ -164,7 +164,7 @@ static void RegisterGlobal(const Global *g) {
}
}
}
if (flags()->poison_heap)
if (CanPoisonMemory())
PoisonRedZones(*g);
ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
l->g = g;
@ -182,11 +182,13 @@ static void RegisterGlobal(const Global *g) {
static void UnregisterGlobal(const Global *g) {
CHECK(asan_inited);
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
CHECK(AddrIsAlignedByGranularity(g->beg));
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
if (flags()->poison_heap)
if (CanPoisonMemory())
PoisonShadowForGlobal(g, 0);
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
@ -208,6 +210,20 @@ void StopInitOrderChecking() {
}
}
#if SANITIZER_WINDOWS // Should only be called on Windows.
SANITIZER_INTERFACE_ATTRIBUTE
void UnregisterGlobalsInRange(void *beg, void *end) {
if (!flags()->report_globals)
return;
BlockingMutexLock lock(&mu_for_globals);
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
void *address = (void *)l->g->beg;
if (beg <= address && address < end)
UnregisterGlobal(l->g);
}
}
#endif
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@ -249,7 +265,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
!CanPoisonMemory())
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
@ -275,7 +291,7 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
!flags()->poison_heap)
!CanPoisonMemory())
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);

@ -25,8 +25,10 @@ extern "C" {
// contains the function PC as the 3-rd field (see
// DescribeAddressIfStack).
// v3=>v4: added '__asan_global_source_location' to __asan_global.
#define __asan_init __asan_init_v4
#define __asan_init_name "__asan_init_v4"
// v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
// __asan_stack_free_ functions.
#define __asan_init __asan_init_v5
#define __asan_init_name "__asan_init_v5"
}
#endif // ASAN_INIT_VERSION_H

@ -142,14 +142,17 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
do { \
if (asan_init_is_running) \
return REAL(func)(__VA_ARGS__); \
ASAN_INTERCEPTOR_ENTER(ctx, func); \
if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
return REAL(func)(__VA_ARGS__); \
ENSURE_ASAN_INITED(); \
} while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
@ -169,8 +172,9 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
@ -196,6 +200,12 @@ struct ThreadStartParam {
};
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
#if SANITIZER_WINDOWS
// FIXME: this is a bandaid fix for PR22025.
AsanThread *t = (AsanThread*)arg;
SetCurrentThread(t);
return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
#else
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
while ((t = reinterpret_cast<AsanThread *>(
@ -203,6 +213,7 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
internal_sched_yield();
SetCurrentThread(t);
return t->ThreadStart(GetTid(), &param->is_registered);
#endif
}
#if ASAN_INTERCEPT_PTHREAD_CREATE
@ -236,22 +247,26 @@ INTERCEPTOR(int, pthread_create, void *thread,
}
return result;
}
INTERCEPTOR(int, pthread_join, void *t, void **arg) {
return real_pthread_join(t, arg);
}
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
}
#else
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return 0;
@ -260,8 +275,7 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (!AsanInterceptsSignal(signum) ||
common_flags()->allow_user_segv_handler) {
if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
@ -802,23 +816,14 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
// FIXME: The CreateThread interceptor is not the same as a pthread_create
// one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
ThreadStartParam param;
atomic_store(&param.t, 0, memory_order_relaxed);
atomic_store(&param.is_registered, 0, memory_order_relaxed);
DWORD result = REAL(CreateThread)(security, stack_size, asan_thread_start,
&param, thr_flags, tid);
if (result) {
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
// The pthread_create interceptor waits here, so we do the same for
// consistency.
while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
internal_sched_yield();
}
return result;
return REAL(CreateThread)(security, stack_size,
asan_thread_start, t, thr_flags, tid);
}
namespace __asan {
@ -902,6 +907,7 @@ void InitializeAsanInterceptors() {
// Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
ASAN_INTERCEPT_FUNC(pthread_create);
ASAN_INTERCEPT_FUNC(pthread_join);
#endif
// Intercept atexit function.

@ -9,8 +9,11 @@
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This header can be included by the instrumented program to fetch
// data (mostly allocator statistics) from ASan runtime library.
// This header declares the AddressSanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented
// program can call them.
//
// See also include/sanitizer/asan_interface.h
//===----------------------------------------------------------------------===//
#ifndef ASAN_INTERFACE_INTERNAL_H
#define ASAN_INTERFACE_INTERNAL_H

@ -93,8 +93,8 @@ void AsanCheckIncompatibleRT();
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
void AsanOnSIGSEGV(int, void *siginfo, void *context);
void DisableReexec();
void MaybeReexec();
bool AsanInterceptsSignal(int signum);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void AsanPlatformThreadInit();
void StopInitOrderChecking();
@ -107,10 +107,10 @@ void PlatformTSDDtor(void *tsd);
void AppendToErrorMessageBuffer(const char *buffer);
void ParseExtraActivationFlags();
void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end);
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();

@ -68,6 +68,10 @@ asan_rt_version_t __asan_rt_version;
namespace __asan {
void DisableReexec() {
// No need to re-exec on Linux.
}
void MaybeReexec() {
// No need to re-exec on Linux.
}
@ -220,10 +224,6 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#endif
}
bool AsanInterceptsSignal(int signum) {
return signum == SIGSEGV && common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
// Nothing here for now.
}

@ -101,8 +101,15 @@ void LeakyResetEnv(const char *name, const char *name_value) {
}
}
static bool reexec_disabled = false;
void DisableReexec() {
reexec_disabled = true;
}
void MaybeReexec() {
if (!flags()->allow_reexec) return;
if (reexec_disabled) return;
// Make sure the dynamic ASan runtime library is preloaded so that the
// wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
// ourselves.
@ -113,8 +120,10 @@ void MaybeReexec() {
uptr old_env_len = dyld_insert_libraries ?
internal_strlen(dyld_insert_libraries) : 0;
uptr fname_len = internal_strlen(info.dli_fname);
const char *dylib_name = StripModuleName(info.dli_fname);
uptr dylib_name_len = internal_strlen(dylib_name);
if (!dyld_insert_libraries ||
!REAL(strstr)(dyld_insert_libraries, StripModuleName(info.dli_fname))) {
!REAL(strstr)(dyld_insert_libraries, dylib_name)) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
char program_name[1024];
@ -140,58 +149,74 @@ void MaybeReexec() {
VReport(1, "exec()-ing the program with\n");
VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
VReport(1, "to enable ASan wrappers.\n");
VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n");
execv(program_name, *_NSGetArgv());
} else {
// DYLD_INSERT_LIBRARIES is set and contains the runtime library.
if (old_env_len == fname_len) {
// It's just the runtime library name - fine to unset the variable.
LeakyResetEnv(kDyldInsertLibraries, NULL);
} else {
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
char *new_env = (char*)allocator_for_env.Allocate(
old_env_len + 2 + env_name_len);
CHECK(new_env);
internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
new_env[env_name_len] = '=';
char *new_env_pos = new_env + env_name_len + 1;
// Iterate over colon-separated pieces of |dyld_insert_libraries|.
char *piece_start = dyld_insert_libraries;
char *piece_end = NULL;
char *old_env_end = dyld_insert_libraries + old_env_len;
do {
if (piece_start[0] == ':') piece_start++;
piece_end = REAL(strchr)(piece_start, ':');
if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
uptr piece_len = piece_end - piece_start;
// If the current piece isn't the runtime library name,
// append it to new_env.
if ((piece_len != fname_len) ||
(internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) {
if (new_env_pos != new_env + env_name_len + 1) {
new_env_pos[0] = ':';
new_env_pos++;
}
internal_strncpy(new_env_pos, piece_start, piece_len);
}
// Move on to the next piece.
new_env_pos += piece_len;
piece_start = piece_end;
} while (piece_start < old_env_end);
// Can't use setenv() here, because it requires the allocator to be
// initialized.
// FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
// a separate function called after InitializeAllocator().
LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// We get here only if execv() failed.
Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
"which is required for ASan to work. ASan tried to set the "
"environment variable and re-execute itself, but execv() failed, "
"possibly because of sandbox restrictions. Make sure to launch the "
"executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
CHECK("execv failed" && 0);
}
// DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
// the dylib from the environment variable, because interceptors are installed
// and we don't want our children to inherit the variable.
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
char *new_env = (char*)allocator_for_env.Allocate(
old_env_len + 2 + env_name_len);
CHECK(new_env);
internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
new_env[env_name_len] = '=';
char *new_env_pos = new_env + env_name_len + 1;
// Iterate over colon-separated pieces of |dyld_insert_libraries|.
char *piece_start = dyld_insert_libraries;
char *piece_end = NULL;
char *old_env_end = dyld_insert_libraries + old_env_len;
do {
if (piece_start[0] == ':') piece_start++;
piece_end = REAL(strchr)(piece_start, ':');
if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
uptr piece_len = piece_end - piece_start;
char *filename_start =
(char *)internal_memrchr(piece_start, '/', piece_len);
uptr filename_len = piece_len;
if (filename_start) {
filename_start += 1;
filename_len = piece_len - (filename_start - piece_start);
} else {
filename_start = piece_start;
}
// If the current piece isn't the runtime library name,
// append it to new_env.
if ((dylib_name_len != filename_len) ||
(internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
if (new_env_pos != new_env + env_name_len + 1) {
new_env_pos[0] = ':';
new_env_pos++;
}
internal_strncpy(new_env_pos, piece_start, piece_len);
new_env_pos += piece_len;
}
// Move on to the next piece.
piece_start = piece_end;
} while (piece_start < old_env_end);
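// Example (illustrative, assuming the runtime dylib is named
// libclang_rt.asan_osx_dynamic.dylib): an inherited value
//   DYLD_INSERT_LIBRARIES=/usr/lib/libfoo.dylib:/path/libclang_rt.asan_osx_dynamic.dylib
// is rewritten to just /usr/lib/libfoo.dylib; if the ASan entry was the
// only piece, new_env ends up NULL below and the variable is unset.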
// Can't use setenv() here, because it requires the allocator to be
// initialized.
// FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
// a separate function called after InitializeAllocator().
if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
LeakyResetEnv(kDyldInsertLibraries, new_env);
}
// No-op. Mac does not support static linkage anyway.
@ -205,11 +230,6 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
bool AsanInterceptsSignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) &&
common_flags()->handle_segv;
}
void AsanPlatformThreadInit() {
}
@ -312,7 +332,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
dispatch_function_t func) { \
GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (common_flags()->verbosity >= 2) { \
if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
asan_ctxt, pthread_self()); \
PRINT_CURRENT_STACK(); \
@ -330,7 +350,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (common_flags()->verbosity >= 2) {
if (Verbosity() >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
PRINT_CURRENT_STACK();
}
@ -343,7 +363,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_function_t func) {
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (common_flags()->verbosity >= 2) {
if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
asan_ctxt, pthread_self());
PRINT_CURRENT_STACK();
@ -373,13 +393,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
work(); \
}
// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER \
do { \
volatile uptr enable_fp; \
enable_fp = GET_CURRENT_FRAME(); \
} while (0)
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
ENABLE_FRAME_POINTER;
@ -403,6 +416,10 @@ INTERCEPTOR(void, dispatch_after,
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
if (!work) {
REAL(dispatch_source_set_cancel_handler)(ds, work);
return;
}
ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);

@ -152,13 +152,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
namespace {
// TODO(glider): the mz_* functions should be united with the Linux wrappers,
// as they are basically copied from there.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
// TODO(glider): the __asan_mz_* functions should be united with the Linux
// wrappers, as they are basically copied from there.
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) {
return asan_mz_size(ptr);
}
void *mz_malloc(malloc_zone_t *zone, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
@ -167,7 +171,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) {
return asan_malloc(size, &stack);
}
void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
if (UNLIKELY(!asan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const size_t kCallocPoolSize = 1024;
@ -183,7 +189,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
return asan_calloc(nmemb, size, &stack);
}
void *mz_valloc(malloc_zone_t *zone, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
@ -210,11 +218,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) {
}
// TODO(glider): the allocation callbacks need to be refactored.
void mz_free(malloc_zone_t *zone, void *ptr) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_free(malloc_zone_t *zone, void *ptr) {
free_common(zone, ptr);
}
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
@ -233,15 +245,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
}
}
void mz_destroy(malloc_zone_t* zone) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_mz_destroy(malloc_zone_t* zone) {
// A no-op -- we will not be destroyed!
Report("mz_destroy() called -- ignoring\n");
Report("__asan_mz_destroy() called -- ignoring\n");
}
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
if (UNLIKELY(!asan_inited)) {
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
@ -252,12 +265,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
// This function is currently unused, and we build with -Werror.
#if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
void __asan_mz_free_definite_size(
malloc_zone_t* zone, void *ptr, size_t size) {
// TODO(glider): check that |size| is valid.
UNIMPLEMENTED();
}
#endif
#endif
kern_return_t mi_enumerator(task_t task, void *,
unsigned type_mask, vm_address_t zone_address,
@ -299,13 +312,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
// UNIMPLEMENTED();
return false;
}
#endif
} // unnamed namespace
@ -324,32 +334,25 @@ void ReplaceSystemMalloc() {
asan_introspection.force_lock = &mi_force_lock;
asan_introspection.force_unlock = &mi_force_unlock;
asan_introspection.statistics = &mi_statistics;
asan_introspection.zone_locked = &mi_zone_locked;
internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));
// Start with a version 4 zone which is used for OS X 10.4 and 10.5.
asan_zone.version = 4;
// Use version 6 for OSX >= 10.6.
asan_zone.version = 6;
asan_zone.zone_name = "asan";
asan_zone.size = &mz_size;
asan_zone.malloc = &mz_malloc;
asan_zone.calloc = &mz_calloc;
asan_zone.valloc = &mz_valloc;
asan_zone.free = &mz_free;
asan_zone.realloc = &mz_realloc;
asan_zone.destroy = &mz_destroy;
asan_zone.size = &__asan_mz_size;
asan_zone.malloc = &__asan_mz_malloc;
asan_zone.calloc = &__asan_mz_calloc;
asan_zone.valloc = &__asan_mz_valloc;
asan_zone.free = &__asan_mz_free;
asan_zone.realloc = &__asan_mz_realloc;
asan_zone.destroy = &__asan_mz_destroy;
asan_zone.batch_malloc = 0;
asan_zone.batch_free = 0;
asan_zone.introspect = &asan_introspection;
// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
// Switch to version 6 on OSX 10.6 to support memalign.
asan_zone.version = 6;
asan_zone.free_definite_size = 0;
asan_zone.memalign = &mz_memalign;
asan_introspection.zone_locked = &mi_zone_locked;
#endif
asan_zone.memalign = &__asan_mz_memalign;
asan_zone.introspect = &asan_introspection;
// Register the ASan zone.
malloc_zone_register(&asan_zone);

@ -59,13 +59,20 @@
// || `[0x20000000, 0x23ffffff]` || LowShadow ||
// || `[0x00000000, 0x1fffffff]` || LowMem ||
//
// Default Linux/MIPS mapping:
// Default Linux/MIPS32 mapping:
// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
//
// Default Linux/MIPS64 mapping:
// || `[0x4000000000, 0xffffffffff]` || HighMem ||
// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@ -79,6 +86,15 @@
// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
//
// Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc).
// || `[0x50000000, 0xffffffff]` || HighMem ||
// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
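// Shadow address arithmetic (illustrative): MEM_TO_SHADOW(mem) computes
// (mem >> SHADOW_SCALE) + SHADOW_OFFSET. With SHADOW_SCALE == 3 and the
// 0x7fff8000 short offset used on Linux/x86-64, address 0x602000000010
// maps to (0x602000000010 >> 3) + 0x7fff8000 == 0xc047fff8002.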
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
@ -87,10 +103,11 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_ANDROID
@ -101,12 +118,12 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# elif SANITIZER_WINDOWS
# define SHADOW_OFFSET kWindowsShadowOffset32
# else
# if SANITIZER_IOS
# define SHADOW_OFFSET kIosShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
# else
# if defined(__aarch64__)

@ -15,13 +15,24 @@
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
static atomic_uint8_t can_poison_memory;
void SetCanPoisonMemory(bool value) {
atomic_store(&can_poison_memory, value, memory_order_release);
}
bool CanPoisonMemory() {
return atomic_load(&can_poison_memory, memory_order_acquire);
}
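// The release store paired with the acquire load above means that a
// thread observing can_poison_memory == true also observes any shadow
// set-up performed before SetCanPoisonMemory(true) was called.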
void PoisonShadow(uptr addr, uptr size, u8 value) {
if (!flags()->poison_heap) return;
if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
@ -34,7 +45,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
uptr size,
uptr redzone_size,
u8 value) {
if (!flags()->poison_heap) return;
if (!CanPoisonMemory()) return;
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
@ -63,10 +74,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (common_flags()->verbosity) {
if (Verbosity()) {
Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
poison ? "" : "un", ptr, end, size);
if (common_flags()->verbosity >= 2)
if (Verbosity() >= 2)
PRINT_CURRENT_STACK();
}
CHECK(size);

View File

@ -19,6 +19,10 @@
namespace __asan {
// Enable/disable memory poisoning.
void SetCanPoisonMemory(bool value);
bool CanPoisonMemory();
// Poisons the shadow memory for "size" bytes starting from "addr".
void PoisonShadow(uptr addr, uptr size, u8 value);
@ -34,7 +38,7 @@ void PoisonShadowPartialRightRedzone(uptr addr,
// performance-critical code with care.
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
DCHECK(CanPoisonMemory());
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
@ -60,15 +64,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
void *res = MmapFixedNoReserve(page_beg, page_end - page_beg);
CHECK_EQ(page_beg, res);
ReserveShadowMemoryRange(page_beg, page_end - 1);
}
}
}
ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
DCHECK(flags()->poison_heap);
DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {

View File

@ -53,7 +53,7 @@ void AppendToErrorMessageBuffer(const char *buffer) {
buffer, remaining);
error_message_buffer[error_message_buffer_size - 1] = '\0';
// FIXME: reallocate the buffer instead of truncating the message.
error_message_buffer_pos += remaining > length ? length : remaining;
error_message_buffer_pos += Min(remaining, length);
}
}
@ -937,6 +937,8 @@ using namespace __asan; // NOLINT
void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
uptr access_size) {
ENABLE_FRAME_POINTER;
// Determine the error type.
const char *bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {

View File

@ -56,8 +56,6 @@ static void AsanDie() {
}
if (common_flags()->coverage)
__sanitizer_cov_dump();
if (death_callback)
death_callback();
if (flags()->abort_on_error)
Abort();
internal__exit(flags()->exitcode);
@ -72,265 +70,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond,
Die();
}
// -------------------------- Flags ------------------------- {{{1
static const int kDefaultMallocContextSize = 30;
Flags asan_flags_dont_use_directly; // use via flags().
static const char *MaybeCallAsanDefaultOptions() {
return (&__asan_default_options) ? __asan_default_options() : "";
}
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
// Stringize the macro value.
# define ASAN_STRINGIZE(x) #x
# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options)
return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS);
#else
return "";
#endif
}
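MaybeCallAsanDefaultOptions() is the hook behind the documented __asan_default_options mechanism: an instrumented application can bake in defaults, which ASAN_OPTIONS from the environment then overrides. For example (application code, values illustrative):

extern "C" const char *__asan_default_options() {
  return "verbosity=1:redzone=32";
}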
static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
CHECK((uptr)cf->malloc_context_size <= kStackTraceMax);
// Please write meaningful flag descriptions when adding new flags.
ParseFlag(str, &f->quarantine_size, "quarantine_size",
"Size (in bytes) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.");
ParseFlag(str, &f->redzone, "redzone",
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.");
ParseFlag(str, &f->max_redzone, "max_redzone",
"Maximal size (in bytes) of redzones around heap objects.");
CHECK_GE(f->redzone, 16);
CHECK_GE(f->max_redzone, f->redzone);
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
ParseFlag(str, &f->debug, "debug",
"If set, prints some debugging information and does additional checks.");
ParseFlag(str, &f->report_globals, "report_globals",
"Controls the way to handle globals (0 - don't detect buffer overflow on "
"globals, 1 - detect buffer overflow, 2 - print data about registered "
"globals).");
ParseFlag(str, &f->check_initialization_order,
"check_initialization_order",
"If set, attempts to catch initialization order issues.");
ParseFlag(str, &f->replace_str, "replace_str",
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.");
ParseFlag(str, &f->replace_intrin, "replace_intrin",
"If set, uses custom wrappers for memset/memcpy/memmove intinsics.");
ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free",
"Ignore invalid free() calls to work around some bugs. Used on OS X "
"only.");
ParseFlag(str, &f->detect_stack_use_after_return,
"detect_stack_use_after_return",
"Enables stack-use-after-return checking at run-time.");
ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log",
"Minimum fake stack size log.");
ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log",
"Maximum fake stack size log.");
ParseFlag(str, &f->uar_noreserve, "uar_noreserve",
"Use mmap with 'norserve' flag to allocate fake stack.");
ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size",
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.");
ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte",
"Value used to fill the newly allocated memory.");
ParseFlag(str, &f->exitcode, "exitcode",
"Override the program exit status if the tool found an error.");
ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning",
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.");
ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying",
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).");
ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size",
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.");
ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit",
"If set, explicitly unmaps the (huge) shadow at exit.");
ParseFlag(str, &f->abort_on_error, "abort_on_error",
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.");
ParseFlag(str, &f->print_stats, "print_stats",
"Print various statistics after printing an error message or if "
"atexit=1.");
ParseFlag(str, &f->print_legend, "print_legend",
"Print the legend for the shadow bytes.");
ParseFlag(str, &f->atexit, "atexit",
"If set, prints ASan exit stats even after program terminates "
"successfully.");
ParseFlag(str, &f->allow_reexec, "allow_reexec",
"Allow the tool to re-exec the program. This may interfere badly with "
"the debugger.");
ParseFlag(str, &f->print_full_thread_history,
"print_full_thread_history",
"If set, prints thread creation stacks for the threads involved in the "
"report and their ancestors up to the main thread.");
ParseFlag(str, &f->poison_heap, "poison_heap",
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.");
ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
"Poison (or not) the array cookie after operator new[].");
ParseFlag(str, &f->poison_partial, "poison_partial",
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
"stack buffers.");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
"Report errors on malloc/delete, new/free, new/delete[], etc.");
ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
"Report errors on mismatch betwen size of new and delete.");
ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.");
ParseFlag(str, &f->strict_init_order, "strict_init_order",
"If true, assume that dynamic initializers can never access globals from "
"other modules, even if the latter are already initialized.");
ParseFlag(str, &f->start_deactivated, "start_deactivated",
"If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
"poisoning) to reduce memory consumption as much as possible, and "
"restores them to original values when the first instrumented module is "
"loaded into the process. This is mainly intended to be used on "
"Android. ");
ParseFlag(str, &f->detect_invalid_pointer_pairs,
"detect_invalid_pointer_pairs",
"If non-zero, try to detect operations like <, <=, >, >= and - on "
"invalid pointer pairs (e.g. when pointers belong to different objects). "
"The bigger the value the harder we try.");
ParseFlag(str, &f->detect_container_overflow,
"detect_container_overflow",
"If true, honor the container overflow annotations. "
"See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow");
ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation",
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes");
ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes",
"If true, dump 16 bytes starting at the instruction that caused SEGV");
}
void InitializeFlags(Flags *f, const char *env) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->detect_leaks = CAN_SANITIZE_LEAKS;
cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf->malloc_context_size = kDefaultMallocContextSize;
cf->intercept_tls_get_addr = true;
cf->coverage = false;
internal_memset(f, 0, sizeof(*f));
f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28;
f->redzone = 16;
f->max_redzone = 2048;
f->debug = false;
f->report_globals = 1;
f->check_initialization_order = false;
f->replace_str = true;
f->replace_intrin = true;
f->mac_ignore_invalid_free = false;
f->detect_stack_use_after_return = false; // Also needs the compiler flag.
f->min_uar_stack_size_log = 16; // We can't do smaller anyway.
f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread.
f->uar_noreserve = false;
f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K.
f->malloc_fill_byte = 0xbe;
f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE;
f->allow_user_poisoning = true;
f->sleep_before_dying = 0;
f->check_malloc_usable_size = true;
f->unmap_shadow_on_exit = false;
f->abort_on_error = false;
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
f->poison_array_cookie = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
f->new_delete_type_mismatch = true;
f->strict_memcmp = true;
f->strict_init_order = false;
f->start_deactivated = false;
f->detect_invalid_pointer_pairs = 0;
f->detect_container_overflow = true;
f->detect_odr_violation = 2;
f->dump_instruction_bytes = false;
// Override from compile definition.
ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
VReport(1, "Using the defaults from __asan_default_options: %s\n",
MaybeCallAsanDefaultOptions());
// Override from command line.
ParseFlagsFromString(f, env);
if (common_flags()->help) {
PrintFlagDescriptions();
}
if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) {
Report("%s: detect_leaks is not supported on this platform.\n",
SanitizerToolName);
cf->detect_leaks = false;
}
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
f->check_initialization_order = true;
}
}
// Parse flags that may change between startup and activation.
// On Android they come from a system property.
// On other platforms this is no-op.
void ParseExtraActivationFlags() {
char buf[100];
GetExtraActivationFlags(buf, sizeof(buf));
ParseFlagsFromString(flags(), buf);
if (buf[0] != '\0')
VReport(1, "Extra activation flags: %s\n", buf);
}
// -------------------------- Globals --------------------- {{{1
int asan_inited;
bool asan_init_is_running;
void (*death_callback)(void);
#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@ -344,7 +86,8 @@ void ShowStatsAndAbort() {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
static void ReserveShadowMemoryRange(uptr beg, uptr end) {
// We need to use an inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end) {
CHECK_EQ((beg % GetPageSizeCached()), 0);
CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
uptr size = end - beg + 1;
@ -355,6 +98,10 @@ static void ReserveShadowMemoryRange(uptr beg, uptr end) {
"Perhaps you're using ulimit -v\n", size);
Abort();
}
if (common_flags()->no_huge_pages_for_shadow)
NoHugePagesInRegion(beg, size);
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(beg, size);
}
// --------------- LowLevelAllocateCallback ---------- {{{1
@ -500,7 +247,13 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr a, uptr size) {
CHECK_EQ(a, (uptr)Mprotect(a, size));
void *res = Mprotect(a, size);
if (a == (uptr)res)
return;
Report("ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
static void PrintAddressSpaceLayout() {
@ -539,7 +292,7 @@ static void PrintAddressSpaceLayout() {
Printf("\n");
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@ -561,8 +314,10 @@ static void AsanInitInternal() {
// Initialize flags. This must be done early, because most of the
// initialization steps look at flags().
const char *options = GetEnv("ASAN_OPTIONS");
InitializeFlags(flags(), options);
InitializeFlags();
SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);
InitializeHighMemEnd();
@ -574,20 +329,11 @@ static void AsanInitInternal() {
SetCheckFailedCallback(AsanCheckFailed);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
if (!flags()->start_deactivated)
ParseExtraActivationFlags();
__sanitizer_set_report_path(common_flags()->log_path);
// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
if (options) {
VReport(1, "Parsed ASAN_OPTIONS: %s\n", options);
}
if (flags()->start_deactivated)
AsanStartDeactivated();
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@ -618,8 +364,7 @@ static void AsanInitInternal() {
}
#endif
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
if (Verbosity()) PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary();
@ -649,6 +394,8 @@ static void AsanInitInternal() {
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
@ -656,7 +403,12 @@ static void AsanInitInternal() {
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
InitializeAllocator();
AllocatorOptions allocator_options;
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);
MaybeStartBackgroudThread();
SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
// On Linux, AsanThread::ThreadStart() calls malloc(), which is why asan_inited
// should be set to 1 prior to initializing the threads.
@ -666,10 +418,12 @@ static void AsanInitInternal() {
if (flags()->atexit)
Atexit(asan_atexit);
if (common_flags()->coverage) {
__sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
}
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
// Now that ASan runtime is (mostly) initialized, deactivate it if
// necessary, so that it can be re-activated when requested.
if (flags()->start_deactivated)
AsanDeactivate();
// interceptors
InitTlsSize();
@ -686,7 +440,7 @@ static void AsanInitInternal() {
SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
__lsan::InitCommonLsan(false);
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
@ -724,13 +478,6 @@ static AsanInitializer asan_initializer;
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char* __asan_default_options() { return ""; }
} // extern "C"
#endif
int NOINLINE __asan_set_error_exit_code(int exit_code) {
int old = flags()->exitcode;
flags()->exitcode = exit_code;
@ -764,7 +511,7 @@ void NOINLINE __asan_handle_no_return() {
}
void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
death_callback = callback;
SetUserDieCallback(callback);
}
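__asan_set_death_callback() remains public interface; it now simply forwards to the common runtime's SetUserDieCallback() instead of a private death_callback pointer. Typical application-side use (callback name hypothetical):

extern "C" void __asan_set_death_callback(void (*callback)(void));

static void FlushLogsBeforeDeath() {
  // Last-chance cleanup before ASan calls Die().
}

// During startup:
//   __asan_set_death_callback(FlushLogsBeforeDeath);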
// Initialize as requested from instrumented application code.

View File

@ -13,6 +13,21 @@
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
namespace __asan {
static atomic_uint32_t malloc_context_size;
void SetMallocContextSize(u32 size) {
atomic_store(&malloc_context_size, size, memory_order_release);
}
u32 GetMallocContextSize() {
return atomic_load(&malloc_context_size, memory_order_acquire);
}
} // namespace __asan
// ------------------ Interface -------------- {{{1

View File

@ -21,6 +21,11 @@
namespace __asan {
static const u32 kDefaultMallocContextSize = 30;
void SetMallocContextSize(u32 size);
u32 GetMallocContextSize();
// Get the stack trace with the given pc and bp.
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
@ -93,9 +98,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_MALLOC \
GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC

View File

@ -15,57 +15,62 @@
#include "asan_suppressions.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
static bool suppressions_inited = false;
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kInterceptorName[] = "interceptor_name";
static const char kInterceptorViaFunction[] = "interceptor_via_fun";
static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
static const char *kSuppressionTypes[] = {
kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary};
void InitializeSuppressions() {
CHECK(!suppressions_inited);
SuppressionContext::InitIfNecessary();
suppressions_inited = true;
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
}
bool IsInterceptorSuppressed(const char *interceptor_name) {
CHECK(suppressions_inited);
SuppressionContext *ctx = SuppressionContext::Get();
CHECK(suppression_ctx);
Suppression *s;
// Match "interceptor_name" suppressions.
return ctx->Match(interceptor_name, SuppressionInterceptorName, &s);
return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
}
bool HaveStackTraceBasedSuppressions() {
CHECK(suppressions_inited);
SuppressionContext *ctx = SuppressionContext::Get();
return ctx->HasSuppressionType(SuppressionInterceptorViaFunction) ||
ctx->HasSuppressionType(SuppressionInterceptorViaLibrary);
CHECK(suppression_ctx);
return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
}
bool IsStackTraceSuppressed(const StackTrace *stack) {
CHECK(suppressions_inited);
if (!HaveStackTraceBasedSuppressions())
return false;
SuppressionContext *ctx = SuppressionContext::Get();
CHECK(suppression_ctx);
Symbolizer *symbolizer = Symbolizer::GetOrInit();
Suppression *s;
for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
uptr addr = stack->trace[i];
if (ctx->HasSuppressionType(SuppressionInterceptorViaLibrary)) {
if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
const char *module_name;
uptr module_offset;
// Match "interceptor_via_lib" suppressions.
if (symbolizer->GetModuleNameAndOffsetForPC(addr, &module_name,
&module_offset) &&
ctx->Match(module_name, SuppressionInterceptorViaLibrary, &s)) {
suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) {
return true;
}
}
if (ctx->HasSuppressionType(SuppressionInterceptorViaFunction)) {
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
@ -73,7 +78,8 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
continue;
}
// Match "interceptor_via_fun" suppressions.
if (ctx->Match(function_name, SuppressionInterceptorViaFunction, &s)) {
if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
&s)) {
frames->ClearAll();
return true;
}
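The rewrite replaces the global SuppressionContext singleton with a context built in static storage and parameterized by the suppression types it accepts; suppression files keep the usual one-per-line "type:pattern" entries, e.g. interceptor_via_fun:MyFunction. The placement-new-into-static-storage idiom in isolation (names hypothetical):

#include <new>

struct Context { int state = 0; };

alignas(64) static char placeholder[sizeof(Context)];
static Context *ctx = nullptr;

void InitOnce() {
  // No heap allocation: construct the object in pre-reserved static storage,
  // which is safe during early initialization.
  ctx = new (placeholder) Context();
}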

View File

@ -60,6 +60,10 @@ void PlatformTSDDtor(void *tsd) {
AsanThread::TSDDtor(tsd);
}
// ---------------------- Various stuff ---------------- {{{1
void DisableReexec() {
// No need to re-exec on Windows.
}
void MaybeReexec() {
// No need to re-exec on Windows.
}

View File

@ -294,7 +294,43 @@ INTERFACE_FUNCTION(__asan_stack_free_8)
INTERFACE_FUNCTION(__asan_stack_free_9)
INTERFACE_FUNCTION(__asan_stack_free_10)
// FIXME: we might want to have a sanitizer_win_dll_thunk?
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_free_hook)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_malloc_hook)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_FUNCTION(__sanitizer_reset_coverage)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
// TODO(timurrrr): Add more interface functions on the as-needed basis.

View File

@ -23,10 +23,11 @@
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
extern "C" {
__declspec(dllimport) int __asan_set_seh_filter();
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
#include <windows.h>
#include <psapi.h>
extern "C" {
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// used when linking an MD runtime with a set of object files on Windows.
//
@ -37,16 +38,82 @@ __declspec(dllimport) int __asan_should_detect_stack_use_after_return();
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone a variable that is
// constant after initialization anyway.
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
}
// Set the ASan-specific SEH handler at the end of CRT initialization of each
// module (see asan_win.cc for the details).
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators as MT does.
// To work around this, for each DLL we schedule an atexit() call to
// UnregisterGlobalsInRange with the address range of the DLL image, to
// unregister globals in that range. We don't do the same for the main
// module (.exe) as the asan_globals.cc allocator is destroyed by the time
// UnregisterGlobalsInRange is executed.
// See PR22545 for the details.
namespace __asan {
__declspec(dllimport)
void UnregisterGlobalsInRange(void *beg, void *end);
}
namespace {
void *this_module_base, *this_module_end;
void UnregisterGlobals() {
__asan::UnregisterGlobalsInRange(this_module_base, this_module_end);
}
int ScheduleUnregisterGlobals() {
HMODULE this_module = 0;
// This increments the reference counter of the DLL module, so we need to
// call FreeLibrary later.
if (!GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
(LPCTSTR)&UnregisterGlobals, &this_module))
return 1;
// Skip the main module.
if (this_module == GetModuleHandle(0))
return 0;
MODULEINFO mi;
bool success =
GetModuleInformation(GetCurrentProcess(), this_module, &mi, sizeof(mi));
if (!FreeLibrary(this_module))
return 2;
if (!success)
return 3;
this_module_base = mi.lpBaseOfDll;
this_module_end = (char*)mi.lpBaseOfDll + mi.SizeOfImage;
return atexit(UnregisterGlobals);
}
} // namespace
///////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
extern "C" __declspec(dllimport) int __asan_set_seh_filter();
static int SetSEHFilter() { return __asan_set_seh_filter(); }
///////////////////////////////////////////////////////////////////////////////
// We schedule some work at start-up by placing pointers to our callbacks on
// the list of CRT C initializers.
//
// First, declare sections we'll be using:
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XIZ", long, read) // NOLINT
// We need to call 'atexit(UnregisterGlobals);' after atexit() is initialized
// (.CRT$XIC) but before the C++ constructors (.CRT$XCA).
__declspec(allocate(".CRT$XID"))
static int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
// We need to set the ASan-specific SEH handler at the end of CRT initialization
// of each module (see also asan_win.cc).
//
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
static int SetSEHFilter() { return __asan_set_seh_filter(); }
#pragma section(".CRT$XIZ", long, read) // NOLINT
__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
}
extern "C" __declspec(allocate(".CRT$XIZ"))
int (*__asan_seh_interceptor)() = SetSEHFilter;
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
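Both callbacks rely on the MSVC CRT convention that function pointers placed in .CRT$XI* sections are invoked during C initialization, in section-name order between .CRT$XIA and .CRT$XIZ. A minimal sketch of the idiom (MSVC-only; the .CRT$XIU slot is the customary user slot and an assumption here):

#include <cstdio>

static int EarlyInit() {
  puts("called during CRT init");
  return 0;  // nonzero would be treated as an initialization error
}

#pragma section(".CRT$XIU", long, read)
__declspec(allocate(".CRT$XIU"))
static int (*early_init_entry)() = EarlyInit;

int main() { return 0; }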

View File

@ -18,6 +18,7 @@ revert=no
extra_options=
device=
lib=
use_su=0
function usage {
echo "usage: $0 [--revert] [--device device-id] [--lib path] [--extra-options options]"
@ -26,13 +27,70 @@ function usage {
echo " --extra-options: Extra ASAN_OPTIONS."
echo " --device: Install to the given device. Use 'adb devices' to find"
echo " device-id."
echo " --use-su: Use 'su -c' prefix for every adb command instead of using"
echo " 'adb root' once."
echo
exit 1
}
function adb_push {
if [ $use_su -eq 0 ]; then
$ADB push "$1" "$2"
else
local FILENAME=$(basename $1)
$ADB push "$1" "/data/local/tmp/$FILENAME"
$ADB shell su -c "rm \\\"$2/$FILENAME\\\"" >&/dev/null
$ADB shell su -c "cat \\\"/data/local/tmp/$FILENAME\\\" > \\\"$2/$FILENAME\\\""
$ADB shell su -c "rm \\\"/data/local/tmp/$FILENAME\\\""
fi
}
function adb_remount {
if [ $use_su -eq 0 ]; then
$ADB remount
else
local STORAGE=`$ADB shell mount | grep /system | cut -d ' ' -f1`
if [ "$STORAGE" != "" ]; then
echo Remounting $STORAGE at /system
$ADB shell su -c "mount -o remount,rw $STORAGE /system"
else
echo Failed to get storage device name for "/system" mount point
fi
fi
}
function adb_shell {
if [ $use_su -eq 0 ]; then
$ADB shell $@
else
$ADB shell su -c "$*"
fi
}
function adb_root {
if [ $use_su -eq 0 ]; then
$ADB root
fi
}
function adb_wait_for_device {
$ADB wait-for-device
}
function adb_pull {
if [ $use_su -eq 0 ]; then
$ADB pull "$1" "$2"
else
local FILENAME=$(basename $1)
$ADB shell rm "/data/local/tmp/$FILENAME" >&/dev/null
$ADB shell su -c "[ -f \\\"$1\\\" ] && cat \\\"$1\\\" > \\\"/data/local/tmp/$FILENAME\\\" && chown root.shell \\\"/data/local/tmp/$FILENAME\\\" && chmod 755 \\\"/data/local/tmp/$FILENAME\\\"" &&
$ADB pull "/data/local/tmp/$FILENAME" "$2" >&/dev/null && $ADB shell "rm \"/data/local/tmp/$FILENAME\""
fi
}
function get_device_arch { # OUTVAR
local _outvar=$1
local _ABI=$($ADB shell getprop ro.product.cpu.abi)
local _ABI=$(adb_shell getprop ro.product.cpu.abi)
local _ARCH=
if [[ $_ABI == x86* ]]; then
_ARCH=i686
@ -74,6 +132,9 @@ while [[ $# > 0 ]]; do
fi
device="$1"
;;
--use-su)
use_su=1
;;
*)
usage
;;
@ -86,11 +147,25 @@ if [[ x$device != x ]]; then
ADB="$ADB -s $device"
fi
if [ $use_su -eq 1 ]; then
# Test if 'su' is present on the device
SU_TEST_OUT=`$ADB shell su -c "echo foo" 2>&1 | sed 's/\r$//'`
if [ $? != 0 -o "$SU_TEST_OUT" != "foo" ]; then
echo "ERROR: Cannot use 'su -c':"
echo "$ adb shell su -c \"echo foo\""
echo $SU_TEST_OUT
echo "Check that 'su' binary is correctly installed on the device or omit"
echo " --use-su flag"
exit 1
fi
fi
echo '>> Remounting /system rw'
$ADB root
$ADB wait-for-device
$ADB remount
$ADB wait-for-device
adb_wait_for_device
adb_root
adb_wait_for_device
adb_remount
adb_wait_for_device
get_device_arch ARCH
echo "Target architecture: $ARCH"
@ -99,22 +174,24 @@ ASAN_RT="libclang_rt.asan-$ARCH-android.so"
if [[ x$revert == xyes ]]; then
echo '>> Uninstalling ASan'
if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
echo '>> Pre-L device detected.'
$ADB shell mv /system/bin/app_process.real /system/bin/app_process
$ADB shell rm /system/bin/asanwrapper
$ADB shell rm /system/lib/$ASAN_RT
adb_shell mv /system/bin/app_process.real /system/bin/app_process
adb_shell rm /system/bin/asanwrapper
else
$ADB shell rm /system/bin/app_process.wrap
$ADB shell rm /system/bin/asanwrapper
$ADB shell rm /system/lib/$ASAN_RT
$ADB shell rm /system/bin/app_process
$ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
adb_shell rm /system/bin/app_process.wrap
adb_shell rm /system/bin/asanwrapper
adb_shell rm /system/bin/app_process
adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
fi
echo '>> Restarting shell'
$ADB shell stop
$ADB shell start
adb_shell stop
adb_shell start
# Remove the library as the last step so that the 'su' binary can still be
# executed without problems.
adb_shell rm /system/lib/$ASAN_RT
echo '>> Done'
exit 0
@ -145,28 +222,28 @@ TMPDIROLD="$TMPDIRBASE/old"
TMPDIR="$TMPDIRBASE/new"
mkdir "$TMPDIROLD"
RELEASE=$($ADB shell getprop ro.build.version.release)
RELEASE=$(adb_shell getprop ro.build.version.release)
PRE_L=0
if echo "$RELEASE" | grep '^4\.' >&/dev/null; then
PRE_L=1
fi
if ! $ADB shell readlink /system/bin/app_process | grep 'app_process' >&/dev/null; then
if ! adb_shell ls -l /system/bin/app_process | grep -o '\->.*app_process' >&/dev/null; then
if $ADB pull /system/bin/app_process.real /dev/null >&/dev/null; then
if adb_pull /system/bin/app_process.real /dev/null >&/dev/null; then
echo '>> Old-style ASan installation detected. Reverting.'
$ADB shell mv /system/bin/app_process.real /system/bin/app_process
adb_shell mv /system/bin/app_process.real /system/bin/app_process
fi
echo '>> Pre-L device detected. Setting up app_process symlink.'
$ADB shell mv /system/bin/app_process /system/bin/app_process32
$ADB shell ln -s /system/bin/app_process32 /system/bin/app_process
adb_shell mv /system/bin/app_process /system/bin/app_process32
adb_shell ln -s /system/bin/app_process32 /system/bin/app_process
fi
echo '>> Copying files from the device'
$ADB pull /system/bin/app_process.wrap "$TMPDIROLD" || true
$ADB pull /system/bin/asanwrapper "$TMPDIROLD" || true
$ADB pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
adb_pull /system/bin/app_process.wrap "$TMPDIROLD" || true
adb_pull /system/bin/asanwrapper "$TMPDIROLD" || true
adb_pull /system/lib/"$ASAN_RT" "$TMPDIROLD" || true
cp -r "$TMPDIROLD" "$TMPDIR"
if [[ -f "$TMPDIR/app_process.wrap" ]]; then
@ -184,7 +261,7 @@ cp "$ASAN_RT_PATH/$ASAN_RT" "$TMPDIR/"
ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0
# On Android-L not allowing user segv handler breaks some applications.
if $ADB shell 'echo $LD_PRELOAD' | grep libsigchain.so >&/dev/null; then
if [[ PRE_L -eq 0 ]]; then
ASAN_OPTIONS="$ASAN_OPTIONS,allow_user_segv_handler=1"
fi
@ -212,52 +289,52 @@ EOF
if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
echo '>> Pushing files to the device'
$ADB push "$TMPDIR/$ASAN_RT" /system/lib/
$ADB push "$TMPDIR/app_process.wrap" /system/bin/app_process.wrap
$ADB push "$TMPDIR/asanwrapper" /system/bin/asanwrapper
adb_push "$TMPDIR/$ASAN_RT" /system/lib/
adb_push "$TMPDIR/app_process.wrap" /system/bin
adb_push "$TMPDIR/asanwrapper" /system/bin
$ADB shell rm /system/bin/app_process
$ADB shell ln -s /system/bin/app_process.wrap /system/bin/app_process
adb_shell rm /system/bin/app_process
adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
$ADB shell chown root.shell \
adb_shell chown root.shell \
/system/lib/"$ASAN_RT" \
/system/bin/app_process.wrap \
/system/bin/asanwrapper
$ADB shell chmod 644 \
adb_shell chmod 644 \
/system/lib/"$ASAN_RT"
$ADB shell chmod 755 \
adb_shell chmod 755 \
/system/bin/app_process.wrap \
/system/bin/asanwrapper
# Make SELinux happy by keeping app_process wrapper and the shell
# it runs on in zygote domain.
ENFORCING=0
if $ADB shell getenforce | grep Enforcing >/dev/null; then
if adb_shell getenforce | grep Enforcing >/dev/null; then
# Sometimes shell is not allowed to change file contexts.
# Temporarily switch to permissive.
ENFORCING=1
$ADB shell setenforce 0
adb_shell setenforce 0
fi
$ADB shell cp /system/bin/sh /system/bin/sh-from-zygote
adb_shell cp /system/bin/sh /system/bin/sh-from-zygote
if [[ PRE_L -eq 1 ]]; then
CTX=u:object_r:system_file:s0
else
CTX=u:object_r:zygote_exec:s0
fi
$ADB shell chcon $CTX \
adb_shell chcon $CTX \
/system/bin/sh-from-zygote \
/system/bin/app_process.wrap \
/system/bin/app_process32
if [ $ENFORCING == 1 ]; then
$ADB shell setenforce 1
adb_shell setenforce 1
fi
echo '>> Restarting shell (asynchronous)'
$ADB shell stop
$ADB shell start
adb_shell stop
adb_shell start
echo '>> Please wait until the device restarts'
else

View File

@ -11,11 +11,9 @@ import argparse
import bisect
import getopt
import os
import pty
import re
import subprocess
import sys
import termios
symbolizers = {}
DEBUG = False
@ -171,6 +169,9 @@ class UnbufferedLineConverter(object):
output. Uses pty to trick the child into providing unbuffered output.
"""
def __init__(self, args, close_stderr=False):
# Local imports so that the script can start on Windows.
import pty
import termios
pid, fd = pty.fork()
if pid == 0:
# We're the child. Transfer control to command.
@ -341,17 +342,23 @@ class BreakpadSymbolizer(Symbolizer):
class SymbolizationLoop(object):
def __init__(self, binary_name_filter=None, dsym_hint_producer=None):
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
if sys.platform == 'win32':
# ASan on Windows uses dbghelp.dll to symbolize in-process, which works
# even in sandboxed processes. Nothing needs to be done here.
self.process_line = self.process_line_echo
else:
# Used by clients who may want to supply a different binary name.
# E.g. in Chrome several binaries may share a single .dSYM.
self.binary_name_filter = binary_name_filter
self.dsym_hint_producer = dsym_hint_producer
self.system = os.uname()[0]
if self.system not in ['Linux', 'Darwin', 'FreeBSD']:
raise Exception('Unknown system')
self.llvm_symbolizers = {}
self.last_llvm_symbolizer = None
self.dsym_hints = set([])
self.frame_no = 0
self.process_line = self.process_line_posix
def symbolize_address(self, addr, binary, offset):
# On non-Darwin (i.e. on platforms without .dSYM debug info) always use
@ -366,12 +373,12 @@ class SymbolizationLoop(object):
# 3. otherwise create a new symbolizer and pass all currently known
# .dSYM hints to it.
if not binary in self.llvm_symbolizers:
use_last_symbolizer = True
use_new_symbolizer = True
if self.system == 'Darwin' and self.dsym_hint_producer:
dsym_hints_for_binary = set(self.dsym_hint_producer(binary))
use_last_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
use_new_symbolizer = bool(dsym_hints_for_binary - self.dsym_hints)
self.dsym_hints |= dsym_hints_for_binary
if self.last_llvm_symbolizer and use_last_symbolizer:
if self.last_llvm_symbolizer and not use_new_symbolizer:
self.llvm_symbolizers[binary] = self.last_llvm_symbolizer
else:
self.last_llvm_symbolizer = LLVMSymbolizerFactory(
@ -405,14 +412,14 @@ class SymbolizationLoop(object):
def process_logfile(self):
self.frame_no = 0
while True:
line = logfile.readline()
if not line:
break
for line in logfile:
processed = self.process_line(line)
print '\n'.join(processed)
def process_line(self, line):
def process_line_echo(self, line):
return [line.rstrip()]
def process_line_posix(self, line):
self.current_line = line.rstrip()
#0 0x7f6e35cf2e45 (/blah/foo.so+0x11fe45)
stack_trace_line_format = (
@ -437,20 +444,23 @@ class SymbolizationLoop(object):
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='''Example of use:
asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" -s "$HOME/SymbolFiles" < asan.log''')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='ASan symbolization script',
epilog='Example of use:\n'
'asan_symbolize.py -c "$HOME/opt/cross/bin/arm-linux-gnueabi-" '
'-s "$HOME/SymbolFiles" < asan.log')
parser.add_argument('path_to_cut', nargs='*',
help='pattern to be cut from the result file path ')
help='pattern to be cut from the result file path ')
parser.add_argument('-d','--demangle', action='store_true',
help='demangle function names')
help='demangle function names')
parser.add_argument('-s', metavar='SYSROOT',
help='set path to sysroot for sanitized binaries')
help='set path to sysroot for sanitized binaries')
parser.add_argument('-c', metavar='CROSS_COMPILE',
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin, type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
help='set prefix for binutils')
parser.add_argument('-l','--logfile', default=sys.stdin,
type=argparse.FileType('r'),
help='set log file name to parse, default is stdin')
args = parser.parse_args()
if args.path_to_cut:
fix_filename_patterns = args.path_to_cut

View File

@ -87,7 +87,7 @@ TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
}
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
// asan_allocator2 does not keep huge chunks in free list, but unmaps them.
// ASan allocator does not keep huge chunks in free list, but unmaps them.
// The chunk should be greater than the quarantine size,
// otherwise it will be stuck in quarantine instead of being unmapped.
static const size_t kLargeMallocSize = (1 << 28) + 1; // 256M

View File

@ -31,18 +31,12 @@
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.
#if !defined(_WIN32)
extern "C" {
// Set specific ASan options for uninstrumented unittest.
const char* __asan_default_options() {
return "allow_reexec=0";
}
} // extern "C"
#endif
// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
AsanInitCaller() { __asan_init(); }
AsanInitCaller() {
__asan::DisableReexec();
__asan_init();
}
};
static AsanInitCaller asan_init_caller;

View File

@ -603,7 +603,8 @@ NOINLINE void SigLongJmpFunc1(sigjmp_buf buf) {
}
#if !defined(__ANDROID__) && !defined(__arm__) && \
!defined(__powerpc64__) && !defined(__powerpc__)
!defined(__powerpc64__) && !defined(__powerpc__) && \
!defined(__aarch64__)
// Does not work on Power and ARM:
// https://code.google.com/p/address-sanitizer/issues/detail?id=185
TEST(AddressSanitizer, BuiltinLongJmpTest) {
@ -1284,3 +1285,33 @@ TEST(AddressSanitizer, pthread_getschedparam) {
ASSERT_EQ(0, res);
}
#endif
#if SANITIZER_TEST_HAS_PRINTF_L
static int vsnprintf_l_wrapper(char *s, size_t n,
locale_t l, const char *format, ...) {
va_list va;
va_start(va, format);
int res = vsnprintf_l(s, n, l, format, va);
va_end(va);
return res;
}
TEST(AddressSanitizer, snprintf_l) {
char buff[5];
// Check that snprintf_l() works fine with Asan.
int res = snprintf_l(buff, 5,
_LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()");
EXPECT_EQ(12, res);
// Check that vsnprintf_l() works fine with Asan.
res = vsnprintf_l_wrapper(buff, 5,
_LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()");
EXPECT_EQ(13, res);
EXPECT_DEATH(snprintf_l(buff, 10,
_LIBCPP_GET_C_LOCALE, "%s", "snprintf_l()"),
"AddressSanitizer: stack-buffer-overflow");
EXPECT_DEATH(vsnprintf_l_wrapper(buff, 10,
_LIBCPP_GET_C_LOCALE, "%s", "vsnprintf_l()"),
"AddressSanitizer: stack-buffer-overflow");
}
#endif

View File

@ -28,20 +28,14 @@
#include <stdint.h>
#include <string.h>
#include "assembly.h"
// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#if __APPLE__
// mach-o has extra leading underscore
#pragma redefine_extname __atomic_load_c ___atomic_load
#pragma redefine_extname __atomic_store_c ___atomic_store
#pragma redefine_extname __atomic_exchange_c ___atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c ___atomic_compare_exchange
#else
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
#endif
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME(__atomic_compare_exchange)
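SYMBOL_NAME comes from assembly.h and absorbs the Mach-O leading-underscore difference that the deleted #if/#else spelled out by hand. The usual way to build such a macro is a two-step token paste on the predefined __USER_LABEL_PREFIX__ ("_" on Mach-O, empty on ELF); a sketch, not necessarily assembly.h verbatim:

#define GLUE2(a, b) a##b
#define GLUE(a, b) GLUE2(a, b)
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)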
/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade between

View File

@ -22,8 +22,53 @@
#include <machine/sysarch.h>
#endif
#if defined(__ANDROID__) && defined(__mips__)
#if defined(__mips__) && !defined(__FreeBSD__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#if defined(__ANDROID__) && defined(__LP64__)
/*
* clear_mips_cache - Invalidates instruction cache for Mips.
*/
static void clear_mips_cache(const void* Addr, size_t Size) {
asm volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"beq %[Size], $zero, 20f\n" /* If size == 0, branch around. */
"nop\n"
"daddu %[Size], %[Addr], %[Size]\n" /* Calculate end address + 1 */
"rdhwr $v0, $1\n" /* Get step size for SYNCI.
$1 is $HW_SYNCI_Step */
"beq $v0, $zero, 20f\n" /* If no caches require
synchronization, branch
around. */
"nop\n"
"10:\n"
"synci 0(%[Addr])\n" /* Synchronize all caches around
address. */
"daddu %[Addr], %[Addr], $v0\n" /* Add step size. */
"sltu $at, %[Addr], %[Size]\n" /* Compare current with end
address. */
"bne $at, $zero, 10b\n" /* Branch if more to do. */
"nop\n"
"sync\n" /* Clear memory hazards. */
"20:\n"
"bal 30f\n"
"nop\n"
"30:\n"
"daddiu $ra, $ra, 12\n" /* $ra has a value of $pc here.
Add offset of 12 to point to the
instruction after the last nop.
*/
"jr.hb $ra\n" /* Return, clearing instruction
hazards. */
"nop\n"
".set pop\n"
: [Addr] "+r"(Addr), [Size] "+r"(Size)
:: "at", "ra", "v0", "memory"
);
}
#endif
#endif
#if defined(__ANDROID__) && defined(__arm__)
@ -52,7 +97,7 @@ void __clear_cache(void *start, void *end) {
sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__ANDROID__)
const register int start_reg __asm("r0") = (int) (intptr_t) start;
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
@ -64,10 +109,20 @@ void __clear_cache(void *start, void *end) {
#else
compilerrt_abort();
#endif
#elif defined(__ANDROID__) && defined(__mips__)
#elif defined(__mips__) && !defined(__FreeBSD__)
const uintptr_t start_int = (uintptr_t) start;
const uintptr_t end_int = (uintptr_t) end;
_flush_cache(start, (end_int - start_int), BCACHE);
#if defined(__ANDROID__) && defined(__LP64__)
// Call synci implementation for short address range.
const uintptr_t address_range_limit = 256;
if ((end_int - start_int) <= address_range_limit) {
clear_mips_cache(start, (end_int - start_int));
} else {
syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
}
#else
syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
#endif
#elif defined(__aarch64__) && !defined(__APPLE__)
uint64_t xstart = (uint64_t)(uintptr_t) start;
uint64_t xend = (uint64_t)(uintptr_t) end;

View File

@ -11,47 +11,7 @@
#include "int_lib.h"
/*
* _Unwind_* stuff based on C++ ABI public documentation
* http://refspecs.freestandards.org/abi-eh-1.21.html
*/
typedef enum {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_FATAL_PHASE2_ERROR = 2,
_URC_FATAL_PHASE1_ERROR = 3,
_URC_NORMAL_STOP = 4,
_URC_END_OF_STACK = 5,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8
} _Unwind_Reason_Code;
typedef enum {
_UA_SEARCH_PHASE = 1,
_UA_CLEANUP_PHASE = 2,
_UA_HANDLER_FRAME = 4,
_UA_FORCE_UNWIND = 8,
_UA_END_OF_STACK = 16
} _Unwind_Action;
typedef struct _Unwind_Context* _Unwind_Context_t;
struct _Unwind_Exception {
uint64_t exception_class;
void (*exception_cleanup)(_Unwind_Reason_Code reason,
struct _Unwind_Exception* exc);
uintptr_t private_1;
uintptr_t private_2;
};
COMPILER_RT_ABI const uint8_t* _Unwind_GetLanguageSpecificData(_Unwind_Context_t c);
COMPILER_RT_ABI void _Unwind_SetGR(_Unwind_Context_t c, int i, uintptr_t n);
COMPILER_RT_ABI void _Unwind_SetIP(_Unwind_Context_t, uintptr_t new_value);
COMPILER_RT_ABI uintptr_t _Unwind_GetIP(_Unwind_Context_t context);
COMPILER_RT_ABI uintptr_t _Unwind_GetRegionStart(_Unwind_Context_t context);
#include <unwind.h>
/*
* Pointer encodings documented at:
@ -185,12 +145,12 @@ static uintptr_t readEncodedPointer(const uint8_t** data, uint8_t encoding)
COMPILER_RT_ABI _Unwind_Reason_Code
__gcc_personality_sj0(int version, _Unwind_Action actions,
uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
_Unwind_Context_t context)
struct _Unwind_Context *context)
#else
COMPILER_RT_ABI _Unwind_Reason_Code
__gcc_personality_v0(int version, _Unwind_Action actions,
uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
_Unwind_Context_t context)
struct _Unwind_Context *context)
#endif
{
/* Since C does not have catch clauses, there is nothing to do during */
@ -199,7 +159,7 @@ __gcc_personality_v0(int version, _Unwind_Action actions,
return _URC_CONTINUE_UNWIND;
/* There is nothing to do if there is no LSDA for this frame. */
const uint8_t* lsda = _Unwind_GetLanguageSpecificData(context);
const uint8_t* lsda = (uint8_t*)_Unwind_GetLanguageSpecificData(context);
if ( lsda == (uint8_t*) 0 )
return _URC_CONTINUE_UNWIND;

View File

@ -56,7 +56,8 @@ typedef union
}s;
} udwords;
#if __LP64__
/* MIPS64 issue: PR 20098 */
#if defined(__LP64__) && !(defined(__mips__) && defined(__clang__))
#define CRT_HAS_128BIT
#endif

View File

@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "dfsan/dfsan.h"
@ -256,7 +257,7 @@ dfsan_read_label(const void *addr, uptr size) {
return __dfsan_union_load(shadow_for(addr), size);
}
SANITIZER_INTERFACE_ATTRIBUTE
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) {
return &__dfsan_label_info[label];
}
@ -310,16 +311,24 @@ dfsan_dump_labels(int fd) {
}
}
static void InitializeFlags(Flags &f, const char *env) {
f.warn_unimplemented = true;
f.warn_nonzero_labels = false;
f.strict_data_dependencies = true;
f.dump_labels_at_exit = "";
void Flags::SetDefaults() {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "dfsan_flags.inc"
#undef DFSAN_FLAG
}
ParseFlag(env, &f.warn_unimplemented, "warn_unimplemented", "");
ParseFlag(env, &f.warn_nonzero_labels, "warn_nonzero_labels", "");
ParseFlag(env, &f.strict_data_dependencies, "strict_data_dependencies", "");
ParseFlag(env, &f.dump_labels_at_exit, "dump_labels_at_exit", "");
static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "dfsan_flags.inc"
#undef DFSAN_FLAG
}
static void InitializeFlags() {
FlagParser parser;
RegisterDfsanFlags(&parser, &flags());
flags().SetDefaults();
parser.ParseString(GetEnv("DFSAN_OPTIONS"));
}
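Each DFSAN_FLAG entry is expanded three ways: as a struct field (in dfsan.h), as a default assignment (SetDefaults), and as a RegisterFlag() call here, so every flag is declared exactly once in dfsan_flags.inc. A self-contained sketch of the X-macro pattern, collapsed into a single list macro instead of a re-#included .inc file (MY_FLAG_LIST and its flags are hypothetical):

#include <cstdio>

#define MY_FLAG_LIST(X)   \
  X(bool, verbose, false) \
  X(int, max_items, 16)

struct MyFlags {
#define MY_FLAG(Type, Name, Default) Type Name;
  MY_FLAG_LIST(MY_FLAG)
#undef MY_FLAG
  void SetDefaults() {
#define MY_FLAG(Type, Name, Default) Name = Default;
    MY_FLAG_LIST(MY_FLAG)
#undef MY_FLAG
  }
};

int main() {
  MyFlags f;
  f.SetDefaults();
  printf("verbose=%d max_items=%d\n", f.verbose, f.max_items);
  return 0;
}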
static void dfsan_fini() {
@ -354,8 +363,7 @@ static void dfsan_init(int argc, char **argv, char **envp) {
if (!(init_addr >= kUnusedAddr && init_addr < kAppAddr))
Mprotect(kUnusedAddr, kAppAddr - kUnusedAddr);
InitializeFlags(flags(), GetEnv("DFSAN_OPTIONS"));
InitializeFlags();
InitializeInterceptors();
// Register the fini callback to run when the program terminates successfully

View File

@ -56,17 +56,11 @@ inline const dfsan_label *shadow_for(const void *ptr) {
}
struct Flags {
// Whether to warn on unimplemented functions.
bool warn_unimplemented;
// Whether to warn on non-zero labels.
bool warn_nonzero_labels;
// Whether to propagate labels only when there is an obvious data dependency
// (e.g., when comparing strings, ignore the fact that the output of the
// comparison might be data-dependent on the content of the strings). This
// applies only to the custom functions defined in 'custom.c'.
bool strict_data_dependencies;
// The path of the file where to dump the labels when the program terminates.
const char* dump_labels_at_exit;
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "dfsan_flags.inc"
#undef DFSAN_FLAG
void SetDefaults();
};
extern Flags flags_data;

View File

@ -314,11 +314,12 @@ static void unpoison(const void *ptr, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE void *
__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
dfsan_label flag_label, dfsan_label *ret_label) {
link_map *map = (link_map *)dlopen(filename, flag);
void *handle = dlopen(filename, flag);
link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
if (map)
ForEachMappedRegion(map, unpoison);
*ret_label = 0;
return (void *)map;
return handle;
}
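GET_LINK_MAP_BY_DLOPEN_HANDLE exists because a dlopen() handle is only a link_map* by accident of the glibc implementation; other platforms have to look it up. A sketch of the documented route via dlinfo() (works on glibc with _GNU_SOURCE and on FreeBSD):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>

static struct link_map *LinkMapFromHandle(void *handle) {
  struct link_map *map = nullptr;
  if (!handle || dlinfo(handle, RTLD_DI_LINKMAP, &map) != 0)
    return nullptr;
  return map;
}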
struct pthread_create_info {

View File

@ -0,0 +1,32 @@
//===-- dfsan_flags.inc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// DFSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef DFSAN_FLAG
# error "Define DFSAN_FLAG prior to including this file!"
#endif
// DFSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
DFSAN_FLAG(bool, warn_unimplemented, true,
"Whether to warn on unimplemented functions.")
DFSAN_FLAG(bool, warn_nonzero_labels, false,
"Whether to warn on unimplemented functions.")
DFSAN_FLAG(
bool, strict_data_dependencies, true,
"Whether to propagate labels only when there is an obvious data dependency"
"(e.g., when comparing strings, ignore the fact that the output of the"
"comparison might be data-dependent on the content of the strings). This"
"applies only to the custom functions defined in 'custom.c'.")
DFSAN_FLAG(const char *, dump_labels_at_exit, "", "The path of the file to "
"dump the labels to when the "
"program terminates.")

View File

@ -15,6 +15,7 @@
#include "lsan.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
@ -34,13 +35,42 @@ bool WordIsPoisoned(uptr addr) {
using namespace __lsan; // NOLINT
static void InitializeFlags() {
// Set all the default values.
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
cf.detect_leaks = true;
OverrideCommonFlags(cf);
}
Flags *f = flags();
f->SetDefaults();
FlagParser parser;
RegisterLsanFlags(&parser, f);
RegisterCommonFlags(&parser);
parser.ParseString(GetEnv("LSAN_OPTIONS"));
SetVerbosity(common_flags()->verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
}
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
InitCommonLsan(true);
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
@ -52,6 +82,9 @@ extern "C" void __lsan_init() {
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
lsan_inited = true;
lsan_init_is_running = false;
}

View File

@ -25,10 +25,6 @@ extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
struct ChunkMetadata {
bool allocated : 8; // Must be first.
ChunkTag tag : 2;
@ -36,8 +32,22 @@ struct ChunkMetadata {
u32 stack_trace_id;
};
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
typedef CompactSizeClassMap SizeClassMap;
typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
#endif
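
A back-of-the-envelope check of the 32-bit-style allocator chosen for mips64 above, assuming SANITIZER_MMAP_RANGE_SIZE is 1ULL << 40 on that target (an assumption; the real constant lives in sanitizer_platform.h):

// One byte of size-class state per 1 MB region over a 2^40-byte range.
constexpr unsigned long long kMmapRange   = 1ULL << 40;  // assumed for mips64
constexpr unsigned long long kRegionLog   = 20;          // kRegionSizeLog above
constexpr unsigned long long kNumRegions_ = kMmapRange >> kRegionLog;
static_assert(kNumRegions_ == 1ULL << 20, "2^20 regions of 1 MB each");
// TwoLevelByteMap<(kNumRegions >> 12), 1 << 12>: 256 first-level slots,
// each owning a lazily mapped 4096-byte second-level array.
static_assert((kNumRegions_ >> 12) == 256, "small, lazily populated map");
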
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
@ -47,7 +57,7 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.Init();
allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
}
void AllocatorThreadFinish() {


@ -16,6 +16,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@ -36,52 +37,17 @@ bool DisabledInThisThread() { return disable_counter > 0; }
Flags lsan_flags;
static void InitializeFlags(bool standalone) {
Flags *f = flags();
// Default values.
f->report_objects = false;
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
f->use_tls = true;
f->use_root_regions = true;
f->use_unaligned = false;
f->use_poisoned = false;
f->log_pointers = false;
f->log_threads = false;
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
const char *options = GetEnv("LSAN_OPTIONS");
if (options) {
ParseFlag(options, &f->use_registers, "use_registers", "");
ParseFlag(options, &f->use_globals, "use_globals", "");
ParseFlag(options, &f->use_stacks, "use_stacks", "");
ParseFlag(options, &f->use_tls, "use_tls", "");
ParseFlag(options, &f->use_root_regions, "use_root_regions", "");
ParseFlag(options, &f->use_unaligned, "use_unaligned", "");
ParseFlag(options, &f->use_poisoned, "use_poisoned", "");
ParseFlag(options, &f->report_objects, "report_objects", "");
ParseFlag(options, &f->resolution, "resolution", "");
CHECK_GE(&f->resolution, 0);
ParseFlag(options, &f->max_leaks, "max_leaks", "");
CHECK_GE(&f->max_leaks, 0);
ParseFlag(options, &f->log_pointers, "log_pointers", "");
ParseFlag(options, &f->log_threads, "log_threads", "");
ParseFlag(options, &f->exitcode, "exitcode", "");
}
// Set defaults for common flags (only in standalone mode) and parse
// them from LSAN_OPTIONS.
CommonFlags *cf = common_flags();
if (standalone) {
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf->malloc_context_size = 30;
cf->detect_leaks = true;
}
ParseCommonFlagsFromString(cf, options);
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
#define LOG_POINTERS(...) \
@ -94,14 +60,23 @@ static void InitializeFlags(bool standalone) {
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0);
static bool suppressions_inited = false;
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
void InitializeSuppressions() {
CHECK(!suppressions_inited);
SuppressionContext::InitIfNecessary();
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
SuppressionContext::Get()->Parse(__lsan_default_suppressions());
suppressions_inited = true;
suppression_ctx->Parse(__lsan_default_suppressions());
}
static SuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx);
return suppression_ctx;
}
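
The aligned placeholder plus placement new above is how the sanitizer runtimes get a lazily built singleton without running a C++ static constructor of their own. A self-contained sketch of the same pattern in portable C++ (names are illustrative, not runtime code):

#include <new>

template <class T>
T *LazySingleton() {
  alignas(T) static char buf[sizeof(T)];  // raw storage, no ctor at startup
  static T *p = nullptr;
  if (!p) p = new (buf) T();              // construct in place on first use
  return p;
}
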
struct RootRegion {
@ -117,8 +92,7 @@ void InitializeRootRegions() {
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
void InitCommonLsan(bool standalone) {
InitializeFlags(standalone);
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
@ -141,9 +115,11 @@ static inline bool CanBeAHeapPointer(uptr p) {
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
if (p < kMinAddress) return false;
#ifdef __x86_64__
#if defined(__x86_64__)
// Accept only canonical form user-space addresses.
return ((p >> 47) == 0);
#elif defined(__mips64)
return ((p >> 40) == 0);
#else
return true;
#endif
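
Worked values for the pointer filter above (the mips64 width matches the 40-bit layout used elsewhere in this import):

// x86_64: user space is the low canonical half, 47 address bits.
//   0x00007fffffffffff >> 47 == 0  -> plausible heap pointer
//   0x0000800000000000 >> 47 == 1  -> rejected (non-canonical half)
// mips64: 40-bit user virtual addresses assumed here.
//   0x000000ffffffffff >> 40 == 0  -> plausible
//   0x0000010000000000 >> 40 == 1  -> rejected
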
@ -367,7 +343,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
LsanMetadata m(chunk);
if (!m.allocated()) return;
if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
uptr resolution = flags()->resolution;
u32 resolution = flags()->resolution;
u32 stack_trace_id = 0;
if (resolution > 0) {
StackTrace stack = StackDepotGet(m.stack_trace_id());
@ -383,7 +359,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
SuppressionContext::Get()->GetMatched(&matched);
GetSuppressionContext()->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
@ -462,17 +438,17 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
// Suppress by module name.
const char *module_name;
uptr module_offset;
SuppressionContext *suppressions = GetSuppressionContext();
if (Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(addr, &module_name,
&module_offset) &&
SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
suppressions->Match(module_name, kSuppressionLeak, &s))
return s;
// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
if (SuppressionContext::Get()->Match(cur->info.function, SuppressionLeak,
&s) ||
SuppressionContext::Get()->Match(cur->info.file, SuppressionLeak, &s)) {
if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
break;
}
}


@ -21,12 +21,17 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64)
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
&& (SANITIZER_WORDSIZE == 64)
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
#endif
namespace __sanitizer {
class FlagParser;
}
namespace __lsan {
// Chunk tags.
@ -38,44 +43,19 @@ enum ChunkTag {
};
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
#undef LSAN_FLAG
void SetDefaults();
uptr pointer_alignment() const {
return use_unaligned ? 1 : sizeof(uptr);
}
// Print addresses of leaked objects after main leak report.
bool report_objects;
// Aggregate two objects into one leak if this many stack frames match. If
// zero, the entire stack trace must match.
int resolution;
// The number of leaks reported.
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
bool use_globals;
// Thread stacks.
bool use_stacks;
// Thread registers.
bool use_registers;
// TLS and thread-specific storage.
bool use_tls;
// Regions added via __lsan_register_root_region().
bool use_root_regions;
// Consider unaligned pointers valid.
bool use_unaligned;
// Consider pointers found in poisoned memory to be valid.
bool use_poisoned;
// Debug logging.
bool log_pointers;
bool log_threads;
};
extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
struct Leak {
u32 id;
@ -131,7 +111,7 @@ enum IgnoreObjectResult {
};
// Functions called from the parent tool.
void InitCommonLsan(bool standalone);
void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();


@ -0,0 +1,45 @@
//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// LSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef LSAN_FLAG
# error "Define LSAN_FLAG prior to including this file!"
#endif
// LSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
LSAN_FLAG(bool, report_objects, false,
"Print addresses of leaked objects after main leak report.")
LSAN_FLAG(
int, resolution, 0,
"Aggregate two objects into one leak if this many stack frames match. If "
"zero, the entire stack trace must match.")
LSAN_FLAG(int, max_leaks, 0, "The maximum number of leaks to report (0 = report all).")
LSAN_FLAG(int, exitcode, 23,
"If nonzero kill the process with this exit code upon finding leaks.")
// Flags controlling the root set of reachable memory.
LSAN_FLAG(bool, use_globals, true,
"Root set: include global variables (.data and .bss)")
LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
"Consider pointers found in poisoned memory to be valid.")
LSAN_FLAG(bool, log_pointers, false, "Debug logging.")
LSAN_FLAG(bool, log_threads, false, "Debug logging.")
LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")


@ -16,16 +16,17 @@
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
// ACHTUNG! No system header includes in this file.
using namespace __sanitizer;
@ -96,19 +97,81 @@ static const char *StackOriginDescr[kNumStackOriginDescrs];
static uptr StackOriginPC[kNumStackOriginDescrs];
static atomic_uint32_t NumStackOriginDescrs;
static void ParseFlagsFromString(Flags *f, const char *str) {
CommonFlags *cf = common_flags();
ParseCommonFlagsFromString(cf, str);
ParseFlag(str, &f->poison_heap_with_zeroes, "poison_heap_with_zeroes", "");
ParseFlag(str, &f->poison_stack_with_zeroes, "poison_stack_with_zeroes", "");
ParseFlag(str, &f->poison_in_malloc, "poison_in_malloc", "");
ParseFlag(str, &f->poison_in_free, "poison_in_free", "");
ParseFlag(str, &f->exit_code, "exit_code", "");
void Flags::SetDefaults() {
#define MSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "msan_flags.inc"
#undef MSAN_FLAG
}
// keep_going is an old name for halt_on_error,
// and it has inverse meaning.
class FlagHandlerKeepGoing : public FlagHandlerBase {
bool *halt_on_error_;
public:
explicit FlagHandlerKeepGoing(bool *halt_on_error)
: halt_on_error_(halt_on_error) {}
bool Parse(const char *value) final {
bool tmp;
FlagHandler<bool> h(&tmp);
if (!h.Parse(value)) return false;
*halt_on_error_ = !tmp;
return true;
}
};
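
The legacy alias keeps old MSAN_OPTIONS strings working while storing only the new flag. Its effect during parsing, sketched (the parser applies assignments left to right, so a later explicit halt_on_error still wins):

// MSAN_OPTIONS="keep_going=1"                 -> halt_on_error = false
// MSAN_OPTIONS="keep_going=0"                 -> halt_on_error = true
// MSAN_OPTIONS="keep_going=1:halt_on_error=1" -> halt_on_error = true
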
static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
#define MSAN_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "msan_flags.inc"
#undef MSAN_FLAG
FlagHandlerKeepGoing *fh_keep_going = new (FlagParser::Alloc) // NOLINT
FlagHandlerKeepGoing(&f->halt_on_error);
parser->RegisterHandler("keep_going", fh_keep_going,
"deprecated, use halt_on_error");
}
static void InitializeFlags() {
Flags *f = flags();
FlagParser parser;
RegisterMsanFlags(&parser, f);
RegisterCommonFlags(&parser);
SetCommonFlagsDefaults();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 20;
cf.handle_ioctl = true;
// FIXME: test and enable.
cf.check_printf = false;
cf.intercept_tls_get_addr = true;
OverrideCommonFlags(cf);
}
f->SetDefaults();
// Override from user-specified string.
if (__msan_default_options)
parser.ParseString(__msan_default_options());
const char *msan_options = GetEnv("MSAN_OPTIONS");
parser.ParseString(msan_options);
VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
SetVerbosity(common_flags()->verbosity);
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
// Check flag values:
if (f->exit_code < 0 || f->exit_code > 127) {
Printf("Exit code not in [0, 128) range: %d\n", f->exit_code);
Die();
}
ParseFlag(str, &f->origin_history_size, "origin_history_size", "");
if (f->origin_history_size < 0 ||
f->origin_history_size > Origin::kMaxDepth) {
Printf(
@ -117,8 +180,6 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
f->origin_history_size, Origin::kMaxDepth);
Die();
}
ParseFlag(str, &f->origin_history_per_stack_limit,
"origin_history_per_stack_limit", "");
// Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
// StackDepotHandle::inc_use_count_unsafe.
if (f->origin_history_per_stack_limit < 0 ||
@ -129,51 +190,7 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
Die();
}
ParseFlag(str, &f->report_umrs, "report_umrs", "");
ParseFlag(str, &f->wrap_signals, "wrap_signals", "");
ParseFlag(str, &f->print_stats, "print_stats", "");
ParseFlag(str, &f->atexit, "atexit", "");
ParseFlag(str, &f->store_context_size, "store_context_size", "");
if (f->store_context_size < 1) f->store_context_size = 1;
// keep_going is an old name for halt_on_error,
// and it has inverse meaning.
f->halt_on_error = !f->halt_on_error;
ParseFlag(str, &f->halt_on_error, "keep_going", "");
f->halt_on_error = !f->halt_on_error;
ParseFlag(str, &f->halt_on_error, "halt_on_error", "");
}
static void InitializeFlags(Flags *f, const char *options) {
CommonFlags *cf = common_flags();
SetCommonFlagsDefaults(cf);
cf->external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
cf->malloc_context_size = 20;
cf->handle_ioctl = true;
// FIXME: test and enable.
cf->check_printf = false;
cf->intercept_tls_get_addr = true;
internal_memset(f, 0, sizeof(*f));
f->poison_heap_with_zeroes = false;
f->poison_stack_with_zeroes = false;
f->poison_in_malloc = true;
f->poison_in_free = true;
f->exit_code = 77;
f->origin_history_size = Origin::kMaxDepth;
f->origin_history_per_stack_limit = 20000;
f->report_umrs = true;
f->wrap_signals = true;
f->print_stats = false;
f->atexit = false;
f->halt_on_error = !&__msan_keep_going;
f->store_context_size = 20;
// Override from user-specified string.
if (__msan_default_options)
ParseFlagsFromString(f, __msan_default_options());
ParseFlagsFromString(f, options);
}
void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp,
@ -259,6 +276,7 @@ u32 ChainOrigin(u32 id, StackTrace *stack) {
return id;
Origin o = Origin::FromRawId(id);
stack->tag = StackTrace::TAG_UNKNOWN;
Origin chained = Origin::CreateChainedOrigin(o, stack);
return chained.raw_id();
}
@ -336,9 +354,7 @@ void __msan_init() {
SetDieCallback(MsanDie);
InitTlsSize();
const char *msan_options = GetEnv("MSAN_OPTIONS");
InitializeFlags(&msan_flags, msan_options);
if (common_flags()->help) PrintFlagDescriptions();
InitializeFlags();
__sanitizer_set_report_path(common_flags()->log_path);
InitializeInterceptors();
@ -355,8 +371,6 @@ void __msan_init() {
ReExec();
}
VPrintf(1, "MSAN_OPTIONS: %s\n", msan_options ? msan_options : "<empty>");
__msan_clear_on_return();
if (__msan_get_track_origins())
VPrintf(1, "msan_track_origins\n");
@ -372,10 +386,7 @@ void __msan_init() {
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
if (common_flags()->coverage) {
__sanitizer_cov_init();
Atexit(__sanitizer_cov_dump);
}
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
MsanTSDInit(MsanTSDDtor);
@ -485,24 +496,7 @@ void __msan_load_unpoisoned(void *src, uptr size, void *dst) {
}
void __msan_set_origin(const void *a, uptr size, u32 origin) {
// Origin mapping is 4 bytes per 4 bytes of application memory.
// Here we extend the range such that its left and right bounds are both
// 4 byte aligned.
if (!__msan_get_track_origins()) return;
uptr x = MEM_TO_ORIGIN((uptr)a);
uptr beg = x & ~3UL; // align down.
uptr end = (x + size + 3) & ~3UL; // align up.
u64 origin64 = ((u64)origin << 32) | origin;
// This is like memset, but the value is 32-bit. We unroll by 2 to write
// 64 bits at once. May want to unroll further to get 128-bit stores.
if (beg & 7ULL) {
*(u32*)beg = origin;
beg += 4;
}
for (uptr addr = beg; addr < (end & ~7UL); addr += 8)
*(u64*)addr = origin64;
if (end & 7ULL)
*(u32*)(end - 4) = origin;
if (__msan_get_track_origins()) SetOrigin(a, size, origin);
}
// 'descr' is created at compile time and contains '----' in the beginning.


@ -25,90 +25,90 @@
# define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
#endif
/*
C/C++ on FreeBSD
0000 0000 0000 - 00ff ffff ffff: Low memory: main binary, MAP_32BIT mappings and modules
0100 0000 0000 - 0fff ffff ffff: Bad1
1000 0000 0000 - 30ff ffff ffff: Shadow
3100 0000 0000 - 37ff ffff ffff: Bad2
3800 0000 0000 - 58ff ffff ffff: Origins
5900 0000 0000 - 5fff ffff ffff: Bad3
6000 0000 0000 - 7fff ffff ffff: High memory: heap, modules and main thread stack
C/C++ on Linux/PIE
0000 0000 0000 - 1fff ffff ffff: Bad1
2000 0000 0000 - 3fff ffff ffff: Shadow
4000 0000 0000 - 5fff ffff ffff: Origins
6000 0000 0000 - 7fff ffff ffff: Main memory
C/C++ on Mips
0000 0000 0000 - 009f ffff ffff: Bad1
00a0 0000 0000 - 00bf ffff ffff: Shadow
00c0 0000 0000 - 00df ffff ffff: Origins
00e0 0000 0000 - 00ff ffff ffff: Main memory
*/
struct MappingDesc {
uptr start;
uptr end;
enum Type {
INVALID, APP, SHADOW, ORIGIN
} type;
const char *name;
};
#if SANITIZER_LINUX && defined(__mips64)
const uptr kLowMemBeg = 0;
const uptr kLowMemSize = 0;
const uptr kHighMemBeg = 0x00e000000000;
const uptr kHighMemSize = 0x002000000000;
const uptr kShadowBeg = 0x00a000000000;
const uptr kShadowSize = 0x002000000000;
const uptr kOriginsBeg = 0x00c000000000;
# define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
// Everything is above 0x00e000000000.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
{0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
const uptr kLowMemBeg = 0x000000000000;
const uptr kLowMemSize = 0x010000000000;
const uptr kHighMemBeg = 0x600000000000;
const uptr kHighMemSize = 0x200000000000;
const uptr kShadowBeg = 0x100000000000;
const uptr kShadowSize = 0x210000000000;
const uptr kOriginsBeg = 0x380000000000;
// Low memory: main binary, MAP_32BIT mappings and modules
// High memory: heap, modules and main thread stack
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"},
{0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"},
{0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"},
{0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"},
{0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
// Maps low and high app ranges to contiguous space with zero base:
// Low: 0000 0000 0000 - 00ff ffff ffff -> 2000 0000 0000 - 20ff ffff ffff
// High: 6000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 1fff ffff ffff
# define LINEARIZE_MEM(mem) \
(((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
# define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
#define LINEARIZE_MEM(mem) \
(((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000)
#elif SANITIZER_LINUX && SANITIZER_WORDSIZE == 64
const uptr kLowMemBeg = 0;
const uptr kLowMemSize = 0;
const uptr kHighMemBeg = 0x600000000000;
const uptr kHighMemSize = 0x200000000000;
const uptr kShadowBeg = 0x200000000000;
const uptr kShadowSize = 0x200000000000;
const uptr kOriginsBeg = 0x400000000000;
# define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
// Requires a PIE binary and ASLR enabled.
// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
// Heap at 0x600000000000.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
{0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
#else
#error "Unsupported platform"
#endif
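
Worked arithmetic for the FreeBSD case, following the LINEARIZE_MEM comment above:

// low app:  0x000000001000 & ~0xc00000000000 -> 0x000000001000
//           ^ 0x200000000000                 -> 0x200000001000
//           + 0x100000000000                 -> 0x300000001000 (inside shadow)
// high app: 0x600000000000 & ~0xc00000000000 -> 0x200000000000
//           ^ 0x200000000000                 -> 0x000000000000
//           + 0x100000000000                 -> 0x100000000000 (shadow base)
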
const uptr kBad1Beg = kLowMemBeg + kLowMemSize;
const uptr kBad1Size = kShadowBeg - kBad1Beg;
const uptr kBad2Beg = kShadowBeg + kShadowSize;
const uptr kBad2Size = kOriginsBeg - kBad2Beg;
const uptr kOriginsSize = kShadowSize;
const uptr kBad3Beg = kOriginsBeg + kOriginsSize;
const uptr kBad3Size = kHighMemBeg - kBad3Beg;
#define SHADOW_TO_ORIGIN(shadow) \
(((uptr)(shadow)) + (kOriginsBeg - kShadowBeg))
const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
#define MEM_IS_APP(mem) \
((kLowMemSize > 0 && (uptr)(mem) < kLowMemSize) || \
(uptr)(mem) >= kHighMemBeg)
#ifndef __clang__
__attribute__((optimize("unroll-loops")))
#endif
inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
// It is critical for performance that this loop is unrolled (because then it is
// simplified into just a few constant comparisons).
#ifdef __clang__
#pragma unroll
#endif
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
if (kMemoryLayout[i].type == mapping_type &&
addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
return true;
return false;
}
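
With the layout table a compile-time constant and the loop unrolled, the compiler folds each MEM_IS_* test down to a few range compares. For the Linux/x86_64 table above, MEM_IS_APP(mem) is effectively:

// (uptr)(mem) >= 0x600000000000ULL && (uptr)(mem) < 0x800000000000ULL
// i.e. no more expensive than the hard-coded macro it replaces.
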
#define MEM_IS_SHADOW(mem) \
((uptr)(mem) >= kShadowBeg && (uptr)(mem) < kShadowBeg + kShadowSize)
#define MEM_IS_ORIGIN(mem) \
((uptr)(mem) >= kOriginsBeg && (uptr)(mem) < kOriginsBeg + kOriginsSize)
#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
// These constants must be kept in sync with the ones in MemorySanitizer.cc.
const int kMsanParamTlsSize = 800;
@ -125,6 +125,7 @@ char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorThreadFinish();
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
uptr alignment, bool zeroise);
void MsanDeallocate(StackTrace *stack, void *ptr);
@ -162,16 +163,12 @@ void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
void UnpoisonParam(uptr n);
void UnpoisonThreadLocalState();
u32 GetOriginIfPoisoned(uptr a, uptr size);
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack);
void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack);
void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack);
// Returns a "chained" origin id, pointing to the given stack trace followed by
// the previous origin id.
u32 ChainOrigin(u32 id, StackTrace *stack);
const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
#define GET_MALLOC_STACK_TRACE \
BufferedStackTrace stack; \
if (__msan_get_track_origins() && msan_inited) \


@ -18,6 +18,7 @@
#include "msan_allocator.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"
namespace __msan {
@ -73,7 +74,7 @@ static inline void Init() {
if (inited) return;
__msan_init();
inited = true; // this must happen before any threads are created.
allocator.Init();
allocator.Init(common_flags()->allocator_may_return_null);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@ -92,7 +93,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
return AllocatorReturnNull();
return allocator.ReturnNullOrDie();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@ -112,6 +113,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_ALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
__msan_set_origin(allocated, size, o.raw_id());
}
@ -132,6 +134,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
if (flags()->poison_in_free) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_DEALLOC;
Origin o = Origin::CreateHeapOrigin(stack);
__msan_set_origin(p, size, o.raw_id());
}
@ -147,6 +150,13 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
Init();
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
}
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr alignment, bool zeroise) {
if (!old_p)
@ -161,15 +171,22 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
if (new_size <= actually_allocated_size) {
// We are not reallocating here.
meta->requested_size = new_size;
if (new_size > old_size)
__msan_poison((char*)old_p + old_size, new_size - old_size);
if (new_size > old_size) {
if (zeroise) {
__msan_clear_and_unpoison((char *)old_p + old_size,
new_size - old_size);
} else if (flags()->poison_in_malloc) {
stack->tag = StackTrace::TAG_ALLOC;
PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
}
}
return old_p;
}
uptr memcpy_size = Min(new_size, old_size);
void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
// Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
if (new_p) {
__msan_memcpy(new_p, old_p, memcpy_size);
CopyMemory(new_p, old_p, memcpy_size, stack);
MsanDeallocate(stack, old_p);
}
return new_p;


@ -9,28 +9,18 @@
//
// This file is a part of MemorySanitizer.
//
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
#ifndef MSAN_FLAGS_H
#define MSAN_FLAGS_H
namespace __msan {
// Flags.
struct Flags {
int exit_code;
int origin_history_size;
int origin_history_per_stack_limit;
bool poison_heap_with_zeroes; // default: false
bool poison_stack_with_zeroes; // default: false
bool poison_in_malloc; // default: true
bool poison_in_free; // default: true
bool report_umrs;
bool wrap_signals;
bool print_stats;
bool halt_on_error;
bool atexit;
int store_context_size; // like malloc_context_size, but for uninit stores
#define MSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "msan_flags.inc"
#undef MSAN_FLAG
void SetDefaults();
};
Flags *flags();


@ -0,0 +1,33 @@
//===-- msan_flags.inc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MSan runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_FLAG
# error "Define MSAN_FLAG prior to including this file!"
#endif
// MSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
MSAN_FLAG(int, exit_code, 77, "")
MSAN_FLAG(int, origin_history_size, Origin::kMaxDepth, "")
MSAN_FLAG(int, origin_history_per_stack_limit, 20000, "")
MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
MSAN_FLAG(bool, poison_in_malloc, true, "")
MSAN_FLAG(bool, poison_in_free, true, "")
MSAN_FLAG(bool, report_umrs, true, "")
MSAN_FLAG(bool, wrap_signals, true, "")
MSAN_FLAG(bool, print_stats, false, "")
MSAN_FLAG(bool, halt_on_error, !&__msan_keep_going, "")
MSAN_FLAG(bool, atexit, false, "")
MSAN_FLAG(int, store_context_size, 20,
"Like malloc_context_size, but for uninit stores.")


@ -20,6 +20,7 @@
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
#include "msan_thread.h"
#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@ -290,7 +291,7 @@ INTERCEPTOR(char *, strcpy, char *dest, const char *src) { // NOLINT
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(strcpy)(dest, src); // NOLINT
CopyPoison(dest, src, n + 1, &stack);
CopyShadowAndOrigin(dest, src, n + 1, &stack);
return res;
}
@ -301,7 +302,7 @@ INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) { // NOLINT
if (copy_size < n)
copy_size++; // trailing \0
char *res = REAL(strncpy)(dest, src, n); // NOLINT
CopyPoison(dest, src, copy_size, &stack);
CopyShadowAndOrigin(dest, src, copy_size, &stack);
__msan_unpoison(dest + copy_size, n - copy_size);
return res;
}
@ -311,16 +312,18 @@ INTERCEPTOR(char *, stpcpy, char *dest, const char *src) { // NOLINT
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(stpcpy)(dest, src); // NOLINT
CopyPoison(dest, src, n + 1, &stack);
CopyShadowAndOrigin(dest, src, n + 1, &stack);
return res;
}
INTERCEPTOR(char *, strdup, char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
// On FreeBSD strdup() leverages strlen().
InterceptorScope interceptor_scope;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(strdup)(src);
CopyPoison(res, src, n + 1, &stack);
CopyShadowAndOrigin(res, src, n + 1, &stack);
return res;
}
@ -330,7 +333,7 @@ INTERCEPTOR(char *, __strdup, char *src) {
GET_STORE_STACK_TRACE;
SIZE_T n = REAL(strlen)(src);
char *res = REAL(__strdup)(src);
CopyPoison(res, src, n + 1, &stack);
CopyShadowAndOrigin(res, src, n + 1, &stack);
return res;
}
#define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup)
@ -341,9 +344,11 @@ INTERCEPTOR(char *, __strdup, char *src) {
INTERCEPTOR(char *, strndup, char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
// On FreeBSD strndup() leverages strnlen().
InterceptorScope interceptor_scope;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(strndup)(src, n);
CopyPoison(res, src, copy_size, &stack);
CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
@ -354,7 +359,7 @@ INTERCEPTOR(char *, __strndup, char *src, SIZE_T n) {
GET_STORE_STACK_TRACE;
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(__strndup)(src, n);
CopyPoison(res, src, copy_size, &stack);
CopyShadowAndOrigin(res, src, copy_size, &stack);
__msan_unpoison(res + copy_size, 1); // \0
return res;
}
@ -377,7 +382,7 @@ INTERCEPTOR(char *, strcat, char *dest, const char *src) { // NOLINT
SIZE_T src_size = REAL(strlen)(src);
SIZE_T dest_size = REAL(strlen)(dest);
char *res = REAL(strcat)(dest, src); // NOLINT
CopyPoison(dest + dest_size, src, src_size + 1, &stack);
CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack);
return res;
}
@ -387,7 +392,7 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) { // NOLINT
SIZE_T dest_size = REAL(strlen)(dest);
SIZE_T copy_size = REAL(strnlen)(src, n);
char *res = REAL(strncat)(dest, src, n); // NOLINT
CopyPoison(dest + dest_size, src, copy_size, &stack);
CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack);
__msan_unpoison(dest + dest_size + copy_size, 1); // \0
return res;
}
@ -576,7 +581,8 @@ INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wcscpy)(dest, src);
CopyPoison(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1), &stack);
CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1),
&stack);
return res;
}
@ -585,7 +591,7 @@ INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmemcpy)(dest, src, n);
CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
@ -593,7 +599,7 @@ INTERCEPTOR(wchar_t *, wmempcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmempcpy)(dest, src, n);
CopyPoison(dest, src, n * sizeof(wchar_t), &stack);
CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
@ -609,7 +615,7 @@ INTERCEPTOR(wchar_t *, wmemmove, wchar_t *dest, const wchar_t *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wmemmove)(dest, src, n);
MovePoison(dest, src, n * sizeof(wchar_t), &stack);
MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
return res;
}
@ -699,7 +705,15 @@ INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
#define MSAN_MAYBE_INTERCEPT___FXSTAT64
#endif
#if !SANITIZER_FREEBSD
#if SANITIZER_FREEBSD
INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) {
ENSURE_MSAN_INITED();
int res = REAL(fstatat)(fd, pathname, buf, flags);
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat)
#else
INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
int flags) {
ENSURE_MSAN_INITED();
@ -707,9 +721,7 @@ INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
#define MSAN_MAYBE_INTERCEPT___FXSTATAT INTERCEPT_FUNCTION(__fxstatat)
#else
#define MSAN_MAYBE_INTERCEPT___FXSTATAT
# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat)
#endif
#if !SANITIZER_FREEBSD
@ -725,7 +737,16 @@ INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
#define MSAN_MAYBE_INTERCEPT___FXSTATAT64
#endif
#if !SANITIZER_FREEBSD
#if SANITIZER_FREEBSD
INTERCEPTOR(int, stat, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(stat)(path, buf);
if (!res)
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(stat)
#else
INTERCEPTOR(int, __xstat, int magic, char *path, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__xstat)(magic, path, buf);
@ -733,9 +754,7 @@ INTERCEPTOR(int, __xstat, int magic, char *path, void *buf) {
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
#define MSAN_MAYBE_INTERCEPT___XSTAT INTERCEPT_FUNCTION(__xstat)
#else
#define MSAN_MAYBE_INTERCEPT___XSTAT
# define MSAN_INTERCEPT_STAT INTERCEPT_FUNCTION(__xstat)
#endif
#if !SANITIZER_FREEBSD
@ -849,14 +868,29 @@ INTERCEPTOR(int, getrlimit64, int resource, void *rlim) {
#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64
#endif
INTERCEPTOR(int, uname, void *utsname) {
#if SANITIZER_FREEBSD
// FreeBSD's <sys/utsname.h> defines uname() as
// static __inline int uname(struct utsname *name) {
// return __xuname(SYS_NMLN, (void*)name);
// }
INTERCEPTOR(int, __xuname, int size, void *utsname) {
ENSURE_MSAN_INITED();
int res = REAL(uname)(utsname);
if (!res) {
int res = REAL(__xuname)(size, utsname);
if (!res)
__msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
}
return res;
}
#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname)
#else
INTERCEPTOR(int, uname, struct utsname *utsname) {
ENSURE_MSAN_INITED();
int res = REAL(uname)(utsname);
if (!res)
__msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
return res;
}
#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname)
#endif
INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
ENSURE_MSAN_INITED();
@ -918,17 +952,15 @@ INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
__msan_unpoison(buf, res);
if (srcaddr) {
SIZE_T sz = *addrlen;
__msan_unpoison(srcaddr, (sz < srcaddr_sz) ? sz : srcaddr_sz);
__msan_unpoison(srcaddr, Min(sz, srcaddr_sz));
}
}
return res;
}
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return AllocatorReturnNull();
GET_MALLOC_STACK_TRACE;
if (!msan_inited) {
if (UNLIKELY(!msan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const SIZE_T kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@ -939,7 +971,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
return MsanReallocate(&stack, 0, nmemb * size, sizeof(u64), true);
return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@ -952,13 +984,11 @@ INTERCEPTOR(void *, malloc, SIZE_T size) {
return MsanReallocate(&stack, 0, size, sizeof(u64), false);
}
void __msan_allocated_memory(const void* data, uptr size) {
void __msan_allocated_memory(const void *data, uptr size) {
GET_MALLOC_STACK_TRACE;
if (flags()->poison_in_malloc)
__msan_poison(data, size);
if (__msan_get_track_origins()) {
Origin o = Origin::CreateHeapOrigin(&stack);
__msan_set_origin(data, size, o.raw_id());
if (flags()->poison_in_malloc) {
stack.tag = STACK_TRACE_TAG_POISON;
PoisonMemory(data, size, &stack);
}
}
@ -1328,6 +1358,9 @@ int OnExit() {
InterceptorScope interceptor_scope; \
__msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */ \
ENSURE_MSAN_INITED();
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
do { \
} while (false)
@ -1345,8 +1378,11 @@ int OnExit() {
} while (false) // FIXME
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) \
if (map) ForEachMappedRegion((link_map *)map, __msan_unpoison);
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
do { \
link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \
if (map) ForEachMappedRegion(map, __msan_unpoison); \
} while (false)
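
These stub hooks expand inside a do { ... } while (false) wrapper, the standard trick for making an empty or multi-statement macro behave as exactly one statement. A sketch of why it matters:

// The wrapper keeps if/else chains well-formed at every expansion site:
//   if (cond)
//     COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);   // one statement + ';'
//   else
//     DoSomethingElse();
// Had the macro expanded to a bare "{}", the braces would complete the if,
// the caller's trailing ';' would become a separate empty statement, and
// the following "else" would no longer attach -- a syntax error. The
// do/while form consumes the ';' as part of a single statement.
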
#include "sanitizer_common/sanitizer_common_interceptors.inc"
@ -1360,53 +1396,26 @@ int OnExit() {
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
static void PoisonShadow(uptr ptr, uptr size, u8 value) {
uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(ptr);
uptr shadow_end = MEM_TO_SHADOW(ptr + size);
if (value ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
uptr page_beg = RoundUpTo(shadow_beg, PageSize);
uptr page_end = RoundDownTo(shadow_end, PageSize);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
} else {
if (page_beg != shadow_beg) {
REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
}
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
MmapFixedNoReserve(page_beg, page_end - page_beg);
}
}
}
// These interface functions reside here so that they can use
// REAL(memset), etc.
void __msan_unpoison(const void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
PoisonShadow((uptr)a, size, 0);
SetShadow(a, size, 0);
}
void __msan_poison(const void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
PoisonShadow((uptr)a, size,
__msan::flags()->poison_heap_with_zeroes ? 0 : -1);
SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
}
void __msan_poison_stack(void *a, uptr size) {
if (!MEM_IS_APP(a)) return;
PoisonShadow((uptr)a, size,
__msan::flags()->poison_stack_with_zeroes ? 0 : -1);
SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
}
void __msan_clear_and_unpoison(void *a, uptr size) {
REAL(memset)(a, 0, size);
PoisonShadow((uptr)a, size, 0);
SetShadow(a, size, 0);
}
void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
@ -1415,7 +1424,7 @@ void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
void *res = REAL(memcpy)(dest, src, n);
CopyPoison(dest, src, n, &stack);
CopyShadowAndOrigin(dest, src, n, &stack);
return res;
}
@ -1434,7 +1443,7 @@ void *__msan_memmove(void *dest, const void *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
void *res = REAL(memmove)(dest, src, n);
MovePoison(dest, src, n, &stack);
MoveShadowAndOrigin(dest, src, n, &stack);
return res;
}
@ -1445,96 +1454,6 @@ void __msan_unpoison_string(const char* s) {
namespace __msan {
u32 GetOriginIfPoisoned(uptr addr, uptr size) {
unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
for (uptr i = 0; i < size; ++i)
if (s[i])
return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
return 0;
}
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
u32 src_origin) {
uptr dst_s = MEM_TO_SHADOW(addr);
uptr src_s = src_shadow;
uptr src_s_end = src_s + size;
for (; src_s < src_s_end; ++dst_s, ++src_s)
if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s &~3UL) = src_origin;
}
void CopyOrigin(void *dst, const void *src, uptr size, StackTrace *stack) {
if (!__msan_get_track_origins()) return;
if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
uptr d = (uptr)dst;
uptr beg = d & ~3UL;
// Copy left unaligned origin if that memory is poisoned.
if (beg < d) {
u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(beg) = o;
}
beg += 4;
}
uptr end = (d + size) & ~3UL;
// If both ends fall into the same 4-byte slot, we are done.
if (end < beg) return;
// Copy right unaligned origin if that memory is poisoned.
if (end < d + size) {
u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(end) = o;
}
}
if (beg < end) {
// Align src up.
uptr s = ((uptr)src + 3) & ~3UL;
// FIXME: factor out to msan_copy_origin_aligned
if (__msan_get_track_origins() > 1) {
u32 *src = (u32 *)MEM_TO_ORIGIN(s);
u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
u32 src_o = 0;
u32 dst_o = 0;
for (; src < src_end; ++src, ++src_s, ++dst) {
if (!*src_s) continue;
if (*src != src_o) {
src_o = *src;
dst_o = ChainOrigin(src_o, stack);
}
*dst = dst_o;
}
} else {
REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
end - beg);
}
}
}
void MovePoison(void *dst, const void *src, uptr size, StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
if (src == dst) return;
REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
CopyOrigin(dst, src, size, stack);
}
void CopyPoison(void *dst, const void *src, uptr size, StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
CopyOrigin(dst, src, size, stack);
}
void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
@ -1617,8 +1536,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(gettimeofday);
INTERCEPT_FUNCTION(fcvt);
MSAN_MAYBE_INTERCEPT___FXSTAT;
MSAN_MAYBE_INTERCEPT___FXSTATAT;
MSAN_MAYBE_INTERCEPT___XSTAT;
MSAN_INTERCEPT_FSTATAT;
MSAN_INTERCEPT_STAT;
MSAN_MAYBE_INTERCEPT___LXSTAT;
MSAN_MAYBE_INTERCEPT___FXSTAT64;
MSAN_MAYBE_INTERCEPT___FXSTATAT64;
@ -1631,7 +1550,7 @@ void InitializeInterceptors() {
MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED;
INTERCEPT_FUNCTION(getrlimit);
MSAN_MAYBE_INTERCEPT_GETRLIMIT64;
INTERCEPT_FUNCTION(uname);
MSAN_INTERCEPT_UNAME;
INTERCEPT_FUNCTION(gethostname);
MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;


@ -64,41 +64,45 @@ static bool ProtectMemoryRange(uptr beg, uptr size) {
return true;
}
static void CheckMemoryLayoutSanity() {
uptr prev_end = 0;
for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
uptr start = kMemoryLayout[i].start;
uptr end = kMemoryLayout[i].end;
MappingDesc::Type type = kMemoryLayout[i].type;
CHECK_LT(start, end);
CHECK_EQ(prev_end, start);
CHECK(addr_is_type(start, type));
CHECK(addr_is_type((start + end) / 2, type));
CHECK(addr_is_type(end - 1, type));
if (type == MappingDesc::APP) {
uptr addr = start;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
addr = (start + end) / 2;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
addr = end - 1;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
}
prev_end = end;
}
}
bool InitShadow(bool map_shadow, bool init_origins) {
// Let user know mapping parameters first.
VPrintf(1, "__msan_init %p\n", &__msan_init);
ReportMapRange("Low Memory ", kLowMemBeg, kLowMemSize);
ReportMapRange("Bad1 ", kBad1Beg, kBad1Size);
ReportMapRange("Shadow ", kShadowBeg, kShadowSize);
ReportMapRange("Bad2 ", kBad2Beg, kBad2Size);
ReportMapRange("Origins ", kOriginsBeg, kOriginsSize);
ReportMapRange("Bad3 ", kBad3Beg, kBad3Size);
ReportMapRange("High Memory", kHighMemBeg, kHighMemSize);
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
kMemoryLayout[i].end - 1);
// Check mapping sanity (the invariant).
CHECK_EQ(kLowMemBeg, 0);
CHECK_EQ(kBad1Beg, kLowMemBeg + kLowMemSize);
CHECK_EQ(kShadowBeg, kBad1Beg + kBad1Size);
CHECK_GT(kShadowSize, 0);
CHECK_GE(kShadowSize, kLowMemSize + kHighMemSize);
CHECK_EQ(kBad2Beg, kShadowBeg + kShadowSize);
CHECK_EQ(kOriginsBeg, kBad2Beg + kBad2Size);
CHECK_EQ(kOriginsSize, kShadowSize);
CHECK_EQ(kBad3Beg, kOriginsBeg + kOriginsSize);
CHECK_EQ(kHighMemBeg, kBad3Beg + kBad3Size);
CHECK_GT(kHighMemSize, 0);
CHECK_GE(kHighMemBeg + kHighMemSize, kHighMemBeg); // Tests for no overflow.
if (kLowMemSize > 0) {
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(kLowMemBeg)));
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(kLowMemBeg + kLowMemSize - 1)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(kLowMemBeg)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(kLowMemBeg + kLowMemSize - 1)));
}
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(kHighMemBeg)));
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(kHighMemBeg + kHighMemSize - 1)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(kHighMemBeg)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(kHighMemBeg + kHighMemSize - 1)));
CheckMemoryLayoutSanity();
if (!MEM_IS_APP(&__msan_init)) {
Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
@ -106,29 +110,23 @@ bool InitShadow(bool map_shadow, bool init_origins) {
return false;
}
if (!CheckMemoryRangeAvailability(kShadowBeg, kShadowSize) ||
(init_origins &&
!CheckMemoryRangeAvailability(kOriginsBeg, kOriginsSize)) ||
!CheckMemoryRangeAvailability(kBad1Beg, kBad1Size) ||
!CheckMemoryRangeAvailability(kBad2Beg, kBad2Size) ||
!CheckMemoryRangeAvailability(kBad3Beg, kBad3Size)) {
return false;
for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
uptr start = kMemoryLayout[i].start;
uptr end = kMemoryLayout[i].end;
uptr size = end - start;
MappingDesc::Type type = kMemoryLayout[i].type;
if ((map_shadow && type == MappingDesc::SHADOW) ||
(init_origins && type == MappingDesc::ORIGIN)) {
if (!CheckMemoryRangeAvailability(start, size)) return false;
if ((uptr)MmapFixedNoReserve(start, size) != start) return false;
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(start, size);
} else if (type == MappingDesc::INVALID) {
if (!CheckMemoryRangeAvailability(start, size)) return false;
if (!ProtectMemoryRange(start, size)) return false;
}
}
if (!ProtectMemoryRange(kBad1Beg, kBad1Size) ||
!ProtectMemoryRange(kBad2Beg, kBad2Size) ||
!ProtectMemoryRange(kBad3Beg, kBad3Size)) {
return false;
}
if (map_shadow) {
void *shadow = MmapFixedNoReserve(kShadowBeg, kShadowSize);
if (shadow != (void*)kShadowBeg) return false;
}
if (init_origins) {
void *origins = MmapFixedNoReserve(kOriginsBeg, kOriginsSize);
if (origins != (void*)kOriginsBeg) return false;
}
return true;
}
@ -137,7 +135,7 @@ void MsanDie() {
__sanitizer_cov_dump();
if (death_callback)
death_callback();
_exit(flags()->exit_code);
internal__exit(flags()->exit_code);
}
static void MsanAtExit(void) {
@ -157,20 +155,26 @@ void InstallAtExitHandler() {
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
void MsanTSDInit(void (*destructor)(void *tsd)) {
CHECK(!tsd_key_inited);
tsd_key_inited = true;
CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
}
void *MsanTSDGet() {
CHECK(tsd_key_inited);
return pthread_getspecific(tsd_key);
static THREADLOCAL MsanThread* msan_current_thread;
MsanThread *GetCurrentThread() {
return msan_current_thread;
}
void MsanTSDSet(void *tsd) {
void SetCurrentThread(MsanThread *t) {
// Make sure we do not reset the current MsanThread.
CHECK_EQ(0, msan_current_thread);
msan_current_thread = t;
// Make sure that MsanTSDDtor gets called at the end.
CHECK(tsd_key_inited);
pthread_setspecific(tsd_key, tsd);
pthread_setspecific(tsd_key, (void *)t);
}
void MsanTSDDtor(void *tsd) {
@ -180,6 +184,9 @@ void MsanTSDDtor(void *tsd) {
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
msan_current_thread = nullptr;
// Make sure that the signal handler cannot see a stale current thread pointer.
atomic_signal_fence(memory_order_seq_cst);
MsanThread::TSDDtor(tsd);
}


@ -0,0 +1,174 @@
//===-- msan_poisoning.cc ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#include "msan_poisoning.h"
#include "interception/interception.h"
#include "msan_origin.h"
#include "sanitizer_common/sanitizer_common.h"
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
DECLARE_REAL(void *, memmove, void *dest, const void *src, uptr n)
namespace __msan {
u32 GetOriginIfPoisoned(uptr addr, uptr size) {
unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
for (uptr i = 0; i < size; ++i)
if (s[i]) return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
return 0;
}
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
u32 src_origin) {
uptr dst_s = MEM_TO_SHADOW(addr);
uptr src_s = src_shadow;
uptr src_s_end = src_s + size;
for (; src_s < src_s_end; ++dst_s, ++src_s)
if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s & ~3UL) = src_origin;
}
void CopyOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
uptr d = (uptr)dst;
uptr beg = d & ~3UL;
// Copy left unaligned origin if that memory is poisoned.
if (beg < d) {
u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(beg) = o;
}
beg += 4;
}
uptr end = (d + size) & ~3UL;
// If both ends fall into the same 4-byte slot, we are done.
if (end < beg) return;
// Copy right unaligned origin if that memory is poisoned.
if (end < d + size) {
u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(end) = o;
}
}
if (beg < end) {
// Align src up.
uptr s = ((uptr)src + 3) & ~3UL;
// FIXME: factor out to msan_copy_origin_aligned
if (__msan_get_track_origins() > 1) {
u32 *src = (u32 *)MEM_TO_ORIGIN(s);
u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
u32 src_o = 0;
u32 dst_o = 0;
for (; src < src_end; ++src, ++src_s, ++dst) {
if (!*src_s) continue;
if (*src != src_o) {
src_o = *src;
dst_o = ChainOrigin(src_o, stack);
}
*dst = dst_o;
}
} else {
REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
end - beg);
}
}
}
void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
if (src == dst) return;
REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
}
void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
}
void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
REAL(memcpy)(dst, src, size);
CopyShadowAndOrigin(dst, src, size, stack);
}
void SetShadow(const void *ptr, uptr size, u8 value) {
uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(ptr);
uptr shadow_end = MEM_TO_SHADOW((uptr)ptr + size);
if (value ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void *)shadow_beg, value, shadow_end - shadow_beg);
} else {
uptr page_beg = RoundUpTo(shadow_beg, PageSize);
uptr page_end = RoundDownTo(shadow_end, PageSize);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
} else {
if (page_beg != shadow_beg) {
REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
}
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
MmapFixedNoReserve(page_beg, page_end - page_beg);
}
}
}
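
A note on the zeroing path above: for large runs of zero shadow, re-mapping the fully covered pages with MmapFixedNoReserve swaps dirty pages for fresh demand-zero ones, which both avoids a huge memset and returns memory to the kernel; only the partial pages at each end are memset. Sketched, assuming a 4 KB page:

// shadow_beg ... page_beg .............. page_end ... shadow_end
// [ memset 0 ][ MmapFixedNoReserve (demand-zero pages) ][ memset 0 ]
// The plain memset path is kept for small ranges (below
// common_flags()->clear_shadow_mmap_threshold) where mmap overhead loses.
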
void SetOrigin(const void *dst, uptr size, u32 origin) {
// Origin mapping is 4 bytes per 4 bytes of application memory.
// Here we extend the range such that its left and right bounds are both
// 4 byte aligned.
uptr x = MEM_TO_ORIGIN((uptr)dst);
uptr beg = x & ~3UL; // align down.
uptr end = (x + size + 3) & ~3UL; // align up.
u64 origin64 = ((u64)origin << 32) | origin;
// This is like memset, but the value is 32-bit. We unroll by 2 to write
// 64 bits at once. May want to unroll further to get 128-bit stores.
if (beg & 7ULL) {
*(u32 *)beg = origin;
beg += 4;
}
for (uptr addr = beg; addr < (end & ~7UL); addr += 8) *(u64 *)addr = origin64;
if (end & 7ULL) *(u32 *)(end - 4) = origin;
}
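
A worked example of the 4-byte widening above, using a hypothetical address:

// MEM_TO_ORIGIN(dst) = 0x1006, size = 3
//   beg = 0x1006 & ~3UL           = 0x1004   (align down)
//   end = (0x1006 + 3 + 3) & ~3UL = 0x100c   (align up)
// Both partially covered 4-byte origin slots are written in full,
// matching the 4-bytes-per-4-bytes origin granularity.
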
void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
SetShadow(dst, size, (u8)-1);
if (__msan_get_track_origins()) {
Origin o = Origin::CreateHeapOrigin(stack);
SetOrigin(dst, size, o.raw_id());
}
}
} // namespace __msan


@ -0,0 +1,59 @@
//===-- msan_poisoning.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemorySanitizer.
//
//===----------------------------------------------------------------------===//
#ifndef MSAN_POISONING_H
#define MSAN_POISONING_H
#include "msan.h"
namespace __msan {
// Return origin for the first poisoned byte in the memory range, or 0.
u32 GetOriginIfPoisoned(uptr addr, uptr size);
// Set origin src_origin for all 4-byte granules of the app memory region
// [addr, addr+size) whose corresponding shadow bytes in
// [src_shadow, src_shadow+size) are non-zero.
void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
// Copy origin from src (app address) to dst (app address), creating chained
// origin ids as necessary, without overriding origin for fully initialized
// quads.
void CopyOrigin(const void *dst, const void *src, uptr size, StackTrace *stack);
// memmove() shadow and origin. Dst and src are application addresses.
// See CopyOrigin() for the origin copying logic.
void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack);
// memcpy() shadow and origin. Dst and src are application addresses.
// See CopyOrigin() for the origin copying logic.
void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack);
// memcpy() app memory, and do "the right thing" to the corresponding shadow and
// origin regions.
void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack);
// Fill shadow with value. Ptr is an application address.
void SetShadow(const void *ptr, uptr size, u8 value);
// Set origin for the memory region.
void SetOrigin(const void *dst, uptr size, u32 origin);
// Mark memory region uninitialized, with origins.
void PoisonMemory(const void *dst, uptr size, StackTrace *stack);
} // namespace __msan
#endif // MSAN_POISONING_H
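For context, the memcpy-style interceptors in the MSan RTL sit on top of this interface; a hedged sketch of the shape (simplified, ignoring the re-entrancy and init-order handling the real interceptor needs):

#include "msan_poisoning.h"

namespace __msan {
// Copy the app bytes and mirror shadow/origin in one step: CopyMemory()
// performs REAL(memcpy) and then CopyShadowAndOrigin(), as declared above.
static void *SketchMemcpy(void *dst, const void *src, uptr n,
                          StackTrace *stack) {
  CopyMemory(dst, src, n, stack);
  return dst;
}
}  // namespace __msan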

View File

@ -75,8 +75,23 @@ static void DescribeOrigin(u32 id) {
DescribeStackOrigin(so, pc);
} else {
StackTrace stack = o.getStackTraceForHeapOrigin();
Printf(" %sUninitialized value was created by a heap allocation%s\n",
d.Origin(), d.End());
switch (stack.tag) {
case StackTrace::TAG_ALLOC:
Printf(" %sUninitialized value was created by a heap allocation%s\n",
d.Origin(), d.End());
break;
case StackTrace::TAG_DEALLOC:
Printf(" %sUninitialized value was created by a heap deallocation%s\n",
d.Origin(), d.End());
break;
case STACK_TRACE_TAG_POISON:
Printf(" %sMemory was marked as uninitialized%s\n", d.Origin(),
d.End());
break;
default:
Printf(" %sUninitialized value was created%s\n", d.Origin(), d.End());
break;
}
stack.Print();
}
}
@ -255,7 +270,7 @@ void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
d.End());
if (__sanitizer::common_flags()->verbosity > 0)
if (__sanitizer::Verbosity())
DescribeMemoryRange(start, size);
}

View File

@ -79,15 +79,4 @@ thread_return_t MsanThread::ThreadStart() {
return res;
}
MsanThread *GetCurrentThread() {
return reinterpret_cast<MsanThread *>(MsanTSDGet());
}
void SetCurrentThread(MsanThread *t) {
// Make sure we do not reset the current MsanThread.
CHECK_EQ(0, MsanTSDGet());
MsanTSDSet(t);
CHECK_EQ(t, MsanTSDGet());
}
} // namespace __msan

View File

@ -21,13 +21,25 @@
#include "sanitizer/allocator_interface.h"
#include "sanitizer/msan_interface.h"
#if defined(__FreeBSD__)
# define _KERNEL // To declare 'shminfo' structure.
# include <sys/shm.h>
# undef _KERNEL
extern "C" {
// <sys/shm.h> doesn't declare these functions in _KERNEL mode.
void *shmat(int, const void *, int);
int shmget(key_t, size_t, int);
int shmctl(int, int, struct shmid_ds *);
int shmdt(const void *);
}
#endif
#include <inttypes.h>
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <wchar.h>
#include <math.h>
#include <malloc.h>
#include <arpa/inet.h>
#include <dlfcn.h>
@ -43,20 +55,40 @@
#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/statvfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <dirent.h>
#include <pwd.h>
#include <sys/socket.h>
#include <netdb.h>
#include <wordexp.h>
#include <mntent.h>
#include <netinet/ether.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#if !defined(__FreeBSD__)
# include <malloc.h>
# include <sys/sysinfo.h>
# include <sys/vfs.h>
# include <mntent.h>
# include <netinet/ether.h>
#else
# include <signal.h>
# include <netinet/in.h>
# include <pthread_np.h>
# include <sys/uio.h>
# include <sys/mount.h>
# include <sys/sysctl.h>
# include <net/ethernet.h>
# define f_namelen f_namemax // FreeBSD's name for this statfs field.
# define cpu_set_t cpuset_t
extern "C" {
// FreeBSD's <ssp/string.h> defines mempcpy() as a macro expanding into
// a __builtin___mempcpy_chk() call, but since the MSan RTL defines it as an
// ordinary function, we declare it here so the tests can call it.
void *mempcpy(void *dest, const void *src, size_t n);
}
#endif
#if defined(__i386__) || defined(__x86_64__)
# include <emmintrin.h>
# define MSAN_HAS_M128 1
@ -68,7 +100,23 @@
# include <immintrin.h>
#endif
static const size_t kPageSize = 4096;
// On FreeBSD procfs is not enabled by default.
#if defined(__FreeBSD__)
# define FILE_TO_READ "/bin/cat"
# define DIR_TO_READ "/bin"
# define SUBFILE_TO_READ "cat"
# define SYMLINK_TO_READ "/usr/bin/tar"
# define SUPERUSER_GROUP "wheel"
#else
# define FILE_TO_READ "/proc/self/stat"
# define DIR_TO_READ "/proc/self"
# define SUBFILE_TO_READ "stat"
# define SYMLINK_TO_READ "/proc/self/exe"
# define SUPERUSER_GROUP "root"
#endif
const size_t kPageSize = 4096;
const size_t kMaxPathLength = 4096;
typedef unsigned char U1;
typedef unsigned short U2; // NOLINT
@ -122,11 +170,11 @@ void ExpectPoisonedWithOrigin(const T& t, unsigned origin) {
EXPECT_EQ(origin, __msan_get_origin((void*)&t));
}
#define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x)
#define EXPECT_NOT_POISONED(x) EXPECT_EQ(true, TestForNotPoisoned((x)))
template<typename T>
void ExpectNotPoisoned(const T& t) {
EXPECT_EQ(-1, __msan_test_shadow((void*)&t, sizeof(t)));
bool TestForNotPoisoned(const T& t) {
return __msan_test_shadow((void*)&t, sizeof(t)) == -1;
}
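Both helpers lean on __msan_test_shadow(), which returns -1 when the whole range is initialized and otherwise the offset of the first poisoned byte. A hedged standalone illustration (only meaningful when built with MSan):

#include <sanitizer/msan_interface.h>
#include <assert.h>

void ShadowProbeExample() {
  int x;                                            // uninitialized under MSan
  assert(__msan_test_shadow(&x, sizeof(x)) == 0);   // first poisoned byte at 0
  x = 42;
  assert(__msan_test_shadow(&x, sizeof(x)) == -1);  // fully initialized
}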
static U8 poisoned_array[100];
@ -493,10 +541,9 @@ static char *DynRetTestStr;
TEST(MemorySanitizer, DynRet) {
ReturnPoisoned<S8>();
EXPECT_NOT_POISONED(clearenv());
EXPECT_NOT_POISONED(atoi("0"));
}
TEST(MemorySanitizer, DynRet1) {
ReturnPoisoned<S8>();
}
@ -551,7 +598,7 @@ TEST(MemorySanitizer, strerror) {
TEST(MemorySanitizer, strerror_r) {
errno = 0;
char buf[1000];
char *res = strerror_r(EINVAL, buf, sizeof(buf));
char *res = (char*) (size_t) strerror_r(EINVAL, buf, sizeof(buf));
ASSERT_EQ(0, errno);
if (!res) res = buf; // POSIX version success.
EXPECT_NOT_POISONED(strlen(res));
@ -559,7 +606,7 @@ TEST(MemorySanitizer, strerror_r) {
TEST(MemorySanitizer, fread) {
char *x = new char[32];
FILE *f = fopen("/proc/self/stat", "r");
FILE *f = fopen(FILE_TO_READ, "r");
ASSERT_TRUE(f != NULL);
fread(x, 1, 32, f);
EXPECT_NOT_POISONED(x[0]);
@ -571,7 +618,7 @@ TEST(MemorySanitizer, fread) {
TEST(MemorySanitizer, read) {
char *x = new char[32];
int fd = open("/proc/self/stat", O_RDONLY);
int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = read(fd, x, 32);
ASSERT_EQ(sz, 32);
@ -584,7 +631,7 @@ TEST(MemorySanitizer, read) {
TEST(MemorySanitizer, pread) {
char *x = new char[32];
int fd = open("/proc/self/stat", O_RDONLY);
int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = pread(fd, x, 32, 0);
ASSERT_EQ(sz, 32);
@ -602,11 +649,11 @@ TEST(MemorySanitizer, readv) {
iov[0].iov_len = 5;
iov[1].iov_base = buf + 10;
iov[1].iov_len = 2000;
int fd = open("/proc/self/stat", O_RDONLY);
int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = readv(fd, iov, 2);
ASSERT_GE(sz, 0);
ASSERT_LT(sz, 5 + 2000);
ASSERT_LE(sz, 5 + 2000);
ASSERT_GT((size_t)sz, iov[0].iov_len);
EXPECT_POISONED(buf[0]);
EXPECT_NOT_POISONED(buf[1]);
@ -626,11 +673,11 @@ TEST(MemorySanitizer, preadv) {
iov[0].iov_len = 5;
iov[1].iov_base = buf + 10;
iov[1].iov_len = 2000;
int fd = open("/proc/self/stat", O_RDONLY);
int fd = open(FILE_TO_READ, O_RDONLY);
ASSERT_GT(fd, 0);
int sz = preadv(fd, iov, 2, 3);
ASSERT_GE(sz, 0);
ASSERT_LT(sz, 5 + 2000);
ASSERT_LE(sz, 5 + 2000);
ASSERT_GT((size_t)sz, iov[0].iov_len);
EXPECT_POISONED(buf[0]);
EXPECT_NOT_POISONED(buf[1]);
@ -652,15 +699,14 @@ TEST(MemorySanitizer, DISABLED_ioctl) {
TEST(MemorySanitizer, readlink) {
char *x = new char[1000];
readlink("/proc/self/exe", x, 1000);
readlink(SYMLINK_TO_READ, x, 1000);
EXPECT_NOT_POISONED(x[0]);
delete [] x;
}
TEST(MemorySanitizer, stat) {
struct stat* st = new struct stat;
int res = stat("/proc/self/stat", st);
int res = stat(FILE_TO_READ, st);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(st->st_dev);
EXPECT_NOT_POISONED(st->st_mode);
@ -669,9 +715,9 @@ TEST(MemorySanitizer, stat) {
TEST(MemorySanitizer, fstatat) {
struct stat* st = new struct stat;
int dirfd = open("/proc/self", O_RDONLY);
int dirfd = open(DIR_TO_READ, O_RDONLY);
ASSERT_GT(dirfd, 0);
int res = fstatat(dirfd, "stat", st, 0);
int res = fstatat(dirfd, SUBFILE_TO_READ, st, 0);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(st->st_dev);
EXPECT_NOT_POISONED(st->st_mode);
@ -763,6 +809,8 @@ TEST(MemorySanitizer, poll) {
close(pipefd[1]);
}
// There is no ppoll() on FreeBSD.
#if !defined (__FreeBSD__)
TEST(MemorySanitizer, ppoll) {
int* pipefd = new int[2];
int res = pipe(pipefd);
@ -787,6 +835,7 @@ TEST(MemorySanitizer, ppoll) {
close(pipefd[0]);
close(pipefd[1]);
}
#endif
TEST(MemorySanitizer, poll_positive) {
int* pipefd = new int[2];
@ -851,8 +900,11 @@ TEST(MemorySanitizer, accept) {
res = fcntl(connect_socket, F_SETFL, O_NONBLOCK);
ASSERT_EQ(0, res);
res = connect(connect_socket, (struct sockaddr *)&sai, sizeof(sai));
ASSERT_EQ(-1, res);
ASSERT_EQ(EINPROGRESS, errno);
// On FreeBSD this connection completes immediately.
if (res != 0) {
ASSERT_EQ(-1, res);
ASSERT_EQ(EINPROGRESS, errno);
}
__msan_poison(&sai, sizeof(sai));
int new_sock = accept(listen_socket, (struct sockaddr *)&sai, &sz);
@ -973,7 +1025,6 @@ TEST(MemorySanitizer, recvmsg) {
ASSERT_EQ(0, res);
ASSERT_EQ(sizeof(client_sai), sz);
const char *s = "message text";
struct iovec iov;
iov.iov_base = (void *)s;
@ -1125,12 +1176,15 @@ TEST(MemorySanitizer, getcwd_gnu) {
free(res);
}
// There's no get_current_dir_name() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, get_current_dir_name) {
char* res = get_current_dir_name();
ASSERT_TRUE(res != NULL);
EXPECT_NOT_POISONED(res[0]);
free(res);
}
#endif
TEST(MemorySanitizer, shmctl) {
int id = shmget(IPC_PRIVATE, 4096, 0644 | IPC_CREAT);
@ -1141,6 +1195,8 @@ TEST(MemorySanitizer, shmctl) {
ASSERT_GT(res, -1);
EXPECT_NOT_POISONED(ds);
// FreeBSD does not support shmctl(IPC_INFO) and shmctl(SHM_INFO).
#if !defined(__FreeBSD__)
struct shminfo si;
res = shmctl(id, IPC_INFO, (struct shmid_ds *)&si);
ASSERT_GT(res, -1);
@ -1150,6 +1206,7 @@ TEST(MemorySanitizer, shmctl) {
res = shmctl(id, SHM_INFO, (struct shmid_ds *)&s_i);
ASSERT_GT(res, -1);
EXPECT_NOT_POISONED(s_i);
#endif
res = shmctl(id, IPC_RMID, 0);
ASSERT_GT(res, -1);
@ -1157,7 +1214,7 @@ TEST(MemorySanitizer, shmctl) {
TEST(MemorySanitizer, shmat) {
void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE(MAP_FAILED, p);
((char *)p)[10] = *GetPoisoned<U1>();
@ -1183,6 +1240,8 @@ TEST(MemorySanitizer, shmat) {
ASSERT_GT(res, -1);
}
// There's no random_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, random_r) {
int32_t x;
char z[64];
@ -1198,6 +1257,7 @@ TEST(MemorySanitizer, random_r) {
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(x);
}
#endif
TEST(MemorySanitizer, confstr) {
char buf[3];
@ -1215,6 +1275,16 @@ TEST(MemorySanitizer, confstr) {
ASSERT_EQ(res, strlen(buf2) + 1);
}
TEST(MemorySanitizer, opendir) {
DIR *dir = opendir(".");
closedir(dir);
char name[10] = ".";
__msan_poison(name, sizeof(name));
EXPECT_UMR(dir = opendir(name));
closedir(dir);
}
TEST(MemorySanitizer, readdir) {
DIR *dir = opendir(".");
struct dirent *d = readdir(dir);
@ -1251,6 +1321,8 @@ TEST(MemorySanitizer, realpath_null) {
free(res);
}
// There's no canonicalize_file_name() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, canonicalize_file_name) {
const char* relpath = ".";
char* res = canonicalize_file_name(relpath);
@ -1258,6 +1330,7 @@ TEST(MemorySanitizer, canonicalize_file_name) {
EXPECT_NOT_POISONED(res[0]);
free(res);
}
#endif
extern char **environ;
@ -1655,26 +1728,35 @@ TEST(MemorySanitizer, modfl) {
EXPECT_NOT_POISONED(y);
}
// There's no sincos() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincos) {
double s, c;
sincos(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
#endif
// There's no sincosf() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincosf) {
float s, c;
sincosf(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
#endif
// There's no sincosl() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sincosl) {
long double s, c;
sincosl(0.2, &s, &c);
EXPECT_NOT_POISONED(s);
EXPECT_NOT_POISONED(c);
}
#endif
TEST(MemorySanitizer, remquo) {
int quo;
@ -1729,13 +1811,18 @@ TEST(MemorySanitizer, lgammaf_r) {
EXPECT_NOT_POISONED(sgn);
}
// There's no lgammal_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, lgammal_r) {
int sgn;
long double res = lgammal_r(1.1, &sgn);
ASSERT_NE(0.0, res);
EXPECT_NOT_POISONED(sgn);
}
#endif
// There's no drand48_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, drand48_r) {
struct drand48_data buf;
srand48_r(0, &buf);
@ -1743,7 +1830,10 @@ TEST(MemorySanitizer, drand48_r) {
drand48_r(&buf, &d);
EXPECT_NOT_POISONED(d);
}
#endif
// There's no lrand48_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, lrand48_r) {
struct drand48_data buf;
srand48_r(0, &buf);
@ -1751,6 +1841,7 @@ TEST(MemorySanitizer, lrand48_r) {
lrand48_r(&buf, &d);
EXPECT_NOT_POISONED(d);
}
#endif
TEST(MemorySanitizer, sprintf) { // NOLINT
char buff[10];
@ -2015,6 +2106,8 @@ TEST(MemorySanitizer, localtime_r) {
EXPECT_NE(0U, strlen(time.tm_zone));
}
// There's no getmntent() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, getmntent) {
FILE *fp = setmntent("/etc/fstab", "r");
struct mntent *mnt = getmntent(fp);
@ -2027,7 +2120,10 @@ TEST(MemorySanitizer, getmntent) {
EXPECT_NOT_POISONED(mnt->mnt_passno);
fclose(fp);
}
#endif
// There's no getmntent_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, getmntent_r) {
FILE *fp = setmntent("/etc/fstab", "r");
struct mntent mntbuf;
@ -2042,6 +2138,7 @@ TEST(MemorySanitizer, getmntent_r) {
EXPECT_NOT_POISONED(mnt->mnt_passno);
fclose(fp);
}
#endif
TEST(MemorySanitizer, ether) {
const char *asc = "11:22:33:44:55:66";
@ -2081,6 +2178,8 @@ TEST(MemorySanitizer, mmap) {
}
}
// There's no fcvt() on FreeBSD.
#if !defined(__FreeBSD__)
// FIXME: enable and add ecvt.
// FIXME: check why msandr does not handle fcvt.
TEST(MemorySanitizer, fcvt) {
@ -2096,7 +2195,10 @@ TEST(MemorySanitizer, fcvt) {
EXPECT_NOT_POISONED(str[0]);
ASSERT_NE(0U, strlen(str));
}
#endif
// There's no fcvt_long() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fcvt_long) {
int a, b;
break_optimization(&a);
@ -2110,7 +2212,7 @@ TEST(MemorySanitizer, fcvt_long) {
EXPECT_NOT_POISONED(str[0]);
ASSERT_NE(0U, strlen(str));
}
#endif
TEST(MemorySanitizer, memchr) {
char x[10];
@ -2712,9 +2814,20 @@ TEST(MemorySanitizer, getrusage) {
EXPECT_NOT_POISONED(usage.ru_nivcsw);
}
#ifdef __GLIBC__
extern char *program_invocation_name;
#else // __GLIBC__
#if defined(__FreeBSD__)
static void GetProgramPath(char *buf, size_t sz) {
int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
int res = sysctl(mib, 4, buf, &sz, NULL, 0);
ASSERT_EQ(0, res);
}
#elif defined(__GLIBC__)
static void GetProgramPath(char *buf, size_t sz) {
extern char *program_invocation_name;
int res = snprintf(buf, sz, "%s", program_invocation_name);
ASSERT_GE(res, 0);
ASSERT_LT((size_t)res, sz);
}
#else
# error "TODO: port this"
#endif
@ -2749,21 +2862,29 @@ static int dl_phdr_callback(struct dl_phdr_info *info, size_t size, void *data)
// Compute the path to our loadable DSO. We assume it's in the same
// directory. Use only string routines that we already intercept.
static int PathToLoadable(char *buf, size_t sz) {
const char *basename = "libmsan_loadable.x86_64.so";
char *argv0 = program_invocation_name;
char *last_slash = strrchr(argv0, '/');
assert(last_slash);
int res =
snprintf(buf, sz, "%.*s/%s", int(last_slash - argv0), argv0, basename);
assert(res >= 0);
return (size_t)res < sz ? 0 : res;
static void GetPathToLoadable(char *buf, size_t sz) {
char program_path[kMaxPathLength];
GetProgramPath(program_path, sizeof(program_path));
const char *last_slash = strrchr(program_path, '/');
ASSERT_NE(nullptr, last_slash);
size_t dir_len = (size_t)(last_slash - program_path);
#if defined(__x86_64__)
static const char basename[] = "libmsan_loadable.x86_64.so";
#elif defined(__MIPSEB__) || defined(MIPSEB)
static const char basename[] = "libmsan_loadable.mips64.so";
#elif defined(__mips64)
static const char basename[] = "libmsan_loadable.mips64el.so";
#endif
int res = snprintf(buf, sz, "%.*s/%s",
(int)dir_len, program_path, basename);
ASSERT_GE(res, 0);
ASSERT_LT((size_t)res, sz);
}
TEST(MemorySanitizer, dl_iterate_phdr) {
char path[4096];
int res = PathToLoadable(path, sizeof(path));
ASSERT_EQ(0, res);
char path[kMaxPathLength];
GetPathToLoadable(path, sizeof(path));
// Having at least one dlopen'ed library in the process makes this more
// entertaining.
@ -2773,15 +2894,13 @@ TEST(MemorySanitizer, dl_iterate_phdr) {
int count = 0;
int result = dl_iterate_phdr(dl_phdr_callback, &count);
ASSERT_GT(count, 0);
dlclose(lib);
}
TEST(MemorySanitizer, dlopen) {
char path[4096];
int res = PathToLoadable(path, sizeof(path));
ASSERT_EQ(0, res);
char path[kMaxPathLength];
GetPathToLoadable(path, sizeof(path));
// We need to clear shadow for globals when doing dlopen. In order to test
// this, we have to poison the shadow for the DSO before we load it. In
@ -2806,19 +2925,22 @@ TEST(MemorySanitizer, dlopen) {
// Regression test for a crash in dlopen() interceptor.
TEST(MemorySanitizer, dlopenFailed) {
const char *path = "/libmsan_loadable_does_not_exist.x86_64.so";
const char *path = "/libmsan_loadable_does_not_exist.so";
void *lib = dlopen(path, RTLD_LAZY);
ASSERT_TRUE(lib == NULL);
}
#endif // MSAN_TEST_DISABLE_DLOPEN
// There's no sched_getaffinity() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sched_getaffinity) {
cpu_set_t mask;
int res = sched_getaffinity(getpid(), sizeof(mask), &mask);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(mask);
}
#endif
TEST(MemorySanitizer, scanf) {
const char *input = "42 hello";
@ -3048,11 +3170,14 @@ TEST(MemorySanitizer, posix_memalign) {
free(p);
}
// There's no memalign() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, memalign) {
void *p = memalign(4096, 13);
EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
free(p);
}
#endif
TEST(MemorySanitizer, valloc) {
void *a = valloc(100);
@ -3060,6 +3185,8 @@ TEST(MemorySanitizer, valloc) {
free(a);
}
// There's no pvalloc() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, pvalloc) {
void *p = pvalloc(kPageSize + 100);
EXPECT_EQ(0U, (uintptr_t)p % kPageSize);
@ -3071,6 +3198,7 @@ TEST(MemorySanitizer, pvalloc) {
EXPECT_EQ(kPageSize, __sanitizer_get_allocated_size(p));
free(p);
}
#endif
TEST(MemorySanitizer, inet_pton) {
const char *s = "1:0:0:0:0:0:0:8";
@ -3114,12 +3242,15 @@ TEST(MemorySanitizer, gethostname) {
EXPECT_NOT_POISONED(strlen(buf));
}
// There's no sysinfo() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, sysinfo) {
struct sysinfo info;
int res = sysinfo(&info);
ASSERT_EQ(0, res);
EXPECT_NOT_POISONED(info);
}
#endif
TEST(MemorySanitizer, getpwuid) {
struct passwd *p = getpwuid(0); // root
@ -3174,8 +3305,10 @@ TEST(MemorySanitizer, getgrnam_r) {
struct group grp;
struct group *grpres;
char buf[10000];
int res = getgrnam_r("root", &grp, buf, sizeof(buf), &grpres);
int res = getgrnam_r(SUPERUSER_GROUP, &grp, buf, sizeof(buf), &grpres);
ASSERT_EQ(0, res);
// Note that getgrnam_r() returns 0 if the matching group is not found.
ASSERT_NE(nullptr, grpres);
EXPECT_NOT_POISONED(grp.gr_name);
ASSERT_TRUE(grp.gr_name != NULL);
EXPECT_NOT_POISONED(grp.gr_name[0]);
@ -3207,6 +3340,8 @@ TEST(MemorySanitizer, getpwent_r) {
EXPECT_NOT_POISONED(pwdres);
}
// There's no fgetpwent() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetpwent) {
FILE *fp = fopen("/etc/passwd", "r");
struct passwd *p = fgetpwent(fp);
@ -3217,6 +3352,7 @@ TEST(MemorySanitizer, fgetpwent) {
EXPECT_NOT_POISONED(p->pw_uid);
fclose(fp);
}
#endif
TEST(MemorySanitizer, getgrent) {
setgrent();
@ -3228,6 +3364,8 @@ TEST(MemorySanitizer, getgrent) {
EXPECT_NOT_POISONED(p->gr_gid);
}
// There's no fgetgrent() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetgrent) {
FILE *fp = fopen("/etc/group", "r");
struct group *grp = fgetgrent(fp);
@ -3242,6 +3380,7 @@ TEST(MemorySanitizer, fgetgrent) {
}
fclose(fp);
}
#endif
TEST(MemorySanitizer, getgrent_r) {
struct group grp;
@ -3257,6 +3396,8 @@ TEST(MemorySanitizer, getgrent_r) {
EXPECT_NOT_POISONED(grpres);
}
// There's no fgetgrent_r() on FreeBSD.
#if !defined(__FreeBSD__)
TEST(MemorySanitizer, fgetgrent_r) {
FILE *fp = fopen("/etc/group", "r");
struct group grp;
@ -3272,6 +3413,7 @@ TEST(MemorySanitizer, fgetgrent_r) {
EXPECT_NOT_POISONED(grpres);
fclose(fp);
}
#endif
TEST(MemorySanitizer, getgroups) {
int n = getgroups(0, 0);
@ -3399,7 +3541,7 @@ TEST(MemorySanitizer, VolatileBitfield) {
}
TEST(MemorySanitizer, UnalignedLoad) {
char x[32];
char x[32] __attribute__((aligned(8)));
U4 origin = __LINE__;
for (unsigned i = 0; i < sizeof(x) / 4; ++i)
__msan_set_origin(x + 4 * i, 4, origin + i);
@ -3433,7 +3575,7 @@ TEST(MemorySanitizer, UnalignedLoad) {
}
TEST(MemorySanitizer, UnalignedStore16) {
char x[5];
char x[5] __attribute__((aligned(4)));
U2 y2 = 0;
U4 origin = __LINE__;
__msan_poison(&y2, 1);
@ -3444,11 +3586,10 @@ TEST(MemorySanitizer, UnalignedStore16) {
EXPECT_POISONED_O(x[1], origin);
EXPECT_NOT_POISONED(x[2]);
EXPECT_POISONED_O(x[3], origin);
EXPECT_POISONED_O(x[4], origin);
}
TEST(MemorySanitizer, UnalignedStore32) {
char x[8];
char x[8] __attribute__((aligned(4)));
U4 y4 = 0;
U4 origin = __LINE__;
__msan_poison(&y4, 2);
@ -3466,7 +3607,7 @@ TEST(MemorySanitizer, UnalignedStore32) {
}
TEST(MemorySanitizer, UnalignedStore64) {
char x[16];
char x[16] __attribute__((aligned(8)));
U8 y8 = 0;
U4 origin = __LINE__;
__msan_poison(&y8, 3);
@ -3489,7 +3630,7 @@ TEST(MemorySanitizer, UnalignedStore64) {
}
TEST(MemorySanitizer, UnalignedStore16_precise) {
char x[8];
char x[8] __attribute__((aligned(4)));
U2 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@ -3512,7 +3653,7 @@ TEST(MemorySanitizer, UnalignedStore16_precise) {
}
TEST(MemorySanitizer, UnalignedStore16_precise2) {
char x[8];
char x[8] __attribute__((aligned(4)));
U2 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@ -3535,7 +3676,7 @@ TEST(MemorySanitizer, UnalignedStore16_precise2) {
}
TEST(MemorySanitizer, UnalignedStore64_precise) {
char x[12];
char x[12] __attribute__((aligned(8)));
U8 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@ -3567,7 +3708,7 @@ TEST(MemorySanitizer, UnalignedStore64_precise) {
}
TEST(MemorySanitizer, UnalignedStore64_precise2) {
char x[12];
char x[12] __attribute__((aligned(8)));
U8 y = 0;
U4 originx1 = __LINE__;
U4 originx2 = __LINE__;
@ -3597,7 +3738,7 @@ TEST(MemorySanitizer, UnalignedStore64_precise2) {
EXPECT_POISONED_O(x[11], originx3);
}
#if defined(__clang__)
#if (defined(__x86_64__) && defined(__clang__))
namespace {
typedef U1 V16x8 __attribute__((__vector_size__(16)));
typedef U2 V8x16 __attribute__((__vector_size__(16)));
@ -4116,7 +4257,8 @@ TEST(MemorySanitizer, LargeAllocatorUnpoisonsOnFree) {
// Allocate the page that was released to the OS in free() with the real mmap,
// bypassing the interceptor.
char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
char *q = (char *)real_mmap(p, 4096, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ASSERT_NE((char *)0, q);
ASSERT_TRUE(q <= p);

View File

@ -11,6 +11,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/errno.h>
#define UNCONST(ptr) ((void *)(uintptr_t)(ptr))
@ -175,7 +176,11 @@ int __llvm_profile_write_file(void) {
return -1;
/* Write the file. */
return writeFileWithName(__llvm_profile_CurrentFilename);
int rc = writeFileWithName(__llvm_profile_CurrentFilename);
if (rc && getenv("LLVM_PROFILE_VERBOSE_ERRORS"))
fprintf(stderr, "LLVM Profile: Failed to write file \"%s\": %s\n",
__llvm_profile_CurrentFilename, strerror(errno));
return rc;
}
static void writeFileWithoutReturn(void) {

View File

@ -14,7 +14,6 @@
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
namespace __sanitizer {
@ -61,7 +60,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init();
internal_allocator_instance->Init(/* may_return_null*/ false);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@ -140,14 +139,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
void *AllocatorReturnNull() {
if (common_flags()->allocator_may_return_null)
return 0;
void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
return 0;
Die();
}
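The flag in the message is the common runtime flag allocator_may_return_null, typically set through the tool's _OPTIONS environment variable. A hedged illustration of the observable difference:

#include <stdio.h>
#include <stdlib.h>

int main() {
  // Under a sanitizer runtime with allocator_may_return_null=1 this prints a
  // null pointer; with the default (0) the process dies with the report above.
  void *p = malloc((size_t)-1);  // cannot possibly be satisfied
  printf("p = %p\n", p);
  free(p);
  return 0;
}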
} // namespace __sanitizer

View File

@ -23,8 +23,8 @@
namespace __sanitizer {
// Depending on allocator_may_return_null either return 0 or crash.
void *AllocatorReturnNull();
// Prints error message and kills the program.
void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
@ -211,6 +211,7 @@ class AllocatorStats {
void Init() {
internal_memset(this, 0, sizeof(*this));
}
void InitLinkerInitialized() {}
void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
@ -240,11 +241,14 @@ class AllocatorStats {
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
void Init() {
internal_memset(this, 0, sizeof(*this));
void InitLinkerInitialized() {
next_ = this;
prev_ = this;
}
void Init() {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized();
}
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
@ -1002,9 +1006,14 @@ struct SizeClassAllocatorLocalCache {
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
void Init() {
internal_memset(this, 0, sizeof(*this));
void InitLinkerInitialized(bool may_return_null) {
page_size_ = GetPageSizeCached();
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void Init(bool may_return_null) {
internal_memset(this, 0, sizeof(*this));
InitLinkerInitialized(may_return_null);
}
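The *LinkerInitialized variants exist so that a global allocator object can skip the memset: a global lives in BSS and is already all-zero before any code runs. A hedged sketch of the pattern with an illustrative type:

struct Counter {
  long n;                          // already zero in BSS for a global instance
  void Init() { n = 0; }           // explicit form, for non-global storage
  void InitLinkerInitialized() {}  // global form: zeroing already happened
};
static Counter g_counter;  // safe to touch before static constructors run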
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@ -1012,7 +1021,9 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
if (map_size < size) return AllocatorReturnNull(); // Overflow.
// Overflow.
if (map_size < size)
return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_));
@ -1048,6 +1059,16 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res);
}
void *ReturnNullOrDie() {
if (atomic_load(&may_return_null_, memory_order_acquire))
return 0;
ReportAllocatorCannotReturnNull();
}
void SetMayReturnNull(bool may_return_null) {
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@ -1226,6 +1247,7 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
@ -1239,19 +1261,32 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
void Init() {
void InitCommon(bool may_return_null) {
primary_.Init();
secondary_.Init();
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void InitLinkerInitialized(bool may_return_null) {
secondary_.InitLinkerInitialized(may_return_null);
stats_.InitLinkerInitialized();
InitCommon(may_return_null);
}
void Init(bool may_return_null) {
secondary_.Init(may_return_null);
stats_.Init();
InitCommon(may_return_null);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
bool cleared = false) {
bool cleared = false, bool check_rss_limit = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size)
return AllocatorReturnNull();
return ReturnNullOrDie();
if (check_rss_limit && RssLimitIsExceeded())
return ReturnNullOrDie();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@ -1267,6 +1302,30 @@ class CombinedAllocator {
return res;
}
bool MayReturnNull() const {
return atomic_load(&may_return_null_, memory_order_acquire);
}
void *ReturnNullOrDie() {
if (MayReturnNull())
return 0;
ReportAllocatorCannotReturnNull();
}
void SetMayReturnNull(bool may_return_null) {
secondary_.SetMayReturnNull(may_return_null);
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}
void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
memory_order_release);
}
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
@ -1379,6 +1438,8 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
atomic_uint8_t may_return_null_;
atomic_uint8_t rss_limit_is_exceeded_;
};
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.

View File

@ -49,6 +49,15 @@ void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
void InternalFree(void *p, InternalAllocatorCache *cache = 0);
InternalAllocator *internal_allocator();
enum InternalAllocEnum {
INTERNAL_ALLOC
};
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
__sanitizer::InternalAllocEnum) {
return __sanitizer::InternalAlloc(size);
}
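With this overload, run-time code can construct objects in internal-allocator storage using placement-new syntax. A hedged usage sketch (Foo is illustrative):

struct Foo {
  int a, b;
  Foo(int a, int b) : a(a), b(b) {}
};

void Example() {
  Foo *f = new (__sanitizer::INTERNAL_ALLOC) Foo(1, 2);  // InternalAlloc + ctor
  // There is no matching operator delete; destroy and free manually.
  f->~Foo();
  __sanitizer::InternalFree(f);
}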
#endif // SANITIZER_ALLOCATOR_INTERNAL_H

View File

@ -12,13 +12,17 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
atomic_uint32_t current_verbosity;
uptr GetPageSizeCached() {
static uptr PageSize;
if (!PageSize)
@ -94,19 +98,23 @@ uptr stoptheworld_tracer_pid = 0;
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;
static DieCallbackType DieCallback;
static DieCallbackType InternalDieCallback, UserDieCallback;
void SetDieCallback(DieCallbackType callback) {
DieCallback = callback;
InternalDieCallback = callback;
}
void SetUserDieCallback(DieCallbackType callback) {
UserDieCallback = callback;
}
DieCallbackType GetDieCallback() {
return DieCallback;
return InternalDieCallback;
}
void NORETURN Die() {
if (DieCallback) {
DieCallback();
}
if (UserDieCallback)
UserDieCallback();
if (InternalDieCallback)
InternalDieCallback();
internal__exit(1);
}
@ -125,8 +133,8 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
Die();
}
uptr ReadFileToBuffer(const char *file_name, char **buff,
uptr *buff_size, uptr max_len) {
uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr max_len, int *errno_p) {
uptr PageSize = GetPageSizeCached();
uptr kMinFileLen = PageSize;
uptr read_len = 0;
@ -135,7 +143,7 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
// The files we usually open are not seekable, so try different buffer sizes.
for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
uptr openrv = OpenFile(file_name, /*write*/ false);
if (internal_iserror(openrv)) return 0;
if (internal_iserror(openrv, errno_p)) return 0;
fd_t fd = openrv;
UnmapOrDie(*buff, *buff_size);
*buff = (char*)MmapOrDie(size, __func__);
@ -145,6 +153,10 @@ uptr ReadFileToBuffer(const char *file_name, char **buff,
bool reached_eof = false;
while (read_len + PageSize <= size) {
uptr just_read = internal_read(fd, *buff + read_len, PageSize);
if (internal_iserror(just_read, errno_p)) {
UnmapOrDie(*buff, *buff_size);
return 0;
}
if (just_read == 0) {
reached_eof = true;
break;
@ -233,20 +245,28 @@ void ReportErrorSummary(const char *error_type, const char *file,
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
n_ranges_ = 0;
ranges_.clear();
}
void LoadedModule::clear() {
InternalFree(full_name_);
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
InternalFree(r);
}
}
void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
ranges_[n_ranges_].beg = beg;
ranges_[n_ranges_].end = end;
exec_[n_ranges_] = executable;
n_ranges_++;
void *mem = InternalAlloc(sizeof(AddressRange));
AddressRange *r = new(mem) AddressRange(beg, end, executable);
ranges_.push_back(r);
}
bool LoadedModule::containsAddress(uptr address) const {
for (uptr i = 0; i < n_ranges_; i++) {
if (ranges_[i].beg <= address && address < ranges_[i].end)
for (Iterator iter = ranges(); iter.hasNext();) {
const AddressRange *r = iter.next();
if (r->beg <= address && address < r->end)
return true;
}
return false;
@ -268,6 +288,48 @@ void DecreaseTotalMmap(uptr size) {
atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}
bool TemplateMatch(const char *templ, const char *str) {
if (str == 0 || str[0] == 0)
return false;
bool start = false;
if (templ && templ[0] == '^') {
start = true;
templ++;
}
bool asterisk = false;
while (templ && templ[0]) {
if (templ[0] == '*') {
templ++;
start = false;
asterisk = true;
continue;
}
if (templ[0] == '$')
return str[0] == 0 || asterisk;
if (str[0] == 0)
return false;
char *tpos = (char*)internal_strchr(templ, '*');
char *tpos1 = (char*)internal_strchr(templ, '$');
if (tpos == 0 || (tpos1 && tpos1 < tpos))
tpos = tpos1;
if (tpos != 0)
tpos[0] = 0;
const char *str0 = str;
const char *spos = internal_strstr(str, templ);
str = spos + internal_strlen(templ);
templ = tpos;
if (tpos)
tpos[0] = tpos == tpos1 ? '$' : '*';
if (spos == 0)
return false;
if (start && spos != str0)
return false;
start = false;
asterisk = false;
}
return true;
}
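In the template language implemented above, plain text is a substring match, '^' anchors at the start, '$' anchors at the end, and '*' matches any (possibly empty) run of characters. A few examples traced through the code:

// TemplateMatch("^foo", "foobar") == true   // anchored prefix
// TemplateMatch("bar$", "foobar") == true   // anchored suffix
// TemplateMatch("f*r$", "foobar") == true   // '*' spans "ooba"
// TemplateMatch("^bar", "foobar") == false  // "bar" is not at the start
// TemplateMatch("oba",  "foobar") == true   // bare substring match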
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
@ -280,4 +342,9 @@ void __sanitizer_set_report_path(const char *path) {
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_death_callback(void (*callback)(void)) {
SetUserDieCallback(callback);
}
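A hedged usage sketch from the client side: an instrumented program can register a hook that runs just before the runtime terminates the process on an error.

extern "C" void __sanitizer_set_death_callback(void (*callback)(void));

static void FlushStateOnDeath() {
  // e.g. flush application logs so the sanitizer report is not lost
}

int main() {
  __sanitizer_set_death_callback(FlushStateOnDeath);
  // ... run normally; the callback fires before Die() calls internal__exit(1)
  return 0;
}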
} // extern "C"

View File

@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
@ -16,10 +16,12 @@
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_flags.h"
namespace __sanitizer {
struct StackTrace;
@ -40,6 +42,14 @@ const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
extern const char *SanitizerToolName; // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
return atomic_load(&current_verbosity, memory_order_relaxed);
}
uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
@ -67,6 +77,8 @@ void FlushUnneededShadowMemory(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
@ -135,11 +147,11 @@ void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \
if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
} while (0)
#define VPrintf(level, ...) \
do { \
if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \
if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
} while (0)
// Can be used to prevent mixing error reports from different sanitizers.
@ -179,8 +191,8 @@ uptr OpenFile(const char *filename, bool write);
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// Returns the number of bytes read, or 0 if the file cannot be opened.
uptr ReadFileToBuffer(const char *file_name, char **buff,
uptr *buff_size, uptr max_len);
uptr ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr max_len, int *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it
// (or NULL if the mapping fails). Stores the size of mmaped region
// in '*buff_size'.
@ -214,10 +226,13 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());
void CovUpdateMapping(uptr caller_pc = 0);
void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);
void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);
void InitTlsSize();
uptr GetTlsSize();
@ -227,6 +242,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit
void NORETURN Abort();
@ -245,11 +261,18 @@ bool SanitizerGetThreadName(char *name, int max_len);
// to do tool-specific job.
typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
void SetUserDieCallback(DieCallbackType);
DieCallbackType GetDieCallback();
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsDeadlySignal(int signum);
@ -376,14 +399,14 @@ INLINE int ToLower(int c) {
// small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVector {
class InternalMmapVectorNoCtor {
public:
explicit InternalMmapVector(uptr initial_capacity) {
void Initialize(uptr initial_capacity) {
capacity_ = Max(initial_capacity, (uptr)1);
size_ = 0;
data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
}
~InternalMmapVector() {
void Destroy() {
UnmapOrDie(data_, capacity_ * sizeof(T));
}
T &operator[](uptr i) {
@ -434,15 +457,24 @@ class InternalMmapVector {
UnmapOrDie(old_data, capacity_ * sizeof(T));
capacity_ = new_capacity;
}
// Disallow evil constructors.
InternalMmapVector(const InternalMmapVector&);
void operator=(const InternalMmapVector&);
T *data_;
uptr capacity_;
uptr size_;
};
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
public:
explicit InternalMmapVector(uptr initial_capacity) {
InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
}
~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
// Disallow evil constructors.
InternalMmapVector(const InternalMmapVector&);
void operator=(const InternalMmapVector&);
};
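The split buys linker-initialized containers: a global InternalMmapVectorNoCtor has no constructor to run at startup, so it can be used by code that executes before C++ static initializers, provided its owner calls Initialize() first. A hedged sketch:

static InternalMmapVectorNoCtor<int> g_early_ids;  // all-zero in BSS, no ctor

void EarlyInit() {
  g_early_ids.Initialize(/*initial_capacity=*/16);
  g_early_ids.push_back(42);
}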
// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
@ -501,28 +533,30 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
class LoadedModule {
public:
LoadedModule(const char *module_name, uptr base_address);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
uptr n_ranges() const { return n_ranges_; }
uptr address_range_start(int i) const { return ranges_[i].beg; }
uptr address_range_end(int i) const { return ranges_[i].end; }
bool address_range_executable(int i) const { return exec_[i]; }
private:
struct AddressRange {
AddressRange *next;
uptr beg;
uptr end;
bool executable;
AddressRange(uptr beg, uptr end, bool executable)
: next(nullptr), beg(beg), end(end), executable(executable) {}
};
char *full_name_;
typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
Iterator ranges() const { return Iterator(&ranges_); }
private:
char *full_name_; // Owned.
uptr base_address_;
static const uptr kMaxNumberOfAddressRanges = 6;
AddressRange ranges_[kMaxNumberOfAddressRanges];
bool exec_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
IntrusiveList<AddressRange> ranges_;
};
// OS-dependent function that fills array with descriptions of at most
@ -556,6 +590,10 @@ INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
#endif
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to

View File

@ -17,6 +17,7 @@
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_INITIALIZE_RANGE
// COMMON_INTERCEPTOR_DIR_ACQUIRE
// COMMON_INTERCEPTOR_FD_ACQUIRE
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
@ -43,6 +44,8 @@
#if SANITIZER_FREEBSD
#define pthread_setname_np pthread_set_name_np
#define inet_aton __inet_aton
#define inet_pton __inet_pton
#endif
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@ -82,7 +85,7 @@
#endif
#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {}
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
#endif
#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
@ -915,6 +918,16 @@ INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
va_list ap)
VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
#if SANITIZER_INTERCEPT_PRINTF_L
INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
const char *format, va_list ap)
VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
#endif // SANITIZER_INTERCEPT_PRINTF_L
INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
@ -991,6 +1004,14 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
#define INIT_PRINTF
#endif
#if SANITIZER_INTERCEPT_PRINTF_L
#define INIT_PRINTF_L \
COMMON_INTERCEPT_FUNCTION(snprintf_l); \
COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
#else
#define INIT_PRINTF_L
#endif
#if SANITIZER_INTERCEPT_ISOC99_PRINTF
#define INIT_ISOC99_PRINTF \
COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
@ -1007,8 +1028,12 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
#if SANITIZER_INTERCEPT_IOCTL
#include "sanitizer_common_interceptors_ioctl.inc"
INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
void *ctx;
va_list ap;
va_start(ap, request);
void *arg = va_arg(ap, void *);
va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
CHECK(ioctl_initialized);
@ -1017,6 +1042,10 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) {
// This effectively disables ioctl handling in TSan.
if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
// Although request is unsigned long, the rest of the interceptor uses it
// as just "unsigned" to save space, because we know that all values fit in
// "unsigned" - they are compile-time constants.
const ioctl_desc *desc = ioctl_lookup(request);
ioctl_desc decoded_desc;
if (!desc) {
@ -2139,6 +2168,16 @@ INTERCEPTOR(int, sysinfo, void *info) {
#endif
#if SANITIZER_INTERCEPT_READDIR
INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
__sanitizer_dirent *res = REAL(opendir)(path);
if (res != 0)
COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
return res;
}
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
@ -2167,6 +2206,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
}
#define INIT_READDIR \
COMMON_INTERCEPT_FUNCTION(opendir); \
COMMON_INTERCEPT_FUNCTION(readdir); \
COMMON_INTERCEPT_FUNCTION(readdir_r);
#else
@ -2560,6 +2600,19 @@ INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
#define INIT_SCHED_GETAFFINITY
#endif
#if SANITIZER_INTERCEPT_SCHED_GETPARAM
INTERCEPTOR(int, sched_getparam, int pid, void *param) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
int res = REAL(sched_getparam)(pid, param);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
return res;
}
#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
#else
#define INIT_SCHED_GETPARAM
#endif
#if SANITIZER_INTERCEPT_STRERROR
INTERCEPTOR(char *, strerror, int errnum) {
void *ctx;
@ -3868,6 +3921,12 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
// If you see any crashes around this functions, there are 2 known issues with
// it: 1. __tls_get_addr can be called with mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// 2. It can be called recursively if sanitizer code uses __tls_get_addr
// to access thread local variables (it should not happen normally,
// because sanitizers use initial-exec tls model).
INTERCEPTOR(void *, __tls_get_addr, void *arg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
@ -4762,6 +4821,7 @@ static void InitializeCommonInterceptors() {
INIT_SCANF;
INIT_ISOC99_SCANF;
INIT_PRINTF;
INIT_PRINTF_L;
INIT_ISOC99_PRINTF;
INIT_FREXP;
INIT_FREXPF_FREXPL;
@ -4812,6 +4872,7 @@ static void InitializeCommonInterceptors() {
INIT_CANONICALIZE_FILE_NAME;
INIT_CONFSTR;
INIT_SCHED_GETAFFINITY;
INIT_SCHED_GETPARAM;
INIT_STRERROR;
INIT_STRERROR_R;
INIT_XPG_STRERROR_R;

View File

@ -13,6 +13,7 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
@ -59,6 +60,71 @@ void ReportErrorSummary(const char *error_type, StackTrace *stack) {
#endif
}
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
SoftRssLimitExceededCallback = Callback;
}
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
if (Verbosity()) {
// If RSS has grown 10% since last time, print some information.
if (prev_reported_rss * 11 / 10 < current_rss_mb) {
Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
StackDepotStats *stack_depot_stats = StackDepotGetStats();
if (prev_reported_stack_depot_size * 11 / 10 <
stack_depot_stats->allocated) {
Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
SanitizerToolName,
stack_depot_stats->n_uniq_ids,
stack_depot_stats->allocated >> 20);
prev_reported_stack_depot_size = stack_depot_stats->allocated;
}
}
// Check RSS against the limit.
if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
DumpProcessMap();
Die();
}
if (soft_rss_limit_mb) {
if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
if (SoftRssLimitExceededCallback)
SoftRssLimitExceededCallback(false);
}
}
}
}
void MaybeStartBackgroudThread() {
if (!SANITIZER_LINUX) return; // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
}
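A hedged sketch of how a tool pairs the soft limit with the callback registered through SetSoftRssLimitExceededCallback(): the thread reports exceeded=true when RSS passes soft_rss_limit_mb, and exceeded=false once it drops back under.

static void OnSoftRssLimit(bool exceeded) {
  // Illustrative tool-side reaction: fail allocations gracefully while over
  // the limit, e.g. allocator.SetMayReturnNull(exceeded).
}

void ToolInit() {
  __sanitizer::SetSoftRssLimitExceededCallback(OnSoftRssLimit);  // once only
}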
} // namespace __sanitizer
void NOINLINE

View File

@ -2297,7 +2297,8 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@ -2316,7 +2317,8 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.

View File

@ -12,14 +12,16 @@
//
// Compiler instrumentation:
// For every interesting basic block the compiler injects the following code:
// if (Guard) {
// if (Guard < 0) {
// __sanitizer_cov(&Guard);
// }
// At module start-up time __sanitizer_cov_module_init sets the guards
// to consecutive negative numbers (-1, -2, -3, ...).
// It's fine to call __sanitizer_cov more than once for a given block.
//
// Run-time:
// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
// and atomically set Guard to 1.
// and atomically set Guard to -Guard.
// - __sanitizer_cov_dump: dump the coverage data to disk.
// For every module of the current process that has coverage data
// this will create a file module_name.PID.sancov. The file format is simple:
@ -56,23 +58,32 @@ static atomic_uintptr_t coverage_counter;
static bool cov_sandboxed = false;
static int cov_fd = kInvalidFd;
static unsigned int cov_max_block_size = 0;
static bool coverage_enabled = false;
static const char *coverage_dir;
namespace __sanitizer {
class CoverageData {
public:
void Init();
void Enable();
void Disable();
void ReInit();
void BeforeFork();
void AfterFork(int child_pid);
void Extend(uptr npcs);
void Add(uptr pc, u8 *guard);
void Add(uptr pc, u32 *guard);
void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
uptr cache_size);
void DumpCallerCalleePairs();
void DumpTrace();
ALWAYS_INLINE
void TraceBasicaBlock(uptr *cache);
void TraceBasicBlock(s32 *id);
void InitializeGuardArray(s32 *guards);
void InitializeGuards(s32 *guards, uptr n, const char *module_name);
void ReinitializeGuards();
uptr *data();
uptr size();
@ -80,7 +91,7 @@ class CoverageData {
private:
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 24, 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@ -96,45 +107,44 @@ class CoverageData {
// Descriptor of the file mapped pc array.
int pc_fd;
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
// Vector of module (compilation unit) names.
InternalMmapVectorNoCtor<const char*> comp_unit_name_vec;
// Caller-Callee (cc) array, size and current index.
static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
uptr **cc_array;
atomic_uintptr_t cc_array_index;
atomic_uintptr_t cc_array_size;
// Tracing (tr) pc and event arrays, their size and current index.
// Tracing event array, size and current pointer.
// We record all events (basic block entries) in a global buffer of u32
// values. Each such value is an index in the table of TracedPc objects.
// values. Each such value is the index in pc_array.
// So far the tracing is highly experimental:
// - not thread-safe;
// - does not support long traces;
// - not tuned for performance.
struct TracedPc {
uptr pc;
const char *module_name;
uptr module_offset;
};
static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
u32 *tr_event_array;
uptr tr_event_array_size;
uptr tr_event_array_index;
u32 *tr_event_pointer;
static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
TracedPc *tr_pc_array;
uptr tr_pc_array_size;
uptr tr_pc_array_index;
StaticSpinMutex mu;
void DirectOpen();
void ReInit();
};
static CoverageData coverage_data;
void CovUpdateMapping(const char *path, uptr caller_pc = 0);
void CoverageData::DirectOpen() {
InternalScopedString path(kMaxPathLength);
internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
common_flags()->coverage_dir, internal_getpid());
coverage_dir, internal_getpid());
pc_fd = OpenFile(path.data(), true);
if (internal_iserror(pc_fd)) {
Report(" Coverage: failed to open %s for writing\n", path.data());
@ -142,19 +152,23 @@ void CoverageData::DirectOpen() {
}
pc_array_mapped_size = 0;
CovUpdateMapping();
CovUpdateMapping(coverage_dir);
}
void CoverageData::Init() {
pc_fd = kInvalidFd;
}
void CoverageData::Enable() {
if (pc_array)
return;
pc_array = reinterpret_cast<uptr *>(
MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
pc_fd = kInvalidFd;
atomic_store(&pc_array_index, 0, memory_order_relaxed);
if (common_flags()->coverage_direct) {
atomic_store(&pc_array_size, 0, memory_order_relaxed);
atomic_store(&pc_array_index, 0, memory_order_relaxed);
} else {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
atomic_store(&pc_array_index, 0, memory_order_relaxed);
}
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
@@ -162,30 +176,72 @@ void CoverageData::Init() {
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
atomic_store(&cc_array_index, 0, memory_order_relaxed);
tr_event_array = reinterpret_cast<u32 *>(
MmapNoReserveOrDie(sizeof(tr_event_array[0]) * kTrEventArrayMaxSize,
"CovInit::tr_event_array"));
// Allocate tr_event_array with a guard page at the end.
tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
"CovInit::tr_event_array"));
Mprotect(reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
GetMmapGranularity());
tr_event_array_size = kTrEventArrayMaxSize;
tr_event_array_index = 0;
tr_event_pointer = tr_event_array;
}
tr_pc_array = reinterpret_cast<TracedPc *>(MmapNoReserveOrDie(
sizeof(tr_pc_array[0]) * kTrEventArrayMaxSize, "CovInit::tr_pc_array"));
tr_pc_array_size = kTrEventArrayMaxSize;
tr_pc_array_index = 0;
void CoverageData::InitializeGuardArray(s32 *guards) {
Enable(); // Make sure coverage is enabled at this point.
s32 n = guards[0];
for (s32 j = 1; j <= n; j++) {
uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
guards[j] = -static_cast<s32>(idx + 1);
}
}
void CoverageData::Disable() {
if (pc_array) {
internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
pc_array = nullptr;
}
if (cc_array) {
internal_munmap(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
cc_array = nullptr;
}
if (tr_event_array) {
internal_munmap(tr_event_array,
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
GetMmapGranularity());
tr_event_array = nullptr;
tr_event_pointer = nullptr;
}
if (pc_fd != kInvalidFd) {
internal_close(pc_fd);
pc_fd = kInvalidFd;
}
}
void CoverageData::ReinitializeGuards() {
// Assuming single thread.
atomic_store(&pc_array_index, 0, memory_order_relaxed);
for (uptr i = 0; i < guard_array_vec.size(); i++)
InitializeGuardArray(guard_array_vec[i]);
}
void CoverageData::ReInit() {
internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
if (pc_fd != kInvalidFd) internal_close(pc_fd);
if (common_flags()->coverage_direct) {
// In memory-mapped mode we must extend the new file to the known array
// size.
uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
Init();
if (size) Extend(size);
} else {
Init();
Disable();
if (coverage_enabled) {
if (common_flags()->coverage_direct) {
// In memory-mapped mode we must extend the new file to the known array
// size.
uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
Enable();
if (size) Extend(size);
if (coverage_enabled) CovUpdateMapping(coverage_dir);
} else {
Enable();
}
}
// Re-initialize the guards.
// We are single-threaded now, no need to grab any lock.
CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
ReinitializeGuards();
}
void CoverageData::BeforeFork() {
@@ -203,15 +259,16 @@ void CoverageData::Extend(uptr npcs) {
if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mu);
if (pc_fd == kInvalidFd) DirectOpen();
CHECK_NE(pc_fd, kInvalidFd);
uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
size += npcs * sizeof(uptr);
if (size > pc_array_mapped_size) {
if (coverage_enabled && size > pc_array_mapped_size) {
if (pc_fd == kInvalidFd) DirectOpen();
CHECK_NE(pc_fd, kInvalidFd);
uptr new_mapped_size = pc_array_mapped_size;
while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
// Extend the file and map the new space at the end of pc_array.
uptr res = internal_ftruncate(pc_fd, new_mapped_size);
@@ -220,29 +277,47 @@ void CoverageData::Extend(uptr npcs) {
Printf("failed to extend raw coverage file: %d\n", err);
Die();
}
void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
void *p = MapWritableFileToMemory((void *)next_map_base,
new_mapped_size - pc_array_mapped_size,
pc_fd, pc_array_mapped_size);
CHECK_EQ(p, pc_array + pc_array_mapped_size);
CHECK_EQ((uptr)p, next_map_base);
pc_array_mapped_size = new_mapped_size;
}
atomic_store(&pc_array_size, size, memory_order_release);
}
// Atomically add the pc to the vector. Then atomically set the guard to 1.
// If the function is called more than once for a given PC it will
// be inserted multiple times, which is fine.
void CoverageData::Add(uptr pc, u8 *guard) {
void CoverageData::InitializeGuards(s32 *guards, uptr n,
const char *module_name) {
// The array 'guards' has n+1 elements, we use the element zero
// to store 'n'.
CHECK_LT(n, 1 << 30);
guards[0] = static_cast<s32>(n);
InitializeGuardArray(guards);
SpinMutexLock l(&mu);
comp_unit_name_vec.push_back(module_name);
guard_array_vec.push_back(guards);
}
// If guard is negative, atomically set it to -guard and store the PC in
// pc_array.
void CoverageData::Add(uptr pc, u32 *guard) {
atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
if (guard_value >= 0) return;
atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
if (!pc_array) return;
uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
uptr idx = -guard_value - 1;
if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
return; // May happen after fork when pc_array_index becomes 0.
CHECK_LT(idx * sizeof(uptr),
atomic_load(&pc_array_size, memory_order_acquire));
pc_array[idx] = pc;
atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
// Set the guard.
atomic_uint8_t *atomic_guard = reinterpret_cast<atomic_uint8_t*>(guard);
atomic_store(atomic_guard, 1, memory_order_relaxed);
}
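The negative-guard handshake above can be modeled in a few lines of standalone C++ (an illustrative sketch, not part of this commit; the guard value and PC are invented). InitializeGuardArray() hands out guards of the form -(index + 1); the first hit flips the sign and records the PC in its pre-assigned slot, and every later hit sees a non-negative guard and returns early:

#include <atomic>
#include <cstdint>
#include <cstdio>

static uintptr_t pc_array[16];  // stands in for CoverageData::pc_array

// Mirrors the logic of CoverageData::Add(uptr, u32 *).
static void AddPc(uintptr_t pc, std::atomic<int32_t> *guard) {
  int32_t value = guard->load(std::memory_order_relaxed);
  if (value >= 0) return;                      // already visited
  guard->store(-value, std::memory_order_relaxed);
  pc_array[-value - 1] = pc;                   // slot assigned at init time
}

int main() {
  std::atomic<int32_t> guard(-5);  // as if the guard got index 4 at startup
  AddPc(0x400123, &guard);         // records pc_array[4] = 0x400123
  AddPc(0x400123, &guard);         // no-op: guard is +5 now
  std::printf("guard=%d pc=0x%lx\n", static_cast<int>(guard.load()),
              static_cast<unsigned long>(pc_array[4]));
  return 0;
}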
// Registers a pair caller=>callee.
@@ -338,18 +413,19 @@ static void CovWritePacked(int pid, const char *module, const void *blob,
// If packed = true and name == 0: <pid>.<sancov>.<packed>.
// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
// user-supplied).
static int CovOpenFile(bool packed, const char* name) {
static int CovOpenFile(bool packed, const char *name,
const char *extension = "sancov") {
InternalScopedString path(kMaxPathLength);
if (!packed) {
CHECK(name);
path.append("%s/%s.%zd.sancov", common_flags()->coverage_dir, name,
internal_getpid());
path.append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
extension);
} else {
if (!name)
path.append("%s/%zd.sancov.packed", common_flags()->coverage_dir,
internal_getpid());
path.append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
extension);
else
path.append("%s/%s.sancov.packed", common_flags()->coverage_dir, name);
path.append("%s/%s.%s.packed", coverage_dir, name, extension);
}
uptr fd = OpenFile(path.data(), true);
if (internal_iserror(fd)) {
@@ -361,35 +437,49 @@ static int CovOpenFile(bool packed, const char* name) {
// Dump trace PCs and trace events into two separate files.
void CoverageData::DumpTrace() {
uptr max_idx = tr_event_array_index;
uptr max_idx = tr_event_pointer - tr_event_array;
if (!max_idx) return;
auto sym = Symbolizer::GetOrInit();
if (!sym)
return;
InternalScopedString out(32 << 20);
for (uptr i = 0; i < max_idx; i++) {
u32 pc_idx = tr_event_array[i];
TracedPc *t = &tr_pc_array[pc_idx];
if (!t->module_name) {
const char *module_name = "<unknown>";
uptr module_address = 0;
sym->GetModuleNameAndOffsetForPC(t->pc, &module_name, &module_address);
t->module_name = internal_strdup(module_name);
t->module_offset = module_address;
out.append("%s 0x%zx\n", t->module_name, t->module_offset);
}
for (uptr i = 0, n = size(); i < n; i++) {
const char *module_name = "<unknown>";
uptr module_address = 0;
sym->GetModuleNameAndOffsetForPC(pc_array[i], &module_name,
&module_address);
out.append("%s 0x%zx\n", module_name, module_address);
}
int fd = CovOpenFile(false, "trace-points");
if (fd < 0) return;
internal_write(fd, out.data(), out.length());
internal_close(fd);
fd = CovOpenFile(false, "trace-compunits");
if (fd < 0) return;
out.clear();
for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
out.append("%s\n", comp_unit_name_vec[i]);
internal_write(fd, out.data(), out.length());
internal_close(fd);
fd = CovOpenFile(false, "trace-events");
if (fd < 0) return;
internal_write(fd, tr_event_array, max_idx * sizeof(tr_event_array[0]));
uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
// The trace file could be huge, and may not be written with a single syscall.
while (bytes_to_write) {
uptr actually_written = internal_write(fd, event_bytes, bytes_to_write);
if (actually_written <= bytes_to_write) {
bytes_to_write -= actually_written;
event_bytes += actually_written;
} else {
break;
}
}
internal_close(fd);
VReport(1, " CovDump: Trace: %zd PCs written\n", tr_pc_array_index);
VReport(1, " CovDump: Trace: %zd Events written\n", tr_event_array_index);
VReport(1, " CovDump: Trace: %zd PCs written\n", size());
VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
}
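The partial-write loop above is the standard write-all pattern for large buffers. The same idea in plain POSIX terms, with EINTR retry added for completeness (a standalone sketch; the function name is invented):

#include <unistd.h>
#include <cerrno>
#include <cstddef>

// Writes the whole buffer, retrying partial writes and interrupts.
static bool WriteAll(int fd, const void *data, size_t n) {
  const char *p = static_cast<const char *>(data);
  while (n > 0) {
    ssize_t written = write(fd, p, n);
    if (written < 0) {
      if (errno == EINTR) continue;  // interrupted: retry
      return false;                  // genuine I/O error
    }
    p += written;
    n -= static_cast<size_t>(written);
  }
  return true;
}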
// This function dumps the caller=>callee pairs into a file as a sequence of
@@ -434,28 +524,45 @@ void CoverageData::DumpCallerCalleePairs() {
// Record the current PC into the event buffer.
// Every event is a u32 value (index in tr_pc_array_index) so we compute
// it once and then cache in the provided 'cache' storage.
void CoverageData::TraceBasicaBlock(uptr *cache) {
CHECK(common_flags()->coverage);
uptr idx = *cache;
if (!idx) {
CHECK_LT(tr_pc_array_index, kTrPcArrayMaxSize);
idx = tr_pc_array_index++;
TracedPc *t = &tr_pc_array[idx];
t->pc = GET_CALLER_PC();
*cache = idx;
CHECK_LT(idx, 1U << 31);
//
// This function will eventually be inlined by the compiler.
void CoverageData::TraceBasicBlock(s32 *id) {
// Will trap here if
// 1. coverage is not enabled at run-time.
// 2. The array tr_event_array is full.
*tr_event_pointer = static_cast<u32>(*id - 1);
tr_event_pointer++;
}
static void CovDumpAsBitSet() {
if (!common_flags()->coverage_bitset) return;
if (!coverage_data.size()) return;
int fd = CovOpenFile(/* packed */false, "combined", "bitset-sancov");
if (fd < 0) return;
uptr n = coverage_data.size();
uptr n_set_bits = 0;
InternalScopedBuffer<char> out(n);
for (uptr i = 0; i < n; i++) {
uptr pc = coverage_data.data()[i];
out[i] = pc ? '1' : '0';
if (pc)
n_set_bits++;
}
CHECK_LT(tr_event_array_index, tr_event_array_size);
tr_event_array[tr_event_array_index] = static_cast<u32>(idx);
tr_event_array_index++;
internal_write(fd, out.data(), n);
internal_close(fd);
VReport(1, " CovDump: bitset of %zd bits written, %zd bits are set\n", n,
n_set_bits);
}
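The resulting <name>.<pid>.bitset-sancov file is plain ASCII, one '0' or '1' per pc_array slot, so post-processing it needs nothing more than this (a standalone sketch; the file name is only an example of what CovOpenFile() would produce):

#include <cstdio>

int main() {
  // Example name produced by CovOpenFile(false, "combined", "bitset-sancov").
  FILE *f = std::fopen("combined.1234.bitset-sancov", "r");
  if (!f) return 1;
  unsigned long hit = 0, total = 0;
  for (int c; (c = std::fgetc(f)) != EOF; total++)
    if (c == '1') hit++;
  std::fclose(f);
  std::printf("%lu of %lu PCs covered\n", hit, total);
  return 0;
}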
// Dump the coverage on disk.
static void CovDump() {
if (!common_flags()->coverage || common_flags()->coverage_direct) return;
if (!coverage_enabled || common_flags()->coverage_direct) return;
#if !SANITIZER_WINDOWS
if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
return;
CovDumpAsBitSet();
coverage_data.DumpTrace();
if (!common_flags()->coverage_pcs) return;
uptr size = coverage_data.size();
InternalMmapVector<u32> offsets(size);
uptr *vb = coverage_data.data();
@@ -491,8 +598,8 @@ static void CovDump() {
} else {
// One file per module per process.
path.clear();
path.append("%s/%s.%zd.sancov", common_flags()->coverage_dir,
module_name, internal_getpid());
path.append("%s/%s.%zd.sancov", coverage_dir, module_name,
internal_getpid());
int fd = CovOpenFile(false /* packed */, module_name);
if (fd > 0) {
internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
@@ -506,13 +613,12 @@ static void CovDump() {
if (cov_fd >= 0)
internal_close(cov_fd);
coverage_data.DumpCallerCalleePairs();
coverage_data.DumpTrace();
#endif // !SANITIZER_WINDOWS
}
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
if (!args) return;
if (!common_flags()->coverage) return;
if (!coverage_enabled) return;
cov_sandboxed = args->coverage_sandboxed;
if (!cov_sandboxed) return;
cov_fd = args->coverage_fd;
@@ -524,7 +630,7 @@ void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
int MaybeOpenCovFile(const char *name) {
CHECK(name);
if (!common_flags()->coverage) return -1;
if (!coverage_enabled) return -1;
return CovOpenFile(true /* packed */, name);
}
@@ -536,28 +642,60 @@ void CovAfterFork(int child_pid) {
coverage_data.AfterFork(child_pid);
}
void InitializeCoverage(bool enabled, const char *dir) {
if (coverage_enabled)
return; // May happen if two sanitizers enable coverage in the same process.
coverage_enabled = enabled;
coverage_dir = dir;
coverage_data.Init();
if (enabled) coverage_data.Enable();
#if !SANITIZER_WINDOWS
if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
#endif
}
void ReInitializeCoverage(bool enabled, const char *dir) {
coverage_enabled = enabled;
coverage_dir = dir;
coverage_data.ReInit();
}
void CoverageUpdateMapping() {
if (coverage_enabled)
CovUpdateMapping(coverage_dir);
}
} // namespace __sanitizer
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u8 *guard) {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
if (__sanitizer::atomic_load(atomic_guard, memory_order_relaxed))
__sanitizer_cov(guard);
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
callee, callee_cache16, 16);
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
coverage_enabled = true;
coverage_dir = common_flags()->coverage_dir;
coverage_data.Init();
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
if (SANITIZER_ANDROID) {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, const char *module_name) {
coverage_data.InitializeGuards(guards, npcs, module_name);
if (!common_flags()->coverage_direct) return;
if (SANITIZER_ANDROID && coverage_enabled) {
// dlopen/dlclose interceptors do not work on Android, so we rely on
// Extend() calls to update .sancov.map.
CovUpdateMapping(GET_CALLER_PC());
CovUpdateMapping(coverage_dir, GET_CALLER_PC());
}
coverage_data.Extend(npcs);
}
@@ -571,11 +709,23 @@ uptr __sanitizer_get_total_unique_coverage() {
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_func_enter(uptr *cache) {
coverage_data.TraceBasicaBlock(cache);
void __sanitizer_cov_trace_func_enter(s32 *id) {
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cov_trace_basic_block(uptr *cache) {
coverage_data.TraceBasicaBlock(cache);
void __sanitizer_cov_trace_basic_block(s32 *id) {
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_reset_coverage() {
coverage_data.ReinitializeGuards();
internal_bzero_aligned16(
coverage_data.data(),
RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_coverage_guards(uptr **data) {
*data = coverage_data.data();
return coverage_data.size();
}
} // extern "C"


@@ -62,8 +62,8 @@ struct CachedMapping {
static CachedMapping cached_mapping;
static StaticSpinMutex mapping_mu;
void CovUpdateMapping(uptr caller_pc) {
if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
if (!common_flags()->coverage_direct) return;
SpinMutexLock l(&mapping_mu);
@@ -71,36 +71,41 @@ void CovUpdateMapping(uptr caller_pc) {
return;
InternalScopedString text(kMaxTextSize);
InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
CHECK(modules.data());
int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
/* filter */ 0);
text.append("%d\n", sizeof(uptr) * 8);
for (int i = 0; i < n_modules; ++i) {
const char *module_name = StripModuleName(modules[i].full_name());
for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
if (modules[i].address_range_executable(j)) {
uptr start = modules[i].address_range_start(j);
uptr end = modules[i].address_range_end(j);
uptr base = modules[i].base_address();
text.append("%zx %zx %zx %s\n", start, end, base, module_name);
if (caller_pc && caller_pc >= start && caller_pc < end)
cached_mapping.SetModuleRange(start, end);
{
InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
CHECK(modules.data());
int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
/* filter */ 0);
text.append("%d\n", sizeof(uptr) * 8);
for (int i = 0; i < n_modules; ++i) {
const char *module_name = StripModuleName(modules[i].full_name());
uptr base = modules[i].base_address();
for (auto iter = modules[i].ranges(); iter.hasNext();) {
const auto *range = iter.next();
if (range->executable) {
uptr start = range->beg;
uptr end = range->end;
text.append("%zx %zx %zx %s\n", start, end, base, module_name);
if (caller_pc && caller_pc >= start && caller_pc < end)
cached_mapping.SetModuleRange(start, end);
}
}
modules[i].clear();
}
}
int err;
InternalScopedString tmp_path(64 +
internal_strlen(common_flags()->coverage_dir));
InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
"%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
internal_getpid());
"%s/%zd.sancov.map.tmp", coverage_dir,
internal_getpid());
CHECK_LE(res, tmp_path.size());
uptr map_fd = OpenFile(tmp_path.data(), true);
if (internal_iserror(map_fd)) {
Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
if (internal_iserror(map_fd, &err)) {
Report(" Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
err);
Die();
}
@@ -111,9 +116,9 @@ void CovUpdateMapping(uptr caller_pc) {
}
internal_close(map_fd);
InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
InternalScopedString path(64 + internal_strlen(coverage_dir));
res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
common_flags()->coverage_dir, internal_getpid());
coverage_dir, internal_getpid());
CHECK_LE(res, path.size());
res = internal_rename(tmp_path.data(), path.data());
if (internal_iserror(res, &err)) {


@@ -50,6 +50,8 @@ class DeadlockDetectorTLS {
if (epoch_ == current_epoch) return;
bv_.clear();
epoch_ = current_epoch;
n_recursive_locks = 0;
n_all_locks_ = 0;
}
uptr getEpoch() const { return epoch_; }
@@ -83,7 +85,8 @@ class DeadlockDetectorTLS {
}
}
// Printf("remLock: %zx %zx\n", lock_id, epoch_);
CHECK(bv_.clearBit(lock_id));
if (!bv_.clearBit(lock_id))
return; // probably addLock happened before flush
if (n_all_locks_) {
for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
@@ -175,6 +178,7 @@ class DeadlockDetector {
recycled_nodes_.clear();
available_nodes_.setAll();
g_.clear();
n_edges_ = 0;
return getAvailableNode(data);
}


@@ -0,0 +1,153 @@
//===-- sanitizer_flag_parser.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
namespace __sanitizer {
LowLevelAllocator FlagParser::Alloc;
class UnknownFlags {
static const int kMaxUnknownFlags = 20;
const char *unknown_flags_[kMaxUnknownFlags];
int n_unknown_flags_;
public:
void Add(const char *name) {
CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
unknown_flags_[n_unknown_flags_++] = name;
}
void Report() {
if (!n_unknown_flags_) return;
Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
for (int i = 0; i < n_unknown_flags_; ++i)
Printf(" %s\n", unknown_flags_[i]);
n_unknown_flags_ = 0;
}
};
UnknownFlags unknown_flags;
void ReportUnrecognizedFlags() {
unknown_flags.Report();
}
char *FlagParser::ll_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
char *s2 = (char*)Alloc.Allocate(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
}
void FlagParser::PrintFlagDescriptions() {
Printf("Available flags for %s:\n", SanitizerToolName);
for (int i = 0; i < n_flags_; ++i)
Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
}
void FlagParser::fatal_error(const char *err) {
Printf("ERROR: %s\n", err);
Die();
}
bool FlagParser::is_space(char c) {
return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
c == '\r';
}
void FlagParser::skip_whitespace() {
while (is_space(buf_[pos_])) ++pos_;
}
void FlagParser::parse_flag() {
uptr name_start = pos_;
while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
if (buf_[pos_] != '=') fatal_error("expected '='");
char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
uptr value_start = ++pos_;
char *value;
if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
char quote = buf_[pos_++];
while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
if (buf_[pos_] == 0) fatal_error("unterminated string");
value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
++pos_; // consume the closing quote
} else {
while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
fatal_error("expected separator or eol");
value = ll_strndup(buf_ + value_start, pos_ - value_start);
}
bool res = run_handler(name, value);
if (!res) fatal_error("Flag parsing failed.");
}
void FlagParser::parse_flags() {
while (true) {
skip_whitespace();
if (buf_[pos_] == 0) break;
parse_flag();
}
// Do a sanity check for certain flags.
if (common_flags_dont_use.malloc_context_size < 1)
common_flags_dont_use.malloc_context_size = 1;
}
void FlagParser::ParseString(const char *s) {
if (!s) return;
// Backup current parser state to allow nested ParseString() calls.
const char *old_buf_ = buf_;
uptr old_pos_ = pos_;
buf_ = s;
pos_ = 0;
parse_flags();
buf_ = old_buf_;
pos_ = old_pos_;
}
bool FlagParser::run_handler(const char *name, const char *value) {
for (int i = 0; i < n_flags_; ++i) {
if (internal_strcmp(name, flags_[i].name) == 0)
return flags_[i].handler->Parse(value);
}
// Unrecognized flag. This is not a fatal error; we may print a warning later.
unknown_flags.Add(name);
return true;
}
void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
const char *desc) {
CHECK_LT(n_flags_, kMaxFlags);
flags_[n_flags_].name = name;
flags_[n_flags_].desc = desc;
flags_[n_flags_].handler = handler;
++n_flags_;
}
FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
}
} // namespace __sanitizer


@@ -0,0 +1,121 @@
//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_common.h"
namespace __sanitizer {
class FlagHandlerBase {
public:
virtual bool Parse(const char *value) { return false; }
};
template <typename T>
class FlagHandler : public FlagHandlerBase {
T *t_;
public:
explicit FlagHandler(T *t) : t_(t) {}
bool Parse(const char *value) final;
};
template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
if (internal_strcmp(value, "0") == 0 ||
internal_strcmp(value, "no") == 0 ||
internal_strcmp(value, "false") == 0) {
*t_ = false;
return true;
}
if (internal_strcmp(value, "1") == 0 ||
internal_strcmp(value, "yes") == 0 ||
internal_strcmp(value, "true") == 0) {
*t_ = true;
return true;
}
Printf("ERROR: Invalid value for bool option: '%s'\n", value);
return false;
}
template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
*t_ = internal_strdup(value);
return true;
}
template <>
inline bool FlagHandler<int>::Parse(const char *value) {
char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
bool ok = *value_end == 0;
if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
return ok;
}
template <>
inline bool FlagHandler<uptr>::Parse(const char *value) {
char *value_end;
*t_ = internal_simple_strtoll(value, &value_end, 10);
bool ok = *value_end == 0;
if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
return ok;
}
class FlagParser {
static const int kMaxFlags = 200;
struct Flag {
const char *name;
const char *desc;
FlagHandlerBase *handler;
} *flags_;
int n_flags_;
const char *buf_;
uptr pos_;
public:
FlagParser();
void RegisterHandler(const char *name, FlagHandlerBase *handler,
const char *desc);
void ParseString(const char *s);
void PrintFlagDescriptions();
static LowLevelAllocator Alloc;
private:
void fatal_error(const char *err);
bool is_space(char c);
void skip_whitespace();
void parse_flags();
void parse_flag();
bool run_handler(const char *name, const char *value);
char *ll_strndup(const char *s, uptr n);
};
template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
T *var) {
FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
parser->RegisterHandler(name, fh, desc);
}
void ReportUnrecognizedFlags();
} // namespace __sanitizer
#endif // SANITIZER_FLAG_REGISTRY_H
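Taken together, a tool's flag setup against this API looks roughly like the following. This is a sketch that only builds inside the sanitizer runtime; the flag names and the wrapper function are invented for illustration:

#include "sanitizer_flag_parser.h"
#include "sanitizer_flags.h"

using namespace __sanitizer;

static bool my_tool_fast_mode;
static int my_tool_max_errors;

static void InitMyToolFlags(const char *options) {
  FlagParser parser;
  RegisterCommonFlags(&parser);  // also registers the "include" handler
  RegisterFlag(&parser, "fast_mode", "Example bool flag.", &my_tool_fast_mode);
  RegisterFlag(&parser, "max_errors", "Example int flag.", &my_tool_max_errors);
  // e.g. options = "fast_mode=1:max_errors=42:include='/tmp/extra.opts'"
  parser.ParseString(options);
  if (common_flags()->help) parser.PrintFlagDescriptions();
  ReportUnrecognizedFlags();  // warn about anything we did not register
}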


@@ -16,6 +16,7 @@
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_flag_parser.h"
namespace __sanitizer {
@@ -34,274 +35,53 @@ IntrusiveList<FlagDescription> flag_descriptions;
# define SANITIZER_NEEDS_SEGV 1
#endif
void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
f->allow_addr2line = false;
f->strip_path_prefix = "";
f->fast_unwind_on_check = false;
f->fast_unwind_on_fatal = false;
f->fast_unwind_on_malloc = true;
f->handle_ioctl = false;
f->malloc_context_size = 1;
f->log_path = "stderr";
f->verbosity = 0;
f->detect_leaks = true;
f->leak_check_at_exit = true;
f->allocator_may_return_null = false;
f->print_summary = true;
f->check_printf = true;
// TODO(glider): tools may want to set different defaults for handle_segv.
f->handle_segv = SANITIZER_NEEDS_SEGV;
f->allow_user_segv_handler = false;
f->use_sigaltstack = true;
f->detect_deadlocks = false;
f->clear_shadow_mmap_threshold = 64 * 1024;
f->color = "auto";
f->legacy_pthread_cond = false;
f->intercept_tls_get_addr = false;
f->coverage = false;
f->coverage_direct = SANITIZER_ANDROID;
f->coverage_dir = ".";
f->full_address_space = false;
f->suppressions = "";
f->print_suppressions = true;
f->disable_coredump = (SANITIZER_WORDSIZE == 64);
f->symbolize_inline_frames = true;
f->stack_trace_format = "DEFAULT";
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
}
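This is the classic X-macro pattern: the same sanitizer_flags.inc is re-included with a different COMMON_FLAG definition each time, so the flag list is written once but expands into defaults here, into struct fields in sanitizer_flags.h, and into parser registrations in RegisterCommonFlags(). A self-contained miniature of the trick (the toy flag list is invented):

#include <cstdio>

// Toy stand-in for sanitizer_flags.inc: the single source of truth.
#define TOY_FLAGS(X)          \
  X(bool, verbose, false)     \
  X(int, max_errors, 20)

struct ToyFlags {
  // Expansion 1: declare one field per flag.
#define TOY_FLAG(Type, Name, Default) Type Name;
  TOY_FLAGS(TOY_FLAG)
#undef TOY_FLAG
  // Expansion 2: assign every default.
  void SetDefaults() {
#define TOY_FLAG(Type, Name, Default) Name = Default;
    TOY_FLAGS(TOY_FLAG)
#undef TOY_FLAG
  }
};

int main() {
  ToyFlags f;
  f.SetDefaults();
  std::printf("verbose=%d max_errors=%d\n", f.verbose, f.max_errors);
  return 0;
}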
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
ParseFlag(str, &f->symbolize, "symbolize",
"If set, use the online symbolizer from common sanitizer runtime to turn "
"virtual addresses to file/line locations.");
ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path",
"Path to external symbolizer. If empty, the tool will search $PATH for "
"the symbolizer.");
ParseFlag(str, &f->allow_addr2line, "allow_addr2line",
"If set, allows online symbolizer to run addr2line binary to symbolize "
"stack traces (addr2line will only be used if llvm-symbolizer binary is "
"unavailable.");
ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix",
"Strips this prefix from file paths in error reports.");
ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check",
"If available, use the fast frame-pointer-based unwinder on "
"internal CHECK failures.");
ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal",
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.");
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc",
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.");
ParseFlag(str, &f->handle_ioctl, "handle_ioctl",
"Intercept and handle ioctl requests.");
ParseFlag(str, &f->malloc_context_size, "malloc_context_size",
"Max number of stack frames kept for each allocation/deallocation.");
ParseFlag(str, &f->log_path, "log_path",
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
"\"stderr\". The default is \"stderr\".");
ParseFlag(str, &f->verbosity, "verbosity",
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).");
ParseFlag(str, &f->detect_leaks, "detect_leaks",
"Enable memory leak detection.");
ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit",
"Invoke leak checking in an atexit handler. Has no effect if "
"detect_leaks=false, or if __lsan_do_leak_check() is called before the "
"handler has a chance to run.");
ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null",
"If false, the allocator will crash instead of returning 0 on "
"out-of-memory.");
ParseFlag(str, &f->print_summary, "print_summary",
"If false, disable printing error summaries in addition to error "
"reports.");
ParseFlag(str, &f->check_printf, "check_printf",
"Check printf arguments.");
ParseFlag(str, &f->handle_segv, "handle_segv",
"If set, registers the tool's custom SEGV handler (both SIGBUS and "
"SIGSEGV on OSX).");
ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler",
"If set, allows user to register a SEGV handler even if the tool "
"registers one.");
ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack",
"If set, uses alternate stack for signal handling.");
ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks",
"If set, deadlock detection is enabled.");
ParseFlag(str, &f->clear_shadow_mmap_threshold,
"clear_shadow_mmap_threshold",
"Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
"memset(). This is the threshold size in bytes.");
ParseFlag(str, &f->color, "color",
"Colorize reports: (always|never|auto).");
ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond",
"Enables support for dynamic libraries linked with libpthread 2.2.5.");
ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr",
"Intercept __tls_get_addr.");
ParseFlag(str, &f->help, "help", "Print the flag descriptions.");
ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb",
"Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
"not a user-facing flag, used mosly for testing the tools");
ParseFlag(str, &f->coverage, "coverage",
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).");
ParseFlag(str, &f->coverage_direct, "coverage_direct",
"If set, coverage information will be dumped directly to a memory "
"mapped file. This way data is not lost even if the process is "
"suddenly killed.");
ParseFlag(str, &f->coverage_dir, "coverage_dir",
"Target directory for coverage dumps. Defaults to the current "
"directory.");
ParseFlag(str, &f->full_address_space, "full_address_space",
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized");
ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
ParseFlag(str, &f->print_suppressions, "print_suppressions",
"Print matched suppressions at exit.");
ParseFlag(str, &f->disable_coredump, "disable_coredump",
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. Ignored on OSes that don't dump core by"
"default and for sanitizers that don't reserve lots of virtual memory.");
ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames",
"Print inlined frames in stacktraces. Defaults to true.");
ParseFlag(str, &f->stack_trace_format, "stack_trace_format",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
f->malloc_context_size = 1;
void CommonFlags::CopyFrom(const CommonFlags &other) {
internal_memcpy(this, &other, sizeof(*this));
}
static bool GetFlagValue(const char *env, const char *name,
const char **value, int *value_length) {
if (env == 0)
return false;
const char *pos = 0;
for (;;) {
pos = internal_strstr(env, name);
if (pos == 0)
class FlagHandlerInclude : public FlagHandlerBase {
static const uptr kMaxIncludeSize = 1 << 15;
FlagParser *parser_;
public:
explicit FlagHandlerInclude(FlagParser *parser) : parser_(parser) {}
bool Parse(const char *value) final {
char *data;
uptr data_mapped_size;
int err;
uptr len =
ReadFileToBuffer(value, &data, &data_mapped_size,
Max(kMaxIncludeSize, GetPageSizeCached()), &err);
if (!len) {
Printf("Failed to read options from '%s': error %d\n", value, err);
return false;
const char *name_end = pos + internal_strlen(name);
if ((pos != env &&
((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
*name_end != '=') {
// Seems to be middle of another flag name or value.
env = pos + 1;
continue;
}
pos = name_end;
break;
parser_->ParseString(data);
UnmapOrDie(data, data_mapped_size);
return true;
}
const char *end;
if (pos[0] != '=') {
end = pos;
} else {
pos += 1;
if (pos[0] == '"') {
pos += 1;
end = internal_strchr(pos, '"');
} else if (pos[0] == '\'') {
pos += 1;
end = internal_strchr(pos, '\'');
} else {
// Read until the next space or colon.
end = pos + internal_strcspn(pos, " :");
}
if (end == 0)
end = pos + internal_strlen(pos);
}
*value = pos;
*value_length = end - pos;
return true;
};
void RegisterIncludeFlag(FlagParser *parser, CommonFlags *cf) {
FlagHandlerInclude *fh_include =
new (FlagParser::Alloc) FlagHandlerInclude(parser); // NOLINT
parser->RegisterHandler("include", fh_include,
"read more options from the given file");
}
static bool StartsWith(const char *flag, int flag_length, const char *value) {
if (!flag || !value)
return false;
int value_length = internal_strlen(value);
return (flag_length >= value_length) &&
(0 == internal_strncmp(flag, value, value_length));
}
void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &cf->Name);
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
static LowLevelAllocator allocator_for_flags;
// The linear scan is suboptimal, but the number of flags is relatively small.
bool FlagInDescriptionList(const char *name) {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
while (it.hasNext()) {
if (!internal_strcmp(it.next()->name, name)) return true;
}
return false;
}
void AddFlagDescription(const char *name, const char *description) {
if (FlagInDescriptionList(name)) return;
FlagDescription *new_description = new(allocator_for_flags) FlagDescription;
new_description->name = name;
new_description->description = description;
flag_descriptions.push_back(new_description);
}
// TODO(glider): put the descriptions inside CommonFlags.
void PrintFlagDescriptions() {
IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions);
Printf("Available flags for %s:\n", SanitizerToolName);
while (it.hasNext()) {
FlagDescription *descr = it.next();
Printf("\t%s\n\t\t- %s\n", descr->name, descr->description);
}
}
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
if (StartsWith(value, value_length, "0") ||
StartsWith(value, value_length, "no") ||
StartsWith(value, value_length, "false"))
*flag = false;
if (StartsWith(value, value_length, "1") ||
StartsWith(value, value_length, "yes") ||
StartsWith(value, value_length, "true"))
*flag = true;
}
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<int>(internal_atoll(value));
}
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
*flag = static_cast<uptr>(internal_atoll(value));
}
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr) {
const char *value;
int value_length;
AddFlagDescription(name, descr);
if (!GetFlagValue(env, name, &value, &value_length))
return;
// Copy the flag value. Don't use locks here, as flags are parsed at
// tool startup.
char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1));
internal_memcpy(value_copy, value, value_length);
value_copy[value_length] = '\0';
*flag = value_copy;
RegisterIncludeFlag(parser, cf);
}
} // namespace __sanitizer


@@ -18,62 +18,38 @@
namespace __sanitizer {
void ParseFlag(const char *env, bool *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, int *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, uptr *flag,
const char *name, const char *descr);
void ParseFlag(const char *env, const char **flag,
const char *name, const char *descr);
struct CommonFlags {
bool symbolize;
const char *external_symbolizer_path;
bool allow_addr2line;
const char *strip_path_prefix;
bool fast_unwind_on_check;
bool fast_unwind_on_fatal;
bool fast_unwind_on_malloc;
bool handle_ioctl;
int malloc_context_size;
const char *log_path;
int verbosity;
bool detect_leaks;
bool leak_check_at_exit;
bool allocator_may_return_null;
bool print_summary;
bool check_printf;
bool handle_segv;
bool allow_user_segv_handler;
bool use_sigaltstack;
bool detect_deadlocks;
uptr clear_shadow_mmap_threshold;
const char *color;
bool legacy_pthread_cond;
bool intercept_tls_get_addr;
bool help;
uptr mmap_limit_mb;
bool coverage;
bool coverage_direct;
const char *coverage_dir;
bool full_address_space;
const char *suppressions;
bool print_suppressions;
bool disable_coredump;
bool symbolize_inline_frames;
const char *stack_trace_format;
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
void SetDefaults();
void CopyFrom(const CommonFlags &other);
};
inline CommonFlags *common_flags() {
extern CommonFlags common_flags_dont_use;
// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
extern CommonFlags common_flags_dont_use;
inline const CommonFlags *common_flags() {
return &common_flags_dont_use;
}
void SetCommonFlagsDefaults(CommonFlags *f);
void ParseCommonFlagsFromString(CommonFlags *f, const char *str);
void PrintFlagDescriptions();
inline void SetCommonFlagsDefaults() {
common_flags_dont_use.SetDefaults();
}
// This function can only be used to setup tool-specific overrides for
// CommonFlags defaults. Generally, it should only be used right after
// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
// only during the flags initialization (i.e. before they are used for
// the first time).
inline void OverrideCommonFlags(const CommonFlags &cf) {
common_flags_dont_use.CopyFrom(cf);
}
class FlagParser;
void RegisterCommonFlags(FlagParser *parser,
CommonFlags *cf = &common_flags_dont_use);
void RegisterIncludeFlag(FlagParser *parser, CommonFlags *cf);
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H


@@ -0,0 +1,148 @@
//===-- sanitizer_flags.inc -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes common flags available in all sanitizers.
//
//===----------------------------------------------------------------------===//
#ifndef COMMON_FLAG
#error "Define COMMON_FLAG prior to including this file!"
#endif
// COMMON_FLAG(Type, Name, DefaultValue, Description)
// Supported types: bool, const char *, int, uptr.
// Default value must be a compile-time constant.
// Description must be a string literal.
COMMON_FLAG(
bool, symbolize, true,
"If set, use the online symbolizer from common sanitizer runtime to turn "
"virtual addresses to file/line locations.")
COMMON_FLAG(
const char *, external_symbolizer_path, 0,
"Path to external symbolizer. If empty, the tool will search $PATH for "
"the symbolizer.")
COMMON_FLAG(
bool, allow_addr2line, false,
"If set, allows online symbolizer to run addr2line binary to symbolize "
"stack traces (addr2line will only be used if llvm-symbolizer binary is "
"unavailable.")
COMMON_FLAG(const char *, strip_path_prefix, "",
"Strips this prefix from file paths in error reports.")
COMMON_FLAG(bool, fast_unwind_on_check, false,
"If available, use the fast frame-pointer-based unwinder on "
"internal CHECK failures.")
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.")
COMMON_FLAG(bool, fast_unwind_on_malloc, true,
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(int, malloc_context_size, 1,
"Max number of stack frames kept for each allocation/deallocation.")
COMMON_FLAG(
const char *, log_path, "stderr",
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
"\"stderr\". The default is \"stderr\".")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
"detect_leaks=false, or if __lsan_do_leak_check() is called before the "
"handler has a chance to run.")
COMMON_FLAG(bool, allocator_may_return_null, false,
"If false, the allocator will crash instead of returning 0 on "
"out-of-memory.")
COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
"If set, registers the tool's custom SEGV handler (both SIGBUS and "
"SIGSEGV on OSX).")
COMMON_FLAG(bool, allow_user_segv_handler, false,
"If set, allows user to register a SEGV handler even if the tool "
"registers one.")
COMMON_FLAG(bool, use_sigaltstack, true,
"If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, false,
"If set, deadlock detection is enabled.")
COMMON_FLAG(
uptr, clear_shadow_mmap_threshold, 64 * 1024,
"Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
"memset(). This is the threshold size in bytes.")
COMMON_FLAG(const char *, color, "auto",
"Colorize reports: (always|never|auto).")
COMMON_FLAG(
bool, legacy_pthread_cond, false,
"Enables support for dynamic libraries linked with libpthread 2.2.5.")
COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
COMMON_FLAG(uptr, mmap_limit_mb, 0,
"Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
"not a user-facing flag, used mosly for testing the tools")
COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
"Hard RSS limit in Mb."
" If non-zero, a background thread is spawned at startup"
" which periodically reads RSS and aborts the process if the"
" limit is reached")
COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
"Soft RSS limit in Mb."
" If non-zero, a background thread is spawned at startup"
" which periodically reads RSS. If the limit is reached"
" all subsequent malloc/new calls will fail or return NULL"
" (depending on the value of allocator_may_return_null)"
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
COMMON_FLAG(
bool, coverage, false,
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).")
// On by default, but works only if coverage == true.
COMMON_FLAG(bool, coverage_pcs, true,
"If set (and if 'coverage' is set too), the coverage information "
"will be dumped as a set of PC offsets for every module.")
COMMON_FLAG(bool, coverage_bitset, false,
"If set (and if 'coverage' is set too), the coverage information "
"will also be dumped as a bitset to a separate file.")
COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
"If set, coverage information will be dumped directly to a memory "
"mapped file. This way data is not lost even if the process is "
"suddenly killed.")
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
"Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
"dumping a 16T+ core file. Ignored on OSes that don't dump core by"
"default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")


@@ -0,0 +1,58 @@
//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// This header declares the sanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented program
// can call them.
//
// See also include/sanitizer/common_interface_defs.h
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERFACE_INTERNAL_H
#define SANITIZER_INTERFACE_INTERNAL_H
#include "sanitizer_internal_defs.h"
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
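Because __sanitizer_report_error_summary is declared weak above and documented as overridable, a client program can supply its own strong definition; a minimal sketch of such an override (the wrapper text is invented):

#include <cstdio>

// A strong definition in the instrumented program overrides the weak one in
// the runtime; the tool calls it once per reported error.
extern "C" void __sanitizer_report_error_summary(const char *error_summary) {
  std::fprintf(stderr, "my-wrapper saw: %s\n", error_summary);
}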


@@ -15,6 +15,10 @@
#include "sanitizer_platform.h"
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
#endif
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
@@ -48,11 +52,6 @@
# define __has_feature(x) 0
#endif
// Enable sanitizer compilation for pre-C++11
#if __cplusplus < 201103L
#define nullptr 0
#endif
// For portability reasons we do not include stddef.h, stdint.h or any other
// system header, but we do need some basic types that are not defined
// in a portable way by the language itself.
@@ -86,8 +85,9 @@ typedef int fd_t;
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
// like pread and mmap, as opposed to pread64 and mmap64.
// Mac and Linux/x86-64 are special.
#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__))
// FreeBSD, Mac and Linux/x86-64 are special.
#if SANITIZER_FREEBSD || SANITIZER_MAC || \
(SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
@@ -101,41 +101,6 @@ typedef u32 operator_new_size_type;
#endif
} // namespace __sanitizer
extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
typedef struct {
int coverage_sandboxed;
__sanitizer::sptr coverage_fd;
unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;
// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u8 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
const void *end,
const void *old_mid,
const void *new_mid);
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
const void *end);
} // extern "C"
using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
@@ -245,7 +210,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
#if TSAN_DEBUG
#if SANITIZER_DEBUG
#define DCHECK(a) CHECK(a)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
@@ -325,4 +290,11 @@ extern "C" void* _ReturnAddress(void);
} while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
}
// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER \
do { \
volatile uptr enable_fp; \
enable_fp = GET_CURRENT_FRAME(); \
} while (0)
#endif // SANITIZER_DEFS_H


@@ -28,6 +28,15 @@ void *internal_memchr(const void *s, int c, uptr n) {
return 0;
}
void *internal_memrchr(const void *s, int c, uptr n) {
const char *t = (const char *)s;
void *res = nullptr;
for (uptr i = 0; i < n; ++i, ++t) {
if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
}
return res;
}
int internal_memcmp(const void* s1, const void* s2, uptr n) {
const char *t1 = (const char *)s1;
const char *t2 = (const char *)s2;
@@ -101,6 +110,14 @@ char* internal_strdup(const char *s) {
return s2;
}
char* internal_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
char *s2 = (char*)InternalAlloc(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
}
int internal_strcmp(const char *s1, const char *s2) {
while (true) {
unsigned c1 = *s1;


@@ -26,6 +26,7 @@ namespace __sanitizer {
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
@@ -38,6 +39,7 @@ char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
char *internal_strndup(const char *s, uptr n);
uptr internal_strlen(const char *s);
char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n);
@@ -98,6 +100,25 @@ int internal_fork();
// Threading
uptr internal_sched_yield();
// These functions call appropriate pthread_ functions directly, bypassing
// the interceptor. They are weak and may not be present in some tools.
SANITIZER_WEAK_ATTRIBUTE
int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
void *param);
SANITIZER_WEAK_ATTRIBUTE
int real_pthread_join(void *th, void **ret);
#define DEFINE_REAL_PTHREAD_FUNCTIONS \
namespace __sanitizer { \
int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
void *param) { \
return REAL(pthread_create)(th, attr, callback, param); \
} \
int real_pthread_join(void *th, void **ret) { \
return REAL(pthread_join(th, ret)); \
} \
} // namespace __sanitizer
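The two real_pthread_* declarations above are weak, so the common runtime links even in tools that never define them. A tool that does intercept the pthread functions expands this macro once, where its INTERCEPTOR definitions are visible; a sketch of the intended use (the file name is an example):

// In a tool's interceptor file, e.g. after
// INTERCEPTOR(int, pthread_create, ...) and
// INTERCEPTOR(int, pthread_join, ...) have been defined:
DEFINE_REAL_PTHREAD_FUNCTIONS  // NOLINT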
// Error handling
bool internal_iserror(uptr retval, int *rverrno = 0);


@@ -19,24 +19,18 @@ namespace __sanitizer {
LibIgnore::LibIgnore(LinkerInitialized) {
}
void LibIgnore::Init(const SuppressionContext &supp) {
void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
BlockingMutexLock lock(&mutex_);
CHECK_EQ(count_, 0);
const uptr n = supp.SuppressionCount();
for (uptr i = 0; i < n; i++) {
const Suppression *s = supp.SuppressionAt(i);
if (s->type != SuppressionLib)
continue;
if (count_ >= kMaxLibs) {
Report("%s: too many called_from_lib suppressions (max: %d)\n",
SanitizerToolName, kMaxLibs);
Die();
}
Lib *lib = &libs_[count_++];
lib->templ = internal_strdup(s->templ);
lib->name = 0;
lib->loaded = false;
if (count_ >= kMaxLibs) {
Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
kMaxLibs);
Die();
}
Lib *lib = &libs_[count_++];
lib->templ = internal_strdup(name_templ);
lib->name = nullptr;
lib->real_name = nullptr;
lib->loaded = false;
}
void LibIgnore::OnLibraryLoaded(const char *name) {


@@ -8,8 +8,8 @@
//===----------------------------------------------------------------------===//
//
// LibIgnore allows ignoring all interceptors called from a particular set
// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions
// from the provided SuppressionContext; finds code ranges for the libraries;
// of dynamic libraries. LibIgnore can be initialized with several templates
// of names of libraries to be ignored. It finds code ranges for the libraries;
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//
@@ -19,7 +19,6 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_suppressions.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"
@@ -29,8 +28,8 @@ class LibIgnore {
public:
explicit LibIgnore(LinkerInitialized);
// Fetches all "called_from_lib" suppressions from the SuppressionContext.
void Init(const SuppressionContext &supp);
// Must be called during initialization.
void AddIgnoredLibrary(const char *name_templ);
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);

View File

@ -31,6 +31,17 @@
#include <asm/param.h>
#endif
// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access it from asm/stat.h without conflicting with the definition in
// sys/stat.h, we use this trick.
#if defined(__mips64)
#include <sys/types.h>
#define stat kernel_stat
#include <asm/stat.h>
#undef stat
#endif
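// Spelling out the trick above: while asm/stat.h is preprocessed, every
// 'stat' token is rewritten to 'kernel_stat', so the header's
// 'struct stat' definition lands in the type 'struct kernel_stat' and no
// longer collides with the 'struct stat' later provided by sys/stat.h.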
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
@ -98,14 +109,16 @@ namespace __sanitizer {
#endif
// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset) {
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
u64 offset) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
// mmap2 specifies file offset in 4096-byte units.
CHECK(IsAligned(offset, 4096));
return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
offset);
offset / 4096);
#endif
}
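// Worked example of the offset conversion above (a sketch, not part of
// this diff; fd is assumed to be an already-open descriptor): mapping a
// page at byte offset 8192 must pass 8192 / 4096 == 2 to mmap2, since
// mmap2 counts offsets in 4096-byte units; passing the raw byte offset
// would map from byte 8192 * 4096 instead.
//
//   uptr p = internal_mmap(nullptr, 4096, PROT_READ, MAP_PRIVATE, fd,
//                          8192);  // divided by 4096 on the mmap2 path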
@ -179,6 +192,26 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
}
#endif
#if defined(__mips64)
static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
out->st_ino = in->st_ino;
out->st_mode = in->st_mode;
out->st_nlink = in->st_nlink;
out->st_uid = in->st_uid;
out->st_gid = in->st_gid;
out->st_rdev = in->st_rdev;
out->st_size = in->st_size;
out->st_blksize = in->st_blksize;
out->st_blocks = in->st_blocks;
out->st_atime = in->st_atime_nsec;
out->st_mtime = in->st_mtime_nsec;
out->st_ctime = in->st_ctime_nsec;
}
#endif
uptr internal_stat(const char *path, void *buf) {
#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(stat), path, buf);
@ -186,7 +219,15 @@ uptr internal_stat(const char *path, void *buf) {
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, 0);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
# if defined(__mips64)
// For mips64, stat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(stat), path, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
return res;
# else
return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
@ -381,33 +422,6 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
#endif
uptr GetRSS() {
uptr fd = OpenFile("/proc/self/statm", false);
if ((sptr)fd < 0)
return 0;
char buf[64];
uptr len = internal_read(fd, buf, sizeof(buf) - 1);
internal_close(fd);
if ((sptr)len <= 0)
return 0;
buf[len] = 0;
// The format of the file is:
// 1084 89 69 11 0 79 0
// We need the second number which is RSS in 4K units.
char *pos = buf;
// Skip the first number.
while (*pos >= '0' && *pos <= '9')
pos++;
// Skip whitespace.
while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
pos++;
// Read the number.
uptr rss = 0;
while (*pos >= '0' && *pos <= '9')
rss = rss * 10 + *pos++ - '0';
return rss * 4096;
}
static void GetArgsAndEnv(char*** argv, char*** envp) {
#if !SANITIZER_GO
if (&__libc_stack_end) {
@ -435,32 +449,18 @@ void ReExec() {
Die();
}
// Stub implementation of GetThreadStackAndTls for Go.
#if SANITIZER_GO
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
*tls_size = 0;
}
#endif // SANITIZER_GO
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
MtxSleeping = 2
};
BlockingMutex::BlockingMutex(LinkerInitialized) {
CHECK_EQ(owner_, 0);
}
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
void BlockingMutex::Lock() {
CHECK_EQ(owner_, 0);
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
return;
@ -760,6 +760,7 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
#if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
CHECK_NE(map, nullptr);
#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
@ -859,6 +860,13 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "rsp", "memory", "r11", "rcx");
return res;
}
#elif defined(__mips__)
// TODO(sagarthakur): the clone function is to be rewritten in assembly.
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
return clone(fn, child_stack, flags, arg, parent_tidptr,
newtls, child_tidptr);
}
#endif // defined(__x86_64__) && SANITIZER_LINUX
#if SANITIZER_ANDROID
@ -896,9 +904,30 @@ void GetExtraActivationFlags(char *buf, uptr size) {
#endif
bool IsDeadlySignal(int signum) {
return (signum == SIGSEGV) && common_flags()->handle_segv;
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
#ifndef SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked; otherwise the new thread can
// steal user signals.
__sanitizer_sigset_t set, old;
internal_sigfillset(&set);
internal_sigprocmask(SIG_SETMASK, &set, &old);
void *th;
real_pthread_create(&th, 0, (void*(*)(void *arg))func, arg);
internal_sigprocmask(SIG_SETMASK, &old, 0);
return th;
}
void internal_join_thread(void *th) {
real_pthread_join(th, 0);
}
#else
void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
#endif
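// The same pattern in plain POSIX terms (a hedged sketch; assumes a
// pthreads environment and is not part of this import). A new thread
// inherits its creator's signal mask, so filling the mask around
// pthread_create() keeps process-directed signals away from the helper
// thread and leaves them for user handlers:
//
//   sigset_t set, old;
//   sigfillset(&set);
//   pthread_sigmask(SIG_SETMASK, &set, &old);
//   pthread_t th;
//   pthread_create(&th, nullptr, func, arg);
//   pthread_sigmask(SIG_SETMASK, &old, nullptr);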
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX

View File

@ -43,7 +43,7 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__)
#if defined(__x86_64__) || defined(__mips__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif

View File

@ -58,8 +58,10 @@ real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) {
if (real_pthread_attr_getstack)
#if !SANITIZER_GO
if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
#endif
return pthread_attr_getstack((pthread_attr_t *)attr, addr, size);
}
@ -67,8 +69,10 @@ SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
if (real_sigaction)
#if !SANITIZER_GO
if (&real_sigaction)
return real_sigaction(signum, act, oldact);
#endif
return sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
}
@ -120,6 +124,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
*stack_bottom = (uptr)stackaddr;
}
#if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (f == 0)
@ -130,6 +135,7 @@ bool SetEnv(const char *name, const char *value) {
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
#endif
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
@ -162,8 +168,22 @@ static uptr g_tls_size;
# define DL_INTERNAL_FUNCTION
#endif
#if defined(__mips__)
// TlsPreTcbSize includes the size of struct pthread_descr and the size of
// the TCB head structure. It lies before the static TLS blocks.
static uptr TlsPreTcbSize() {
const uptr kTcbHead = 16;
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
(ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
InitTlsSize();
g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign - 1) & ~(kTlsAlign - 1);
return kTlsPreTcbSize;
}
#endif
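// The rounding expression above is the usual power-of-two align-up,
// (x + a - 1) & ~(a - 1). Worked example with kTlsAlign == 16 and the
// mips64 ThreadDescriptorSize() of 1776: 1776 + 16 == 1792, and
// (1792 + 15) & ~15 == 1792 (already aligned); a hypothetical 1793 would
// round up to (1793 + 15) & ~15 == 1808.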
void InitTlsSize() {
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
get_tls_func get_tls;
void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
@ -178,7 +198,8 @@ void InitTlsSize() {
#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID
}
#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__)) \
&& SANITIZER_LINUX
// sizeof(struct thread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@ -186,6 +207,7 @@ uptr ThreadDescriptorSize() {
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
#if defined(__x86_64__) || defined(__i386__)
#ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
@ -208,6 +230,8 @@ uptr ThreadDescriptorSize() {
val = FIRST_32_SECOND_64(1168, 1776);
else if (minor <= 12)
val = FIRST_32_SECOND_64(1168, 2288);
else if (minor == 13)
val = FIRST_32_SECOND_64(1168, 2304);
else
val = FIRST_32_SECOND_64(1216, 2304);
}
@ -215,6 +239,13 @@ uptr ThreadDescriptorSize() {
atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
return val;
}
#endif
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
if (val)
atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
return val;
#endif
return 0;
}
@ -232,12 +263,24 @@ uptr ThreadSelf() {
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__mips__)
// MIPS uses TLS variant I. The thread pointer (in hardware register $29)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
// TCB and the size of pthread_descr.
const uptr kTlsTcbOffset = 0x7000;
uptr thread_pointer;
asm volatile(".set push;\
.set mips64r2;\
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# else
# error "unsupported CPU arch"
# endif
return descr_addr;
}
#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX
#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
@ -259,6 +302,7 @@ uptr ThreadSelf() {
}
#endif // SANITIZER_FREEBSD
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX
# if defined(__x86_64__) || defined(__i386__)
@ -266,6 +310,9 @@ static void GetTls(uptr *addr, uptr *size) {
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
# elif defined(__mips__)
*addr = ThreadSelf();
*size = GetTlsSize();
# else
*addr = 0;
*size = 0;
@ -287,7 +334,9 @@ static void GetTls(uptr *addr, uptr *size) {
# error "Unknown OS"
#endif
}
#endif
#if !SANITIZER_GO
uptr GetTlsSize() {
#if SANITIZER_FREEBSD
uptr addr, size;
@ -297,9 +346,14 @@ uptr GetTlsSize() {
return g_tls_size;
#endif
}
#endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
// Stub implementation for Go.
*stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
@ -316,6 +370,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*tls_addr = *stk_addr + *stk_size;
}
}
#endif
}
void AdjustStackSize(void *attr_) {
@ -420,6 +475,45 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
#endif
}
// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
// for some reason, e.g. due to a sandbox.
static uptr GetRSSFromGetrusage() {
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox.
return 0;
return usage.ru_maxrss << 10;  // ru_maxrss is in KiB.
}
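// Worked conversion for the fallback above: ru_maxrss == 2048 (KiB)
// yields 2048 << 10 == 2097152 bytes, i.e. 2 MiB of peak RSS.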
uptr GetRSS() {
if (!common_flags()->can_use_proc_maps_statm)
return GetRSSFromGetrusage();
uptr fd = OpenFile("/proc/self/statm", false);
if ((sptr)fd < 0)
return GetRSSFromGetrusage();
char buf[64];
uptr len = internal_read(fd, buf, sizeof(buf) - 1);
internal_close(fd);
if ((sptr)len <= 0)
return 0;
buf[len] = 0;
// The format of the file is:
// 1084 89 69 11 0 79 0
// We need the second number which is RSS in pages.
char *pos = buf;
// Skip the first number.
while (*pos >= '0' && *pos <= '9')
pos++;
// Skip whitespace.
while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
pos++;
// Read the number.
uptr rss = 0;
while (*pos >= '0' && *pos <= '9')
rss = rss * 10 + *pos++ - '0';
return rss * GetPageSizeCached();
}
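// Worked example for the parser above (not part of the diff): given the
// statm line "1084 89 69 11 0 79 0" and a cached page size of 4096, the
// loop skips "1084", reads rss = 89, and returns 89 * 4096 == 364544
// bytes (~356 KiB) of resident memory.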
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX

View File

@ -115,21 +115,25 @@ struct IntrusiveList {
}
}
class Iterator {
template<class ListTy, class ItemTy>
class IteratorBase {
public:
explicit Iterator(IntrusiveList<Item> *list)
explicit IteratorBase(ListTy *list)
: list_(list), current_(list->first_) { }
Item *next() {
Item *ret = current_;
ItemTy *next() {
ItemTy *ret = current_;
if (current_) current_ = current_->next;
return ret;
}
bool hasNext() const { return current_ != 0; }
private:
IntrusiveList<Item> *list_;
Item *current_;
ListTy *list_;
ItemTy *current_;
};
typedef IteratorBase<IntrusiveList<Item>, Item> Iterator;
typedef IteratorBase<const IntrusiveList<Item>, const Item> ConstIterator;
// private, don't use directly.
uptr size_;
Item *first_;
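// A hedged usage sketch for the templated iterator (assumes an already
// populated 'const IntrusiveList<Item> list'; illustrative only). The
// template split is what newly enables iteration over const lists:
//
//   IntrusiveList<Item>::ConstIterator it(&list);
//   while (it.hasNext()) {
//     const Item *item = it.next();
//     // ... read-only access to item ...
//   }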

View File

@ -109,6 +109,10 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
return readlink(path, buf, bufsize);
}
uptr internal_unlink(const char *path) {
return unlink(path);
}
uptr internal_sched_yield() {
return sched_yield();
}
@ -213,10 +217,6 @@ uptr GetPageSize() {
return sysconf(_SC_PAGESIZE);
}
BlockingMutex::BlockingMutex(LinkerInitialized) {
// We assume that OS_SPINLOCK_INIT is zero
}
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
@ -298,7 +298,11 @@ MacosVersion GetMacosVersionInternal() {
case '2': return MACOS_VERSION_MOUNTAIN_LION;
case '3': return MACOS_VERSION_MAVERICKS;
case '4': return MACOS_VERSION_YOSEMITE;
default: return MACOS_VERSION_UNKNOWN;
default:
if (IsDigit(version[1]))
return MACOS_VERSION_UNKNOWN_NEWER;
else
return MACOS_VERSION_UNKNOWN;
}
}
default: return MACOS_VERSION_UNKNOWN;
@ -321,6 +325,9 @@ uptr GetRSS() {
return 0;
}
void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }
} // namespace __sanitizer
#endif // SANITIZER_MAC

View File

@ -27,6 +27,7 @@ enum MacosVersion {
MACOS_VERSION_MOUNTAIN_LION,
MACOS_VERSION_MAVERICKS,
MACOS_VERSION_YOSEMITE,
MACOS_VERSION_UNKNOWN_NEWER
};
MacosVersion GetMacosVersion();

View File

@ -73,7 +73,13 @@ class SpinMutex : public StaticSpinMutex {
class BlockingMutex {
public:
#if SANITIZER_WINDOWS
// Windows does not currently support LinkerInitialized
explicit BlockingMutex(LinkerInitialized);
#else
explicit constexpr BlockingMutex(LinkerInitialized)
: opaque_storage_ {0, }, owner_(0) {}
#endif
BlockingMutex();
void Lock();
void Unlock();

View File

@ -57,7 +57,7 @@
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MEMCHR 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
@ -70,7 +70,7 @@
#define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREADV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
@ -85,6 +85,7 @@
#ifndef SANITIZER_INTERCEPT_PRINTF
# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD
# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
#endif
@ -93,12 +94,13 @@
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
@ -109,10 +111,10 @@
#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
@ -125,7 +127,7 @@
#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined (__x86_64)) // NOLINT
(defined(__i386) || defined (__x86_64) || defined (__mips64)) // NOLINT
#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
@ -133,12 +135,15 @@
#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSNRTOMBS \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
@ -147,7 +152,8 @@
#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WORDEXP \
SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
@ -158,21 +164,22 @@
#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_HOST SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ETHER_HOST \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SHMCTL \
((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64)
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_MAC || SI_LINUX_NOT_ANDROID
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
@ -193,7 +200,7 @@
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID

View File

@ -116,6 +116,9 @@
#if SANITIZER_LINUX || SANITIZER_FREEBSD
# include <utime.h>
# include <sys/ptrace.h>
# if defined(__mips64)
# include <asm/ptrace.h>
# endif
#endif
#if !SANITIZER_ANDROID
@ -139,6 +142,9 @@
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
#if defined(__mips64)
# include <sys/procfs.h>
#endif
#include <sys/user.h>
#include <sys/ustat.h>
#include <linux/cyclades.h>
@ -283,14 +289,19 @@ namespace __sanitizer {
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64))
(defined(__i386) || defined(__x86_64) || defined(__mips64))
#if defined(__mips64)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#ifdef __x86_64
#endif // __mips64
#if (defined(__x86_64) || defined(__mips64))
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif
#endif // __x86_64 || __mips64
int ptrace_peektext = PTRACE_PEEKTEXT;
int ptrace_peekdata = PTRACE_PEEKDATA;
@ -1060,7 +1071,13 @@ CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif
#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
/* On aarch64, glibc 2.20 and earlier provided an incorrect mode field. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
#endif
CHECK_TYPE_SIZE(shmid_ds);
CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);

View File

@ -18,6 +18,15 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD
// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
// incorporates the map structure.
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
#else
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
#endif // !SANITIZER_FREEBSD
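// A hedged usage sketch (library name and callback are illustrative):
// callers that previously cast a dlopen() handle straight to link_map*
// now go through the macro so the FreeBSD Obj_Entry offset is applied in
// one place:
//
//   void *h = dlopen("libfoo.so", RTLD_LAZY);
//   link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(h);
//   if (map) ForEachMappedRegion(map, cb);  // cb: void (*)(const void *, uptr)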
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
@ -169,7 +178,7 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
#elif defined(__mips__)
#elif defined(__mips__) || defined(__aarch64__)
unsigned int mode;
unsigned short __seq;
unsigned short __pad1;
@ -538,6 +547,10 @@ namespace __sanitizer {
#if SANITIZER_FREEBSD
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
#elif defined(__mips__)
struct __sanitizer_kernel_sigset_t {
u8 sig[16];
};
#else
struct __sanitizer_kernel_sigset_t {
u8 sig[8];
@ -686,7 +699,7 @@ namespace __sanitizer {
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64))
(defined(__i386) || defined(__x86_64) || defined(__mips64))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
