Merge compiler-rt r291274.

Dimitry Andric 2017-01-08 19:47:17 +00:00
commit f00b4812b9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=311697
234 changed files with 12259 additions and 3975 deletions

View File

@@ -117,6 +117,16 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
// Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
// The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
size_t out_buf_size);
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);
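// Illustrative usage (not part of this diff): a minimal sketch assuming an
// ASan-instrumented build. "%p %F %L" is one of the format directives from
// sanitizer_stacktrace_printer.h (pc, function name, file:line).
//   char buf[256];
//   __sanitizer_symbolize_pc(__builtin_return_address(0), "%p %F %L",
//                            buf, sizeof(buf));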
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
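// Illustrative usage (not part of this diff): flush stdio buffers before the
// process dies on an error report.
//   static void FlushOnDeath() { fflush(nullptr); }
//   __sanitizer_set_death_callback(FlushOnDeath);  // pass 0 to unset again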
@@ -169,7 +179,16 @@ extern "C" {
// use-after-return detection.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
const void *bottom, size_t size);
void __sanitizer_finish_switch_fiber(void *fake_stack_save);
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
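// Illustrative call pattern (not part of this diff): a sketch assuming
// ucontext-based fibers; all names below are hypothetical.
//   void *fake_stack = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack, next_bottom, next_size);
//   swapcontext(&cur_ctx, &next_ctx);
//   // Control is back on this fiber: restore the previous stack bounds.
//   const void *bottom_old = nullptr;
//   size_t size_old = 0;
//   __sanitizer_finish_switch_fiber(fake_stack, &bottom_old, &size_old);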
// Get full module name and calculate pc offset within it.
// Returns 1 if pc belongs to some module, 0 if module was not found.
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
size_t module_path_len,
void **pc_offset);
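// Illustrative usage (not part of this diff):
//   char module[1024];
//   void *offset = nullptr;
//   if (__sanitizer_get_module_and_offset_for_pc(
//           __builtin_return_address(0), module, sizeof(module), &offset))
//     printf("%s+%p\n", module, offset);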
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -23,6 +23,11 @@ extern "C" {
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Dump collected coverage info. Sorts pcs by module into individual
// .sancov files.
void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
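// Illustrative usage (not part of this diff): hand the dumper an array of PCs
// gathered elsewhere (e.g. by a fuzzer); 'pcs' and 'n_pcs' are hypothetical.
//   __sanitizer_dump_coverage(pcs, n_pcs);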
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
@@ -41,13 +46,6 @@ extern "C" {
// Some of the entries in *data will be zero.
uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
// Set *data to the growing buffer with covered PCs and return the size
// of the buffer. The entries are never zero.
// When only unique pcs are collected, the size is equal to
// __sanitizer_get_total_unique_coverage.
// WARNING: EXPERIMENTAL API.
uintptr_t __sanitizer_get_coverage_pc_buffer(uintptr_t **data);
// The coverage instrumentation may optionally provide imprecise counters.
// Rather than exposing the counter values to the user we instead map
// the counters to a bitset.
@@ -65,6 +63,7 @@ extern "C" {
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
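// Illustrative usage (not part of this diff), assuming counters are enabled
// and using the companion __sanitizer_get_number_of_counters() referenced
// above; the buffer must be 8-aligned, as noted.
//   uintptr_t n = __sanitizer_get_number_of_counters();
//   uint8_t *bitset = (uint8_t *)aligned_alloc(8, n);
//   uintptr_t new_bits =
//       __sanitizer_update_counter_bitset_and_clear_counters(bitset);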
#ifdef __cplusplus
} // extern "C"
#endif

View File

@@ -0,0 +1,65 @@
//===-- xray_interface.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// APIs for controlling XRay functionality explicitly.
//===----------------------------------------------------------------------===//
#ifndef XRAY_XRAY_INTERFACE_H
#define XRAY_XRAY_INTERFACE_H
#include <cstdint>
extern "C" {
enum XRayEntryType { ENTRY = 0, EXIT = 1, TAIL = 2 };
// Provide a function to invoke when instrumentation points are hit. This is
// a user-visible control surface that overrides the default implementation. The
// function provided should take the following arguments:
//
// - function id: an identifier that indicates the id of a function; this id
// is generated by xray; the mapping between the function id
// and the actual function pointer is available through
// __xray_table.
// - entry type: identifies what kind of instrumentation point was encountered
// (function entry, function exit, etc.). See the enum
// XRayEntryType for more details.
//
// The user handler must correctly handle spurious calls that happen after the
// handler is removed or replaced with another handler, because it would be too
// costly for the XRay runtime to avoid such spurious calls.
// To prevent circular calling, the handler function itself and all its
// direct and indirect callees must not be instrumented with XRay, which can be
// achieved by marking them all with: __attribute__((xray_never_instrument))
//
// Returns 1 on success, 0 on error.
extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType));
// This removes whatever the currently provided handler is. Returns 1 on
// success, 0 on error.
extern int __xray_remove_handler();
enum XRayPatchingStatus {
NOT_INITIALIZED = 0,
SUCCESS = 1,
ONGOING = 2,
FAILED = 3,
};
// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
// for possible result values.
extern XRayPatchingStatus __xray_patch();
// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
// result values.
extern XRayPatchingStatus __xray_unpatch();
}
#endif
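A minimal sketch of driving this interface (not part of this diff), assuming the program is built with -fxray-instrument:

#include <cstdio>
#include "xray/xray_interface.h"

// The handler must never be instrumented, per the contract above.
__attribute__((xray_never_instrument))
void TraceHandler(int32_t FuncId, XRayEntryType Type) {
  std::fprintf(stderr, "fn=%d %s\n", FuncId, Type == ENTRY ? "enter" : "exit");
}

int main() {
  __xray_set_handler(TraceHandler);         // returns 1 on success
  if (__xray_patch() != SUCCESS) return 1;  // patch the instrumentation points
  // ... instrumented work runs here ...
  __xray_unpatch();
  __xray_remove_handler();
  return 0;
}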

View File

@@ -0,0 +1,80 @@
//===-- xray_records.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This header exposes some record types useful for the XRay in-memory logging
// implementation.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_XRAY_RECORDS_H
#define XRAY_XRAY_RECORDS_H
namespace __xray {
enum FileTypes {
NAIVE_LOG = 0,
};
// This data structure is used to describe the contents of the file. We use this
// for versioning the supported XRay file formats.
struct alignas(32) XRayFileHeader {
uint16_t Version = 0;
// The type of file we're writing out. See the FileTypes enum for more
// information. This allows different implementations of the XRay logging to
// have different files for different information being stored.
uint16_t Type = 0;
// What follows are a set of flags that indicate useful things for when
// reading the data in the file.
bool ConstantTSC : 1;
bool NonstopTSC : 1;
// The frequency at which the TSC increases per second.
alignas(8) uint64_t CycleFrequency = 0;
} __attribute__((packed));
static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
enum RecordTypes {
NORMAL = 0,
};
struct alignas(32) XRayRecord {
// This is the type of the record being written. We use 16 bits to allow us to
// treat this as a discriminant, and so that the first 4 bytes get packed
// properly. See RecordTypes for more supported types.
uint16_t RecordType = 0;
// The CPU where the thread is running. We assume number of CPUs <= 256.
uint8_t CPU = 0;
// The type of the event. Usually either ENTER = 0 or EXIT = 1.
uint8_t Type = 0;
// The function ID for the record.
int32_t FuncId = 0;
// Get the full 8 bytes of the TSC when we get the log record.
uint64_t TSC = 0;
// The thread ID for the currently running thread.
uint32_t TId = 0;
// Use some bytes at the end of the record for buffers.
char Buffer[4] = {};
} __attribute__((packed));
static_assert(sizeof(XRayRecord) == 32, "XRayRecord != 32 bytes");
} // namespace __xray
#endif // XRAY_XRAY_RECORDS_H
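Given the two fixed 32-byte layouts above, a log consumer can treat a naive-log file as one XRayFileHeader followed by a stream of XRayRecords. A minimal reader sketch (not part of this diff; the log path is hypothetical):

#include <cstdint>
#include <cstdio>
#include "xray/xray_records.h"

int main() {
  std::FILE *f = std::fopen("xray-log.bin", "rb");  // hypothetical path
  if (!f) return 1;
  __xray::XRayFileHeader header;
  if (std::fread(&header, sizeof header, 1, f) != 1) return 1;
  std::printf("version=%u type=%u freq=%llu\n", (unsigned)header.Version,
              (unsigned)header.Type, (unsigned long long)header.CycleFrequency);
  __xray::XRayRecord rec;
  while (std::fread(&rec, sizeof rec, 1, f) == 1)
    std::printf("fn=%d cpu=%u tsc=%llu %s\n", rec.FuncId, (unsigned)rec.CPU,
                (unsigned long long)rec.TSC, rec.Type == 0 ? "enter" : "exit");
  std::fclose(f);
  return 0;
}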

View File

@@ -79,11 +79,13 @@ static struct AsanDeactivatedFlags {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
"allocator_may_return_null %d, coverage %d, coverage_dir %s, "
"allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir);
allocator_options.may_return_null, coverage, coverage_dir,
allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;

View File

@@ -33,3 +33,4 @@ COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
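For reference, the new activation flag is settable like any other common flag. A sketch using the preexisting __asan_default_options() hook (equivalent to passing it in ASAN_OPTIONS; the 5000 ms value is just an example):

// Ask the allocator to return unused memory to the OS every five seconds.
extern "C" const char *__asan_default_options() {
  return "allocator_release_to_os_interval_ms=5000";
}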

View File

@@ -207,25 +207,27 @@ QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
@@ -254,7 +256,7 @@ struct Allocator {
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
kMaxThreadLocalQuarantine);
(uptr)options.thread_local_quarantine_size_kb << 10);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
@@ -262,22 +264,59 @@ }
}
void Initialize(const AllocatorOptions &options) {
allocator.Init(options.may_return_null);
allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
void RePoisonChunk(uptr chunk) {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk. Or maybe not. Be conservative and only
// poison the redzones.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
// This cannot be an AsanChunk. Poison everything. It may be reused as
// an AsanChunk later.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
}
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
// Poison all existing allocations' redzones.
if (CanPoisonMemory()) {
allocator.ForceLock();
allocator.ForEachChunk(
[](uptr chunk, void *alloc) {
((Allocator *)alloc)->RePoisonChunk(chunk);
},
this);
allocator.ForceUnlock();
}
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
@@ -356,7 +395,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -373,8 +412,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
if (!allocated)
return allocator.ReturnNullOrDie();
if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -530,7 +568,7 @@ struct Allocator {
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
QuarantineChunk(m, ptr, stack, alloc_type);
@@ -563,7 +601,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -643,6 +681,7 @@ struct Allocator {
void PrintStats() {
allocator.PrintStats();
quarantine.PrintStats();
}
void ForceLock() {
@@ -662,17 +701,23 @@ static AsanAllocator &get_allocator() {
return instance.allocator;
}
bool AsanChunkView::IsValid() {
bool AsanChunkView::IsValid() const {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() {
bool AsanChunkView::IsAllocated() const {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
bool AsanChunkView::IsQuarantined() const {
return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
@@ -681,14 +726,14 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
StackTrace AsanChunkView::GetAllocStack() {
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
}
StackTrace AsanChunkView::GetFreeStack() {
StackTrace AsanChunkView::GetFreeStack() const {
return GetStackTraceFromId(GetFreeStackId());
}
@@ -707,6 +752,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);

View File

@@ -33,10 +33,12 @@ struct AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
@@ -49,27 +51,29 @@ void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
bool IsAllocated(); // Checks if the memory is currently allocated.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool IsValid() const; // Checks if AsanChunkView points to a valid
// allocated or quarantined chunk.
bool IsAllocated() const; // Checks if the memory is currently allocated.
bool IsQuarantined() const; // Checks if the memory is currently quarantined.
uptr Beg() const; // First byte of user memory.
uptr End() const; // Last byte of user memory.
uptr UsedSize() const; // Size requested by the user.
uptr AllocTid() const;
uptr FreeTid() const;
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId();
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
u32 GetAllocStackId() const;
u32 GetFreeStackId() const;
StackTrace GetAllocStack() const;
StackTrace GetFreeStack() const;
AllocType GetAllocType() const;
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
return true;
}
return false;
}
bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
(void)access_size;
if (addr < Beg()) {
*offset = Beg() - addr;
@@ -77,7 +81,7 @@ class AsanChunkView {
}
return false;
}
bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
if (addr + access_size > End()) {
*offset = addr - End();
return true;
@@ -90,6 +94,7 @@ };
};
AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -117,18 +122,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
const uptr kAllocatorSpace = 0x3000000000ULL;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so there is no need for different values for different VMAs.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;

View File

@@ -14,74 +14,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
namespace __asan {
namespace {
using namespace __asan;
void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
descr->region_kind = "stack";
AsanThread::StackFrameAccess access;
if (!t->GetStackFrameAccessByAddr(addr, &access))
return;
static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
char *name, uptr name_size,
uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(access.frame_descr, &vars)) {
if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
if (access.offset <= vars[i].beg + vars[i].size) {
internal_strncat(descr->name, vars[i].name_pos,
Min(descr->name_size, vars[i].name_len));
descr->region_address = addr - (access.offset - vars[i].beg);
descr->region_size = vars[i].size;
if (offset <= vars[i].beg + vars[i].size) {
// We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
// if we're limiting the copy due to name_len, we add 1 to ensure we copy
// the whole name and then terminate with '\0'.
internal_strlcpy(name, vars[i].name_pos,
Min(name_size, vars[i].name_len + 1));
region_address = addr - (offset - vars[i].beg);
region_size = vars[i].size;
return;
}
}
}
void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
if (!chunk.IsValid()) {
descr->region_kind = "heap-invalid";
return;
}
descr->region_address = chunk.Beg();
descr->region_size = chunk.UsedSize();
descr->region_kind = "heap";
}
void AsanLocateAddress(uptr addr, AddressDescription *descr) {
if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
return;
}
if (GetInfoForAddressIfGlobal(addr, descr)) {
return;
}
asanThreadRegistry().Lock();
AsanThread *thread = FindThreadByStackAddress(addr);
asanThreadRegistry().Unlock();
if (thread) {
GetInfoForStackVar(addr, descr, thread);
return;
}
GetInfoForHeapAddress(addr, descr);
}
static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
@@ -108,18 +73,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
} // namespace __asan
using namespace __asan;
} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address, uptr *region_size) {
AddressDescription descr = { name, name_size, 0, 0, nullptr };
AsanLocateAddress(addr, &descr);
if (region_address) *region_address = descr.region_address;
if (region_size) *region_size = descr.region_size;
return descr.region_kind;
uptr *region_address_ptr,
uptr *region_size_ptr) {
AddressDescription descr(addr);
uptr region_address = 0;
uptr region_size = 0;
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
if (auto shadow = descr.AsShadow()) {
// region_{address,size} are already 0
switch (shadow->kind) {
case kShadowKindLow:
region_kind = "low shadow";
break;
case kShadowKindGap:
region_kind = "shadow gap";
break;
case kShadowKindHigh:
region_kind = "high shadow";
break;
}
} else if (auto heap = descr.AsHeap()) {
region_kind = "heap";
region_address = heap->chunk_access.chunk_begin;
region_size = heap->chunk_access.chunk_size;
} else if (auto stack = descr.AsStack()) {
region_kind = "stack";
if (!stack->frame_descr) {
// region_{address,size} are already 0
} else {
FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
name_size, region_address, region_size);
}
} else if (auto global = descr.AsGlobal()) {
region_kind = "global";
auto &g = global->globals[0];
internal_strlcpy(name, g.name, name_size);
region_address = g.beg;
region_size = g.size;
} else {
// region_{address,size} are already 0
region_kind = "heap-invalid";
}
CHECK(region_kind);
if (region_address_ptr) *region_address_ptr = region_address;
if (region_size_ptr) *region_size_ptr = region_size;
return region_kind;
}
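// Illustrative caller (not part of this diff), using the public declaration
// from <sanitizer/asan_interface.h>; 'ptr' is hypothetical:
//   char name[64];
//   void *region = nullptr;
//   size_t size = 0;
//   const char *kind =
//       __asan_locate_address(ptr, name, sizeof(name), &region, &size);
//   // kind is e.g. "heap", "stack", "global", "low shadow", or "heap-invalid".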
SANITIZER_INTERFACE_ATTRIBUTE

View File

@@ -0,0 +1,486 @@
//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan functions for getting information about an address and/or printing it.
//===----------------------------------------------------------------------===//
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
// Return " (thread_name) " or an empty string if the name is empty.
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len) {
const char *name = t->name;
if (name[0] == '\0') return "";
buff[0] = 0;
internal_strncat(buff, " (", 3);
internal_strncat(buff, name, buff_len - 4);
internal_strncat(buff, ")", 2);
return buff;
}
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
if (tid == kInvalidTid) return "";
asanThreadRegistry().CheckLocked();
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
return ThreadNameWithParenthesis(t, buff, buff_len);
}
void DescribeThread(AsanThreadContext *context) {
CHECK(context);
asanThreadRegistry().CheckLocked();
// No need to announce the main thread.
if (context->tid == 0 || context->announced) {
return;
}
context->announced = true;
char tname[128];
InternalScopedString str(1024);
str.append("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
str.append(
" created by T%d%s here:\n", context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively describe the parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
GetThreadContextByTidLocked(context->parent_tid);
DescribeThread(parent_context);
}
}
// Shadow descriptions
static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
CHECK(!AddrIsInMem(addr));
if (AddrIsInShadowGap(addr)) {
*shadow_kind = kShadowKindGap;
} else if (AddrIsInHighShadow(addr)) {
*shadow_kind = kShadowKindHigh;
} else if (AddrIsInLowShadow(addr)) {
*shadow_kind = kShadowKindLow;
} else {
CHECK(0 && "Address is not in memory and not in shadow?");
return false;
}
return true;
}
bool DescribeAddressIfShadow(uptr addr) {
ShadowAddressDescription descr;
if (!GetShadowAddressInformation(addr, &descr)) return false;
descr.Print();
return true;
}
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
if (AddrIsInMem(addr)) return false;
ShadowKind shadow_kind;
if (!GetShadowKind(addr, &shadow_kind)) return false;
if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
descr->addr = addr;
descr->kind = shadow_kind;
return true;
}
// Heap descriptions
static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
AsanChunkView chunk, uptr addr,
uptr access_size) {
descr->bad_addr = addr;
if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeLeft;
} else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeRight;
if (descr->offset < 0) {
descr->bad_addr -= descr->offset;
descr->offset = 0;
}
} else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeInside;
} else {
descr->access_type = kAccessTypeUnknown;
}
descr->chunk_begin = chunk.Beg();
descr->chunk_size = chunk.UsedSize();
descr->alloc_type = chunk.GetAllocType();
}
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
InternalScopedString str(4096);
str.append("%s", d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
str.append("%p is located %zd bytes to the left of",
(void *)descr.bad_addr, descr.offset);
break;
case kAccessTypeRight:
str.append("%p is located %zd bytes to the right of",
(void *)descr.bad_addr, descr.offset);
break;
case kAccessTypeInside:
str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
descr.offset);
break;
case kAccessTypeUnknown:
str.append(
"%p is located somewhere around (this is AddressSanitizer bug!)",
(void *)descr.bad_addr);
}
str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
(void *)descr.chunk_begin,
(void *)(descr.chunk_begin + descr.chunk_size));
str.append("%s", d.EndLocation());
Printf("%s", str.data());
}
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) {
return false;
}
descr->addr = addr;
GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
access_size);
CHECK_NE(chunk.AllocTid(), kInvalidTid);
descr->alloc_tid = chunk.AllocTid();
descr->alloc_stack_id = chunk.GetAllocStackId();
descr->free_tid = chunk.FreeTid();
if (descr->free_tid != kInvalidTid)
descr->free_stack_id = chunk.GetFreeStackId();
return true;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
HeapAddressDescription descr;
if (!GetHeapAddressInformation(addr, access_size, &descr)) {
Printf(
"AddressSanitizer can not describe address in more detail "
"(wild memory access suspected).\n");
return false;
}
descr.Print();
return true;
}
// Stack descriptions
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr) {
AsanThread *t = FindThreadByStackAddress(addr);
if (!t) return false;
descr->addr = addr;
descr->tid = t->tid();
// Try to fetch precise stack frame for this access.
AsanThread::StackFrameAccess access;
if (!t->GetStackFrameAccessByAddr(addr, &access)) {
descr->frame_descr = nullptr;
return true;
}
descr->offset = access.offset;
descr->access_size = access_size;
descr->frame_pc = access.frame_pc;
descr->frame_descr = access.frame_descr;
#if SANITIZER_PPC64V1
// On PowerPC64 ELFv1, the address of a function actually points to a
// three-doubleword data structure with the first field containing
// the address of the function's code.
descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
#endif
descr->frame_pc += 16;
return true;
}
static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
uptr access_size, uptr prev_var_end,
uptr next_var_beg) {
uptr var_end = var.beg + var.size;
uptr addr_end = addr + access_size;
const char *pos_descr = nullptr;
// If the variable [var.beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log.
if (addr >= var.beg) {
if (addr_end <= var_end)
pos_descr = "is inside"; // May happen if this is a use-after-return.
else if (addr < var_end)
pos_descr = "partially overflows";
else if (addr_end <= next_var_beg &&
next_var_beg - addr_end >= addr - var_end)
pos_descr = "overflows";
} else {
if (addr_end > var.beg)
pos_descr = "partially underflows";
else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
InternalScopedString str(1024);
str.append(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
str.append(" '");
for (uptr i = 0; i < var.name_len; ++i) {
str.append("%c", var.name_pos[i]);
}
str.append("'");
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
} else {
str.append("\n");
}
Printf("%s", str.data());
}
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
StackAddressDescription descr;
if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
descr.Print();
return true;
}
// Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
InternalScopedString str(4096);
Decorator d;
str.append("%s", d.Location());
if (addr < g.beg) {
str.append("%p is located %zd bytes to the left", (void *)addr,
g.beg - addr);
} else if (addr + access_size > g.beg + g.size) {
if (addr < g.beg + g.size) addr = g.beg + g.size;
str.append("%p is located %zd bytes to the right", (void *)addr,
addr - (g.beg + g.size));
} else {
// Can it happen?
str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
str.append(" of global variable '%s' defined in '",
MaybeDemangleGlobalName(g.name));
PrintGlobalLocation(&str, g);
str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
str.append("%s", d.EndLocation());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr) {
descr->addr = addr;
int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
ARRAY_SIZE(descr->globals));
descr->size = globals_num;
descr->access_size = access_size;
return globals_num != 0;
}
bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
const char *bug_type) {
GlobalAddressDescription descr;
if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
descr.Print(bug_type);
return true;
}
void ShadowAddressDescription::Print() const {
Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
}
void GlobalAddressDescription::Print(const char *bug_type) const {
for (int i = 0; i < size; i++) {
DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
if (bug_type &&
0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
reg_sites[i]) {
Printf(" registered at:\n");
StackDepotGet(reg_sites[i]).Print();
}
}
}
void StackAddressDescription::Print() const {
Decorator d;
char tname[128];
Printf("%s", d.Location());
Printf("Address %p is located in stack of thread T%d%s", addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
if (!frame_descr) {
Printf("%s\n", d.EndLocation());
return;
}
Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// The symbolizer may print more than one frame if inlining was involved.
// The frame numbers may be different from those in the stack trace printed
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may come from an entirely different place
// (e.g. use-after-scope, or a different thread's stack).
Printf("%s", d.EndLocation());
StackTrace alloca_stack(&frame_pc, 1);
alloca_stack.Print();
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(frame_descr, &vars)) {
Printf(
"AddressSanitizer can't parse the stack frame "
"descriptor: |%s|\n",
frame_descr);
// 'addr' is a stack address, so return true even if we can't parse frame
return;
}
uptr n_objects = vars.size();
// Report the number of stack objects.
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
for (uptr i = 0; i < n_objects; i++) {
uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
next_var_beg);
}
Printf(
"HINT: this may be a false positive if your program uses "
"some custom stack unwind mechanism or swapcontext\n");
if (SANITIZER_WINDOWS)
Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
else
Printf(" (longjmp and C++ exceptions *are* supported)\n");
DescribeThread(GetThreadContextByTidLocked(tid));
}
void HeapAddressDescription::Print() const {
PrintHeapChunkAccess(addr, chunk_access);
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
char tname[128];
Decorator d;
AsanThreadContext *free_thread = nullptr;
if (free_tid != kInvalidTid) {
free_thread = GetThreadContextByTidLocked(free_tid);
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
StackTrace free_stack = GetStackTraceFromId(free_stack_id);
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
} else {
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread) DescribeThread(free_thread);
DescribeThread(alloc_thread);
}
AddressDescription::AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry) {
if (GetShadowAddressInformation(addr, &data.shadow)) {
data.kind = kAddressKindShadow;
return;
}
if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
data.kind = kAddressKindHeap;
return;
}
bool isStackMemory = false;
if (shouldLockThreadRegistry) {
ThreadRegistryLock l(&asanThreadRegistry());
isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
} else {
isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
}
if (isStackMemory) {
data.kind = kAddressKindStack;
return;
}
if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
data.kind = kAddressKindGlobal;
return;
}
data.kind = kAddressKindWild;
addr = 0;
}
void PrintAddressDescription(uptr addr, uptr access_size,
const char *bug_type) {
ShadowAddressDescription shadow_descr;
if (GetShadowAddressInformation(addr, &shadow_descr)) {
shadow_descr.Print();
return;
}
GlobalAddressDescription global_descr;
if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
global_descr.Print(bug_type);
return;
}
StackAddressDescription stack_descr;
if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
stack_descr.Print();
return;
}
HeapAddressDescription heap_descr;
if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
heap_descr.Print();
return;
}
// We exhausted our possibilities. Bail out.
Printf(
"AddressSanitizer can not describe address in more detail "
"(wild memory access suspected).\n");
}
} // namespace __asan

View File

@@ -0,0 +1,253 @@
//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_descriptions.cc.
// TODO(filcab): Most struct definitions should move to the interface headers.
//===----------------------------------------------------------------------===//
#ifndef ASAN_DESCRIPTIONS_H
#define ASAN_DESCRIPTIONS_H
#include "asan_allocator.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
namespace __asan {
void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
if (t) DescribeThread(t->context());
}
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len);
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
const char *EndLocation() { return Default(); }
const char *Allocation() { return Magenta(); }
const char *EndAllocation() { return Default(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
case kAsanStackLeftRedzoneMagic:
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
return Red();
case kAsanStackAfterReturnMagic:
return Magenta();
case kAsanInitializationOrderMagic:
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
case kAsanGlobalRedzoneMagic:
return Red();
case kAsanInternalHeapMagic:
return Yellow();
case kAsanIntraObjectRedzone:
return Yellow();
default:
return Default();
}
}
const char *EndShadowByte() { return Default(); }
const char *MemoryByte() { return Magenta(); }
const char *EndMemoryByte() { return Default(); }
};
enum ShadowKind : u8 {
kShadowKindLow,
kShadowKindGap,
kShadowKindHigh,
};
static const char *const ShadowNames[] = {"low shadow", "shadow gap",
"high shadow"};
struct ShadowAddressDescription {
uptr addr;
ShadowKind kind;
u8 shadow_byte;
void Print() const;
};
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr);
enum AccessType {
kAccessTypeLeft,
kAccessTypeRight,
kAccessTypeInside,
kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
};
struct ChunkAccess {
uptr bad_addr;
sptr offset;
uptr chunk_begin;
uptr chunk_size;
u32 access_type : 2;
u32 alloc_type : 2;
};
struct HeapAddressDescription {
uptr addr;
uptr alloc_tid;
uptr free_tid;
u32 alloc_stack_id;
u32 free_stack_id;
ChunkAccess chunk_access;
void Print() const;
};
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr);
bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
struct StackAddressDescription {
uptr addr;
uptr tid;
uptr offset;
uptr frame_pc;
uptr access_size;
const char *frame_descr;
void Print() const;
};
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
static const int kMaxGlobals = 4;
__asan_global globals[kMaxGlobals];
u32 reg_sites[kMaxGlobals];
uptr access_size;
u8 size;
void Print(const char *bug_type = "") const;
};
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
// General function to describe an address. Will try to describe the address as
// a shadow, global (variable), stack, or heap address.
// bug_type is optional and is used for checking if we're reporting an
// initialization-order-fiasco.
// The proper access_size should be passed for stack, global, and heap
// addresses. Defaults to 1.
// Each of the *AddressDescription functions has its own Print() member, which
// may take access_size and bug_type parameters if needed.
void PrintAddressDescription(uptr addr, uptr access_size = 1,
const char *bug_type = "");
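// Illustrative call (not part of this diff), e.g. from an error report path:
//   PrintAddressDescription(addr, /*access_size=*/8, "heap-buffer-overflow");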
enum AddressKind {
kAddressKindWild,
kAddressKindShadow,
kAddressKindHeap,
kAddressKindStack,
kAddressKindGlobal,
};
class AddressDescription {
struct AddressDescriptionData {
AddressKind kind;
union {
ShadowAddressDescription shadow;
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
uptr addr;
};
};
AddressDescriptionData data;
public:
AddressDescription() = default;
// shouldLockThreadRegistry allows us to skip locking if we're sure we have
// already done it.
AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
: AddressDescription(addr, 1, shouldLockThreadRegistry) {}
AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry = true);
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
return data.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
return data.heap.addr;
case kAddressKindStack:
return data.stack.addr;
case kAddressKindGlobal:
return data.global.addr;
}
UNREACHABLE("AddressInformation kind is invalid");
}
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
Printf("Address %p is a wild pointer.\n", data.addr);
return;
case kAddressKindShadow:
return data.shadow.Print();
case kAddressKindHeap:
return data.heap.Print();
case kAddressKindStack:
return data.stack.Print();
case kAddressKindGlobal:
// initialization-order-fiasco has a special Print()
return data.global.Print(bug_descr);
}
UNREACHABLE("AddressInformation kind is invalid");
}
void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
const ShadowAddressDescription *AsShadow() const {
return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
}
const HeapAddressDescription *AsHeap() const {
return data.kind == kAddressKindHeap ? &data.heap : nullptr;
}
const StackAddressDescription *AsStack() const {
return data.kind == kAddressKindStack ? &data.stack : nullptr;
}
const GlobalAddressDescription *AsGlobal() const {
return data.kind == kAddressKindGlobal ? &data.global : nullptr;
}
};
} // namespace __asan
#endif // ASAN_DESCRIPTIONS_H

View File

@@ -0,0 +1,503 @@
//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan implementation for error structures.
//===----------------------------------------------------------------------===//
#include "asan_errors.h"
#include <signal.h>
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
void ErrorStackOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s on address %p"
" (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(),
(void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
InternalScopedString str(1024);
str.append("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
}
str.append("\n");
} else {
str.append("unaccessible\n");
}
Report("%s", str.data());
}
static void MaybeDumpRegisters(void *context) {
if (!flags()->dump_registers) return;
SignalContext::DumpAllRegisters(context);
}
void ErrorDeadlySignal::Print() {
Decorator d;
Printf("%s", d.Warning());
const char *description = DescribeSignalOrException(signo);
Report(
"ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
"T%d)\n",
description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
if (is_memory_access) {
const char *access_type =
write_flag == SignalContext::WRITE
? "WRITE"
: (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
Report("The signal is caused by a %s memory access.\n", access_type);
if (addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
}
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
MaybeDumpInstructionBytes(pc);
MaybeDumpRegisters(context);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary(description, &stack);
}
void ErrorDoubleFree::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting %s on %p in "
"thread T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
second_free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorNewDeleteSizeMismatch::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: %s on %p in thread "
"T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
addr_description.chunk_access.chunk_size, delete_size);
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=new_delete_type_mismatch=0\n");
}
void ErrorFreeNotMalloced::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p in thread T%d%s\n",
addr_description.Address(), tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorAllocTypeMismatch::Print() {
static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
"operator new []"};
static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
"operator delete []"};
CHECK_NE(alloc_type, dealloc_type);
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
scariness.GetDescription(),
alloc_names[alloc_type], dealloc_names[dealloc_type],
addr_description.addr);
Printf("%s", d.EndWarning());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
void ErrorMallocUsableSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorStringFunctionMemoryRangesOverlap::Print() {
Decorator d;
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
"overlap\n",
bug_type, addr1_description.Address(),
addr1_description.Address() + length1, addr2_description.Address(),
addr2_description.Address() + length2);
Printf("%s", d.EndWarning());
scariness.Print();
stack->Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(bug_type, stack);
}
void ErrorStringFunctionSizeOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
scariness.GetDescription(), size);
Printf("%s", d.EndWarning());
scariness.Print();
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorBadParamsToAnnotateContiguousContainer::Print() {
Report(
"ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_annotate_contiguous_container:\n"
" beg : %p\n"
" end : %p\n"
" old_mid : %p\n"
" new_mid : %p\n",
beg, end, old_mid, new_mid);
uptr granularity = SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorODRViolation::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.EndWarning());
InternalScopedString g1_loc(256), g2_loc(256);
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
MaybeDemangleGlobalName(global1.name), g1_loc.data());
Printf(" [2] size=%zd '%s' %s\n", global2.size,
MaybeDemangleGlobalName(global2.name), g2_loc.data());
if (stack_id1 && stack_id2) {
Printf("These globals were registered at these points:\n");
Printf(" [1]:\n");
StackDepotGet(stack_id1).Print();
Printf(" [2]:\n");
StackDepotGet(stack_id2).Print();
}
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg(256);
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
void ErrorInvalidPointerPair::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
addr1_description.Address(), addr2_description.Address());
Printf("%s", d.EndWarning());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
return s[-1] > 127 && s[1] > 127;
}
ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
bool is_write_, uptr access_size_)
: ErrorBase(tid),
addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
pc(pc_),
bp(bp_),
sp(sp_),
access_size(access_size_),
is_write(is_write_),
shadow_val(0) {
scariness.Clear();
if (access_size) {
if (access_size <= 9) {
char desr[] = "?-byte";
desr[0] = '0' + access_size;
scariness.Scare(access_size + access_size / 2, desr);
} else if (access_size >= 10) {
scariness.Scare(15, "multi-byte");
}
is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
// Determine the error type.
bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
u8 *shadow_addr = (u8 *)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
// If we are in the partial right redzone, look at the next shadow byte.
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
bool far_from_bounds = false;
shadow_val = *shadow_addr;
int bug_type_score = 0;
// For use-after-frees, reads are almost as bad as writes.
int read_after_free_bonus = 0;
switch (shadow_val) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanHeapFreeMagic:
bug_descr = "heap-use-after-free";
bug_type_score = 20;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanStackLeftRedzoneMagic:
bug_descr = "stack-buffer-underflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanInitializationOrderMagic:
bug_descr = "initialization-order-fiasco";
bug_type_score = 1;
break;
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
bug_descr = "stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanStackAfterReturnMagic:
bug_descr = "stack-use-after-return";
bug_type_score = 30;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanUserPoisonedMemoryMagic:
bug_descr = "use-after-poison";
bug_type_score = 20;
break;
case kAsanContiguousContainerOOBMagic:
bug_descr = "container-overflow";
bug_type_score = 10;
break;
case kAsanStackUseAfterScopeMagic:
bug_descr = "stack-use-after-scope";
bug_type_score = 10;
break;
case kAsanGlobalRedzoneMagic:
bug_descr = "global-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanIntraObjectRedzone:
bug_descr = "intra-object-overflow";
bug_type_score = 10;
break;
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
bug_descr = "dynamic-stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
}
scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
}
}
}
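A worked instance of the scoring above, restated as a standalone sketch; it assumes Scare() accumulates its score, which matches how it is used in this constructor:

#include <cstdio>
int main() {
  // An 8-byte write landing on a heap-left-redzone shadow byte (0xfa).
  const unsigned access_size = 8;
  const bool is_write = true;
  int score = access_size + access_size / 2;  // "8-byte"               -> 12
  score += is_write ? 20 : 1;                 // "write"                -> 32
  score += 10;                                // "heap-buffer-overflow" -> 42
  std::printf("heap-buffer-overflow, scariness %d\n", score);
}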
static void PrintContainerOverflowHint() {
Printf("HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_container_overflow=0.\n"
"If you suspect a false positive see also: "
"https://github.com/google/sanitizers/wiki/"
"AddressSanitizerContainerOverflow.\n");
}
static void PrintShadowByte(InternalScopedString *str, const char *before,
u8 byte, const char *after = "\n") {
PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
}
static void PrintLegend(InternalScopedString *str) {
str->append(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
(int)SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
str->append(" Partially addressable: ");
for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
str->append("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(str, " Stack left redzone: ",
kAsanStackLeftRedzoneMagic);
PrintShadowByte(str, " Stack mid redzone: ",
kAsanStackMidRedzoneMagic);
PrintShadowByte(str, " Stack right redzone: ",
kAsanStackRightRedzoneMagic);
PrintShadowByte(str, " Stack after return: ",
kAsanStackAfterReturnMagic);
PrintShadowByte(str, " Stack use after scope: ",
kAsanStackUseAfterScopeMagic);
PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(str, " Global init order: ",
kAsanInitializationOrderMagic);
PrintShadowByte(str, " Poisoned by user: ",
kAsanUserPoisonedMemoryMagic);
PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
PrintShadowByte(str, " Array cookie: ",
kAsanArrayCookieMagic);
PrintShadowByte(str, " Intra object redzone: ",
kAsanIntraObjectRedzone);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}
static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before =
p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
PrintShadowByte(str, before, *p, after);
}
str->append("\n");
}
static void PrintShadowMemoryForAddress(uptr addr) {
if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str(4096 * 8);
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
(u8 *)shadow_addr, n_bytes_per_row);
}
if (flags()->print_legend) PrintLegend(&str);
Printf("%s", str.data());
}
void ErrorGeneric::Print() {
Decorator d;
Printf("%s", d.Warning());
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
bug_descr, (void *)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
(void *)addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
scariness.Print();
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
// Pass bug_descr because we have a special case for
// initialization-order-fiasco
addr_description.Print(bug_descr);
if (shadow_val == kAsanContiguousContainerOOBMagic)
PrintContainerOverflowHint();
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
}
} // namespace __asan

View File

@@ -0,0 +1,390 @@
//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for error structures.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ERRORS_H
#define ASAN_ERRORS_H
#include "asan_descriptions.h"
#include "asan_scariness_score.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
struct ErrorBase {
ErrorBase() = default;
explicit ErrorBase(u32 tid_) : tid(tid_) {}
ScarinessScoreBase scariness;
u32 tid;
};
struct ErrorStackOverflow : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorStackOverflow never owns the context.
void *context;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStackOverflow() = default;
ErrorStackOverflow(u32 tid, const SignalContext &sig)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context) {
scariness.Clear();
scariness.Scare(10, "stack-overflow");
}
void Print();
};
struct ErrorDeadlySignal : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorDeadlySignal never owns the context.
void *context;
int signo;
SignalContext::WriteFlag write_flag;
bool is_memory_access;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDeadlySignal() = default;
ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context),
signo(signo_),
write_flag(sig.write_flag),
is_memory_access(sig.is_memory_access) {
scariness.Clear();
if (is_memory_access) {
if (addr < GetPageSizeCached()) {
scariness.Scare(10, "null-deref");
} else if (addr == pc) {
scariness.Scare(60, "wild-jump");
} else if (write_flag == SignalContext::WRITE) {
scariness.Scare(30, "wild-addr-write");
} else if (write_flag == SignalContext::READ) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
}
} else {
scariness.Scare(10, "signal");
}
}
void Print();
};
struct ErrorDoubleFree : ErrorBase {
// ErrorDoubleFree doesn't own the stack trace.
const BufferedStackTrace *second_free_stack;
HeapAddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDoubleFree() = default;
ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid), second_free_stack(stack) {
CHECK_GT(second_free_stack->size, 0);
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(42, "double-free");
}
void Print();
};
struct ErrorNewDeleteSizeMismatch : ErrorBase {
// ErrorNewDeleteSizeMismatch doesn't own the stack trace.
const BufferedStackTrace *free_stack;
HeapAddressDescription addr_description;
uptr delete_size;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorNewDeleteSizeMismatch() = default;
ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
uptr delete_size_)
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "new-delete-type-mismatch");
}
void Print();
};
struct ErrorFreeNotMalloced : ErrorBase {
// ErrorFreeNotMalloced doesn't own the stack trace.
const BufferedStackTrace *free_stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorFreeNotMalloced() = default;
ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid),
free_stack(stack),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(40, "bad-free");
}
void Print();
};
struct ErrorAllocTypeMismatch : ErrorBase {
// ErrorAllocTypeMismatch doesn't own the stack trace.
const BufferedStackTrace *dealloc_stack;
HeapAddressDescription addr_description;
AllocType alloc_type, dealloc_type;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorAllocTypeMismatch() = default;
ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
AllocType alloc_type_, AllocType dealloc_type_)
: ErrorBase(tid),
dealloc_stack(stack),
alloc_type(alloc_type_),
dealloc_type(dealloc_type_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "alloc-dealloc-mismatch");
}
void Print();
};
struct ErrorMallocUsableSizeNotOwned : ErrorBase {
// ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorMallocUsableSizeNotOwned() = default;
ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-malloc_usable_size");
}
void Print();
};
struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
// ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorSanitizerGetAllocatedSizeNotOwned() = default;
ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
}
void Print();
};
struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
// ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr length1, length2;
AddressDescription addr1_description;
AddressDescription addr2_description;
const char *function;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionMemoryRangesOverlap() = default;
ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
uptr addr1, uptr length1_, uptr addr2,
uptr length2_, const char *function_)
: ErrorBase(tid),
stack(stack_),
length1(length1_),
length2(length2_),
addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
function(function_) {
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
scariness.Clear();
scariness.Scare(10, bug_type);
}
void Print();
};
struct ErrorStringFunctionSizeOverflow : ErrorBase {
// ErrorStringFunctionSizeOverflow doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
uptr size;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionSizeOverflow() = default;
ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
uptr addr, uptr size_)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false),
size(size_) {
scariness.Clear();
scariness.Scare(10, "negative-size-param");
}
void Print();
};
struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
// ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr beg, end, old_mid, new_mid;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorBadParamsToAnnotateContiguousContainer() = default;
// PS4: Do we want an AddressDescription for beg?
ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
BufferedStackTrace *stack_,
uptr beg_, uptr end_,
uptr old_mid_, uptr new_mid_)
: ErrorBase(tid),
stack(stack_),
beg(beg_),
end(end_),
old_mid(old_mid_),
new_mid(new_mid_) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
}
void Print();
};
struct ErrorODRViolation : ErrorBase {
__asan_global global1, global2;
u32 stack_id1, stack_id2;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorODRViolation() = default;
ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
const __asan_global *g2, u32 stack_id2_)
: ErrorBase(tid),
global1(*g1),
global2(*g2),
stack_id1(stack_id1_),
stack_id2(stack_id2_) {
scariness.Clear();
scariness.Scare(10, "odr-violation");
}
void Print();
};
struct ErrorInvalidPointerPair : ErrorBase {
uptr pc, bp, sp;
AddressDescription addr1_description;
AddressDescription addr2_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorInvalidPointerPair() = default;
ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
uptr p2)
: ErrorBase(tid),
pc(pc_),
bp(bp_),
sp(sp_),
addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "invalid-pointer-pair");
}
void Print();
};
struct ErrorGeneric : ErrorBase {
AddressDescription addr_description;
uptr pc, bp, sp;
uptr access_size;
const char *bug_descr;
bool is_write;
u8 shadow_val;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorGeneric() = default;
ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
uptr access_size_);
void Print();
};
// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
macro(StackOverflow) \
macro(DeadlySignal) \
macro(DoubleFree) \
macro(NewDeleteSizeMismatch) \
macro(FreeNotMalloced) \
macro(AllocTypeMismatch) \
macro(MallocUsableSizeNotOwned) \
macro(SanitizerGetAllocatedSizeNotOwned) \
macro(StringFunctionMemoryRangesOverlap) \
macro(StringFunctionSizeOverflow) \
macro(BadParamsToAnnotateContiguousContainer) \
macro(ODRViolation) \
macro(InvalidPointerPair) \
macro(Generic)
// clang-format on
#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
case kErrorKind##name: \
return name.Print();
enum ErrorKind {
kErrorKindInvalid = 0,
ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
};
struct ErrorDescription {
ErrorKind kind;
// We're using a tagged union because it allows us to have a trivially
// copyable type and use the same structures as the public interface.
//
// We can add a wrapper around it to make it "more C++-like", but that would
// add a lot of code and the benefit wouldn't be that big.
union {
ErrorBase Base;
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
};
ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
bool IsValid() { return kind != kErrorKindInvalid; }
void Print() {
switch (kind) {
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
case kErrorKindInvalid:
CHECK(0);
}
CHECK(0);
}
};
#undef ASAN_FOR_EACH_ERROR_KIND
#undef ASAN_DEFINE_ERROR_KIND
#undef ASAN_ERROR_DESCRIPTION_MEMBER
#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
#undef ASAN_ERROR_DESCRIPTION_PRINT
} // namespace __asan
#endif // ASAN_ERRORS_H
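How the pieces of this header fit together, as a sketch (the ScopedInErrorReport plumbing that normally drives this lives elsewhere in the runtime):

// ErrorDoubleFree error(tid, free_stack, addr);    // concrete error, scored
// ErrorDescription description(error);             // implicit tagged-union wrap
// if (description.IsValid()) description.Print();  // dispatches on `kind`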

View File

@@ -100,7 +100,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atimic load and store (at least I was not able to make
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;

View File

@@ -52,7 +52,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round robin fasion to maximize the delay between a deallocation
// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
@@ -99,12 +99,12 @@ class FakeStack {
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Divide n by the numbe of frames in size class.
// Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
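Why the mask works in place of a division (an illustration, not part of the patch; NumberOfFrames() is a power of two by construction above):

#include <cassert>
int main() {
  const unsigned long kFrames = 64;  // some NumberOfFrames(...) value
  for (unsigned long n = 0; n < 4096; n++)
    assert((n & (kFrames - 1)) == n % kFrames);  // holds for powers of two
}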
// The the pointer to the flags of the given class_id.
// The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);

View File

@@ -156,9 +156,19 @@ void InitializeFlags() {
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
(ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
if (f->thread_local_quarantine_size_kb < 0) {
const u32 kDefaultThreadLocalQuarantineSizeKb =
// It is not advised to go lower than 64Kb, otherwise quarantine batches
// pushed from the thread-local quarantine to the global one will create
// too much overhead. One quarantine batch is 8Kb and holds up to
// 1021 chunks, which amounts to 1/8 memory overhead per batch when
// the thread-local quarantine is set to 64Kb.
(ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");

View File

@@ -23,6 +23,12 @@ ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
"Size (in Kb) of thread local quarantine used to detect "
"use-after-free errors. Lower value may reduce memory usage but "
"increase the chance of false negatives. It is not advised to go "
"lower than 64Kb, otherwise frequent transfers to global quarantine "
"might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
@@ -102,7 +108,7 @@ ASAN_FLAG(bool, poison_array_cookie, true,
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
(SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
!SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
@@ -133,6 +139,9 @@ ASAN_FLAG(int, detect_odr_violation, 2,
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(bool, dump_registers, true,
"If true, dump values of CPU registers when SEGV happens. Only "
"available on OS X for now.")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "

View File

@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@@ -123,18 +124,6 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
Global g = {};
if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
internal_strncpy(descr->name, g.name, descr->name_size);
descr->region_address = g.beg;
descr->region_size = g.size;
descr->region_kind = "global";
return true;
}
return false;
}
enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
@@ -279,6 +268,46 @@ void StopInitOrderChecking() {
}
}
static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
bool should_demangle = false;
if (name[0] == '_' && name[1] == 'Z')
should_demangle = true;
else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
should_demangle = true;
return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
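What the "_Z" prefix check buys, shown against the toolchain demangler directly; a standalone example for Itanium-ABI toolchains (the mangled symbol is my own):

#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>
int main() {
  int status = 0;
  char *s = abi::__cxa_demangle("_Z3foov", nullptr, nullptr, &status);
  std::printf("%s\n", status == 0 ? s : "_Z3foov");  // prints: foo()
  std::free(s);
}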
// Check if the global is a zero-terminated ASCII string. If so, print it.
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char *)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char *)(g.beg + g.size - 1) != '\0') return;
str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
(char *)g.beg);
}
static const char *GlobalFilename(const __asan_global &g) {
const char *res = g.module_name;
// Prefer the filename from the source location, if it is available.
if (g.location) res = g.location->filename;
CHECK(res);
return res;
}
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
str->append("%s", GlobalFilename(g));
if (!g.location) return;
if (g.location->line_no) str->append(":%d", g.location->line_no);
if (g.location->column_no) str->append(":%d", g.location->column_no);
}
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -319,6 +348,20 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
// The MSVC incremental linker may pad globals out to 256 bytes. As long
// as __asan_global is smaller than 256 bytes and its size is a power
// of two, we can skip over the padding.
static_assert(
sizeof(__asan_global) < 256 &&
(sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
"sizeof(__asan_global) incompatible with incremental linker padding");
// If these are padding bytes, the rest of the global should be zero.
CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
globals[i].name == nullptr && globals[i].module_name == nullptr &&
globals[i].odr_indicator == 0);
continue;
}
RegisterGlobal(&globals[i]);
}
}
@@ -329,6 +372,11 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
// Skip globals that look like padding from the MSVC incremental linker.
// See comment in __asan_register_globals.
continue;
}
UnregisterGlobal(&globals[i]);
}
}
@@ -339,10 +387,10 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!CanPoisonMemory())
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
@@ -365,7 +413,8 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
!CanPoisonMemory())
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);

View File

@@ -0,0 +1,62 @@
//===-- asan_globals_win.cc -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Global registration code that is linked into every Windows DLL and EXE.
//
//===----------------------------------------------------------------------===//
#include "asan_interface_internal.h"
#if SANITIZER_WINDOWS
namespace __asan {
#pragma section(".ASAN$GA", read, write) // NOLINT
#pragma section(".ASAN$GZ", read, write) // NOLINT
extern "C" __declspec(allocate(".ASAN$GA"))
__asan_global __asan_globals_start = {};
extern "C" __declspec(allocate(".ASAN$GZ"))
__asan_global __asan_globals_end = {};
#pragma comment(linker, "/merge:.ASAN=.data")
static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
__asan_global *start = &__asan_globals_start + 1;
__asan_global *end = &__asan_globals_end;
uptr bytediff = (uptr)end - (uptr)start;
if (bytediff % sizeof(__asan_global) != 0) {
#ifdef ASAN_DLL_THUNK
__debugbreak();
#else
CHECK("corrupt asan global array");
#endif
}
// We know end >= start because the linker sorts the portion after the dollar
// sign alphabetically.
uptr n = end - start;
hook(start, n);
}
static void register_dso_globals() {
call_on_globals(&__asan_register_globals);
}
static void unregister_dso_globals() {
call_on_globals(&__asan_unregister_globals);
}
// Register globals
#pragma section(".CRT$XCU", long, read) // NOLINT
#pragma section(".CRT$XTX", long, read) // NOLINT
extern "C" __declspec(allocate(".CRT$XCU"))
void (*const __asan_dso_reg_hook)() = &register_dso_globals;
extern "C" __declspec(allocate(".CRT$XTX"))
void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
} // namespace __asan
#endif // SANITIZER_WINDOWS

View File

@@ -0,0 +1,34 @@
//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface to the Windows-specific global management code. Separated into a
// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
// which defines symbols that clash with other sanitizer headers.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_GLOBALS_WIN_H
#define ASAN_GLOBALS_WIN_H
#if !defined(_MSC_VER)
#error "this file is Windows-only, and uses MSVC pragmas"
#endif
#if defined(_WIN64)
#define SANITIZER_SYM_PREFIX
#else
#define SANITIZER_SYM_PREFIX "_"
#endif
// Use this macro to force linking asan_globals_win.cc into the DSO.
#define ASAN_LINK_GLOBALS_WIN() \
__pragma( \
comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))
#endif // ASAN_GLOBALS_WIN_H
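Intended usage, as I read the comment above (a sketch; any translation unit linked into the module will do):

// #include "asan_globals_win.h"
// ASAN_LINK_GLOBALS_WIN()
// The /include directive forces a reference to __asan_dso_reg_hook, so the
// linker cannot dead-strip the asan_globals_win.cc object and its .CRT$XCU
// registration pointer.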

View File

@@ -81,6 +81,51 @@ struct AsanInterceptorContext {
} \
} while (0)
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
@@ -198,10 +243,25 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
// Asan needs custom handling of these:
#undef SANITIZER_INTERCEPT_MEMSET
#undef SANITIZER_INTERCEPT_MEMMOVE
#undef SANITIZER_INTERCEPT_MEMCPY
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
} while (false)
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
ASAN_MEMCPY_IMPL(ctx, to, from, size); \
} while (false)
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memset); \
ASAN_MEMSET_IMPL(ctx, block, c, size); \
} while (false)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
@@ -389,90 +449,18 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
if (UNLIKELY(!asan_inited)) \
return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memmove);
ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
// calls. If we just disable error reporting with
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memset);
ASAN_MEMSET_IMPL(ctx, block, c, size);
}
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -720,19 +708,10 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcpy);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
// In asan, REAL(memmove) is not used, but it is used in msan.
ASAN_INTERCEPT_FUNC(memmove);
}
CHECK(REAL(memcpy));
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT

View File

@@ -23,6 +23,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
using __sanitizer::u64;
using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
@@ -79,6 +81,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
// Sets the bytes of the given range of shadow memory to a specific value.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_00(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f1(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f2(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f3(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f5(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f8(uptr addr, uptr size);
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
@@ -156,6 +172,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;

View File

@@ -36,7 +36,7 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
# if SANITIZER_IOS || SANITIZER_ANDROID
# define ASAN_LOW_MEMORY 1
# else
# define ASAN_LOW_MEMORY 0
@@ -65,6 +65,9 @@ void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
// asan_win.cc / asan_posix.cc
const char *DescribeSignalOrException(int signo);
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@@ -100,17 +103,6 @@ void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#elif SANITIZER_WINDOWS64
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
@@ -132,12 +124,10 @@ extern bool asan_init_is_running;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;

View File

@@ -49,15 +49,6 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
// See also https://github.com/google/sanitizers/issues/34.
// TODO(glider): need to check dynamically that memcpy() and memmove() are
// actually the same function.
return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;

View File

@@ -78,7 +78,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr = asan_malloc(size, &stack);
void *new_ptr;
if (UNLIKELY(!asan_inited)) {
new_ptr = AllocateFromLocalPool(size);
} else {
copy_size = size;
new_ptr = asan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}

View File

@@ -124,6 +124,11 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
return realloc(p, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc_base(void *p, size_t n, size_t elem_size) {
return _recalloc(p, n, elem_size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
@@ -223,6 +228,7 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_realloc_base", (uptr)realloc);
TryToOverrideFunction("_realloc_crt", (uptr)realloc);
TryToOverrideFunction("_recalloc", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
TryToOverrideFunction("_msize", (uptr)_msize);
TryToOverrideFunction("_expand", (uptr)_expand);

View File

@@ -125,6 +125,7 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
@@ -140,7 +141,6 @@ static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
#define SHADOW_SCALE kDefaultShadowScale
@@ -168,7 +168,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# else
# define SHADOW_OFFSET kIosShadowOffset64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# endif
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
@@ -183,7 +183,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_WINDOWS64
# define SHADOW_OFFSET kWindowsShadowOffset64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
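For reference, the dynamic offset feeds the usual shadow mapping. A standalone illustration with kDefaultShadowScale = 3; both addresses are made up:

#include <cstdio>
int main() {
  unsigned long long offset = 0x7fff8000ULL;     // the short 64-bit offset
  unsigned long long addr = 0x602000000010ULL;   // hypothetical heap address
  unsigned long long shadow = (addr >> 3) + offset;  // MemToShadow(addr)
  std::printf("shadow byte for %#llx lives at %#llx\n", addr, shadow);
}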
@@ -269,9 +269,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
(flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -295,21 +311,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;

View File

@@ -32,9 +32,56 @@ struct AllocationSite {
class HeapProfile {
public:
HeapProfile() : allocations_(1024) {}
void ProcessChunk(const AsanChunkView& cv) {
if (cv.IsAllocated()) {
total_allocated_user_size_ += cv.UsedSize();
total_allocated_count_++;
u32 id = cv.GetAllocStackId();
if (id)
Insert(id, cv.UsedSize());
} else if (cv.IsQuarantined()) {
total_quarantined_user_size_ += cv.UsedSize();
total_quarantined_count_++;
} else {
total_other_count_++;
}
}
void Print(uptr top_percent) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
});
CHECK(total_allocated_user_size_);
uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
"%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
"showing top %zd%%\n",
total_allocated_user_size_, total_allocated_count_,
total_quarantined_user_size_, total_quarantined_count_,
total_other_count_, total_allocated_count_ +
total_quarantined_count_ + total_other_count_, top_percent);
for (uptr i = 0; i < allocations_.size(); i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
a.total_size * 100 / total_allocated_user_size_, a.count);
StackDepotGet(a.id).Print();
total_shown += a.total_size;
if (total_shown * 100 / total_allocated_user_size_ > top_percent)
break;
}
}
private:
uptr total_allocated_user_size_ = 0;
uptr total_allocated_count_ = 0;
uptr total_quarantined_user_size_ = 0;
uptr total_quarantined_count_ = 0;
uptr total_other_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
void Insert(u32 id, uptr size) {
total_allocated_ += size;
total_count_++;
// Linear lookup will be good enough for most cases (although not all).
for (uptr i = 0; i < allocations_.size(); i++) {
if (allocations_[i].id == id) {
@@ -45,40 +92,11 @@ class HeapProfile {
}
allocations_.push_back({id, size, 1});
}
void Print(uptr top_percent) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
});
CHECK(total_allocated_);
uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
"showing top %zd%%\n", total_allocated_, total_count_, top_percent);
for (uptr i = 0; i < allocations_.size(); i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
a.total_size * 100 / total_allocated_, a.count);
StackDepotGet(a.id).Print();
total_shown += a.total_size;
if (total_shown * 100 / total_allocated_ > top_percent)
break;
}
}
private:
uptr total_allocated_ = 0;
uptr total_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
};
static void ChunkCallback(uptr chunk, void *arg) {
HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
AsanChunkView cv = FindHeapChunkByAddress(chunk);
if (!cv.IsAllocated()) return;
u32 id = cv.GetAllocStackId();
if (!id) return;
hp->Insert(id, cv.UsedSize());
reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
FindHeapChunkByAllocBeg(chunk));
}
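The profile is reachable from user code through the public hook; a hedged sketch, since only the callback side is visible in this hunk (to the best of my knowledge this revision exports the hook with a single top-percent parameter, mirroring Print() above):

// extern "C" void __sanitizer_print_memory_profile(size_t top_percent);
// ...
// __sanitizer_print_memory_profile(90);  // show sites covering 90% of live heap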
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,

View File

@@ -45,26 +45,6 @@
using namespace __asan; // NOLINT
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of those'll have its implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
// FreeBSD prior v9.2 have wrong definition of 'size_t'.
// http://svnweb.freebsd.org/base?view=revision&revision=232261
#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
@@ -74,6 +54,30 @@ struct nothrow_t {};
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
struct nothrow_t {};
enum class align_val_t: size_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
#define OPERATOR_NEW_BODY_ALIGN(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign((uptr)align, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of those'll have its implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
@@ -84,6 +88,18 @@ void *operator new(size_t size, std::nothrow_t const&)
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
@@ -131,6 +147,32 @@ void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
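With these operators in place, a C++17 over-aligned allocation is checked like any other; a minimal sketch (compile with -std=c++17):

#include <cstddef>
struct alignas(64) Lanes { float v[16]; };
int main() {
  Lanes *p = new Lanes;  // operator new(sizeof(Lanes), std::align_val_t{64})
  delete p;              // the matching aligned (possibly sized) delete
}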
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {

View File

@@ -64,12 +64,9 @@ struct ShadowSegmentEndpoint {
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may be
// not page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Since asan's mapping is compacting, the shadow chunk may be
// not page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -117,9 +114,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -131,7 +128,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@@ -157,9 +154,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We unpoison memory bytes up to end.offset if they are not
// unpoisoned already.
if (value != 0) {
@@ -167,7 +164,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@@ -314,6 +311,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
void __asan_set_shadow_00(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0, size);
}
void __asan_set_shadow_f1(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf1, size);
}
void __asan_set_shadow_f2(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf2, size);
}
void __asan_set_shadow_f3(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf3, size);
}
void __asan_set_shadow_f5(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf5, size);
}
void __asan_set_shadow_f8(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf8, size);
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
@@ -388,7 +409,7 @@ const void *__sanitizer_contiguous_container_find_bad_address(
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
uptr r1_end = Min(end + kMaxRangeToCheck, mid);
uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
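Restating the three windows for clarity (a paraphrase; r3_end is outside the shown hunk and assumed to be `end`):

// r1 = [beg,                min(beg + 32, mid))  -- must be addressable
// r2 = [max(beg, mid - 32), min(end, mid + 32))  -- straddles mid
// r3 = [max(end - 32, mid), end)                 -- must be poisoned
// Capping each window at kMaxRangeToCheck = 32 keeps the verification O(1)
// in the container size; the old `end + kMaxRangeToCheck` bound collapsed
// r1_end to mid, scanning the whole used region.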

View File

@@ -86,8 +86,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
// Calls __sanitizer::FlushUnneededShadowMemory() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
// Calls __sanitizer::ReleaseMemoryPagesToOS() on
// [MemToShadow(p), MemToShadow(p+size)].
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan

View File

@@ -33,6 +33,19 @@
namespace __asan {
const char *DescribeSignalOrException(int signo) {
switch (signo) {
case SIGFPE:
return "FPE";
case SIGILL:
return "ILL";
case SIGABRT:
return "ABRT";
default:
return "SEGV";
}
}
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
@@ -84,12 +97,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
else if (signo == SIGFPE)
ReportDeadlySignal("FPE", sig);
else if (signo == SIGILL)
ReportDeadlySignal("ILL", sig);
else
ReportDeadlySignal("SEGV", sig);
ReportDeadlySignal(signo, sig);
}
// ---------------------- TSD ---------------- {{{1

File diff suppressed because it is too large

View File

@@ -25,35 +25,29 @@ struct StackVarDescr {
uptr name_len;
};
struct AddressDescription {
char *name;
uptr name_size;
uptr region_address;
uptr region_size;
const char *region_kind;
};
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after = "\n");
// The following functions print an address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
void ReportDeadlySignal(const char *description, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);

View File

@ -32,6 +32,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@ -263,6 +264,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
// clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@ -302,7 +304,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
case 40: __asan_set_shadow_00(0, 0); break;
case 41: __asan_set_shadow_f1(0, 0); break;
case 42: __asan_set_shadow_f2(0, 0); break;
case 43: __asan_set_shadow_f3(0, 0); break;
case 44: __asan_set_shadow_f5(0, 0); break;
case 45: __asan_set_shadow_f8(0, 0); break;
}
// clang-format on
}
static void asan_atexit() {
@ -326,8 +335,21 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap)
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
// is actually using this memory, which means it needs a shadow.
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
uptr GapShadowEnd =
RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
if (Verbosity())
Printf("protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
@ -388,6 +410,8 @@ static void PrintAddressSpaceLayout() {
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("thread_local_quarantine_size_kb=%zuK\n",
(uptr)flags()->thread_local_quarantine_size_kb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@ -401,6 +425,79 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
static void InitializeShadowMemory() {
// Set the shadow memory address to uninitialized.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
uptr shadow_start = kLowShadowBeg;
// Detect if a dynamic shadow address must be used and find an available
// location when necessary. When the dynamic address is used, the macro
// |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|, which
// is |kDefaultShadowSentinel|.
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
uptr granularity = GetMmapGranularity();
uptr alignment = 8 * granularity;
uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding;
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
}
// Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
}
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@ -434,7 +531,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@ -453,61 +549,9 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#elif SANITIZER_WINDOWS64
// Disable the "mid mem" shadow layout.
if (!full_shadow_is_available) {
kMidMemBeg = 0;
kMidMemEnd = 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);
@ -599,6 +643,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
if (asan_init_is_running)
return;
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();

View File

@ -34,10 +34,10 @@
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
class ScarinessScore {
public:
ScarinessScore() {
struct ScarinessScoreBase {
void Clear() {
descr[0] = 0;
score = 0;
}
void Scare(int add_to_score, const char *reason) {
if (descr[0])
@ -52,16 +52,23 @@ class ScarinessScore {
Printf("SCARINESS: %d (%s)\n", score, descr);
}
static void PrintSimple(int score, const char *descr) {
ScarinessScore SS;
SS.Scare(score, descr);
SS.Print();
ScarinessScoreBase SSB;
SSB.Clear();
SSB.Scare(score, descr);
SSB.Print();
}
private:
int score = 0;
int score;
char descr[1024];
};
struct ScarinessScore : ScarinessScoreBase {
ScarinessScore() {
Clear();
}
};
} // namespace __asan
#endif // ASAN_SCARINESS_SCORE_H
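
A usage sketch of the refactored scoring API (illustrative only, not part of the patch): the split apparently lets a plain ScarinessScoreBase live in zero-initialized global storage without running a constructor, provided the user calls Clear() first, while ScarinessScore keeps the old self-initializing behavior.

  ScarinessScore SS;                             // constructor runs Clear()
  SS.Scare(10, "stack-buffer-overflow");         // accumulates score and text
  SS.Print();                                    // prints "SCARINESS: 10 (...)"
  ScarinessScore::PrintSimple(60, "wild-jump");  // stack-local one-shot helper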

View File

@ -141,7 +141,9 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
@ -152,6 +154,10 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
fake_stack_ = fake_stack_save;
}
if (bottom_old)
*bottom_old = stack_bottom_;
if (size_old)
*size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
@ -345,7 +351,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
if (ThreadStackContainsAddress(tctx, &context)) {
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
@ -447,12 +453,16 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack) {
void __sanitizer_finish_switch_fiber(void* fakestack,
const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
t->FinishSwitchFiber((FakeStack*)fakestack);
t->FinishSwitchFiber((FakeStack*)fakestack,
(uptr*)bottom_old,
(uptr*)size_old);
}
}
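
For reference, a sketch of how a coroutine library would drive the extended fiber-switch API (illustrative only; swapcontext stands in for whatever context-switch primitive the library uses, and none of the caller-side names come from the patch):

  // On the fiber that is about to be suspended:
  void *fake_stack_save = nullptr;
  __sanitizer_start_switch_fiber(&fake_stack_save,
                                 target_stack_bottom, target_stack_size);
  swapcontext(&self_ctx, &target_ctx);  // control leaves this fiber
  // When control comes back, the new out-parameters return the bounds of
  // the stack we switched away from; passing nullptr ignores them.
  const void *bottom_old = nullptr;
  size_t size_old = 0;
  __sanitizer_finish_switch_fiber(fake_stack_save, &bottom_old, &size_old);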

View File

@ -94,7 +94,8 @@ class AsanThread {
}
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
void FinishSwitchFiber(FakeStack *fake_stack_save);
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old);
bool has_fake_stack() {
return !atomic_load(&stack_switching_, memory_order_relaxed) &&

View File

@ -19,6 +19,7 @@
#include <stdlib.h>
#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@ -37,7 +38,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}
// -------------------- A workaround for the abscence of weak symbols ----- {{{
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_shadow_memory_dynamic_address() {
__asan_init();
return __asan_shadow_memory_dynamic_address;
}
// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
@ -64,14 +71,22 @@ void __asan_default_on_error() {}
// }}}
} // extern "C"
// ---------------------- Windows-specific inteceptors ---------------- {{{
// ---------------------- Windows-specific interceptors ---------------- {{{
INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
CHECK(REAL(RtlRaiseException));
// This is a noreturn function, unless it's one of the exceptions raised to
// communicate with the debugger, such as the one from OutputDebugString.
if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
__asan_handle_no_return();
REAL(RtlRaiseException)(ExceptionRecord);
}
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
#ifdef _WIN64
INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
@ -123,44 +138,12 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}
namespace {
BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
void EnsureWorkerThreadRegistered() {
// FIXME: GetCurrentThread relies on TSD, which might not play well with
// system thread pools. We might want to use something like reference
// counting to zero out GetCurrentThread() underlying storage when the last
// work item finishes? Or can we disable reclaiming of threads in the pool?
BlockingMutexLock l(&mu_for_thread_tracking);
if (__asan::GetCurrentThread())
return;
AsanThread *t = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr,
/* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
} // namespace
INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
// NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
// query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
// System worker pool threads are created at arbitraty point in time and
// without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
// instead and don't register a specific parent_tid/stack.
EnsureWorkerThreadRegistered();
return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
}
// }}}
namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
@ -169,11 +152,15 @@ void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(_except_handler4);
#endif
// NtWaitForWorkViaWorkerFactory is always linked dynamically.
CHECK(::__interception::OverrideFunction(
"NtWaitForWorkViaWorkerFactory",
(uptr)WRAP(NtWaitForWorkViaWorkerFactory),
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
// Try to intercept kernel32!RaiseException, and if that fails, intercept
// ntdll!RtlRaiseException instead.
if (!::__interception::OverrideFunction("RaiseException",
(uptr)WRAP(RaiseException),
(uptr *)&REAL(RaiseException))) {
CHECK(::__interception::OverrideFunction("RtlRaiseException",
(uptr)WRAP(RtlRaiseException),
(uptr *)&REAL(RtlRaiseException)));
}
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@ -229,8 +216,7 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
static uptr page_size = GetPageSizeCached();
static uptr alloc_granularity = GetMmapGranularity();
uptr page_size = GetPageSizeCached();
// Only handle access violations.
if (exception_pointers->ExceptionRecord->ExceptionCode !=
EXCEPTION_ACCESS_VIOLATION) {
@ -276,22 +262,57 @@ void InitializePlatformExceptionHandlers() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
// Check based on flags if we should report this exception.
static bool ShouldReportDeadlyException(unsigned code) {
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
case EXCEPTION_IN_PAGE_ERROR:
return common_flags()->handle_segv;
case EXCEPTION_BREAKPOINT:
case EXCEPTION_ILLEGAL_INSTRUCTION: {
return common_flags()->handle_sigill;
}
}
return false;
}
// Return the textual name for this exception.
const char *DescribeSignalOrException(int signo) {
unsigned code = signo;
// Get the string description of the exception if this is a known deadly
// exception.
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
return "access-violation";
case EXCEPTION_IN_PAGE_ERROR:
return "in-page-error";
case EXCEPTION_BREAKPOINT:
return "breakpoint";
case EXCEPTION_ILLEGAL_INSTRUCTION:
return "illegal-instruction";
}
return nullptr;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
const char *description =
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation"
: "in-page-error";
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(description, sig);
}
// Continue the search if the signal wasn't deadly.
if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
return EXCEPTION_CONTINUE_SEARCH;
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(exception_record->ExceptionCode, sig);
UNREACHABLE("returned from reporting deadly signal");
}
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
__asan_unhandled_exception_filter(info);
// Bubble out to the default exception filter.
return default_seh_handler(info);
}
@ -331,10 +352,25 @@ int __asan_set_seh_filter() {
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read) // NOLINT
__declspec(allocate(".CRT$XCAB"))
int (*__intercept_seh)() = __asan_set_seh_filter;
__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
__asan_set_seh_filter;
// Piggyback on the TLS initialization callback directory to initialize asan as
// early as possible. Initializers in .CRT$XL* are called directly by ntdll
// and run before the CRT. Users also add code to .CRT$XLC, so it's important
// to run our initializers first.
static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
if (reason == DLL_PROCESS_ATTACH) __asan_init();
}
#pragma section(".CRT$XLAB", long, read) // NOLINT
__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
#endif
ASAN_LINK_GLOBALS_WIN()
// }}}
} // namespace __asan
#endif // _WIN32
#endif // SANITIZER_WINDOWS

View File

@ -15,21 +15,30 @@
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dll_thunk.lib
// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "asan_globals_win.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#ifdef _M_IX86
#define WINAPI __stdcall
#else
#define WINAPI
#endif
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
void *__stdcall GetModuleHandleA(const char *module_name);
void *__stdcall GetProcAddress(void *module, const char *proc_name);
void *WINAPI GetModuleHandleA(const char *module_name);
void *WINAPI GetProcAddress(void *module, const char *proc_name);
void abort();
}
using namespace __sanitizer;
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
@ -105,7 +114,7 @@ static void InterceptHooks();
// ---------- Function wrapping helpers ----------------------------------- {{{1
#define WRAP_V_V(name) \
extern "C" void name() { \
typedef void (*fntype)(); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
} \
@ -113,7 +122,7 @@ static void InterceptHooks();
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
typedef void (*fntype)(void *arg); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
} \
@ -121,7 +130,7 @@ static void InterceptHooks();
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
typedef void (*fntype)(void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
} \
@ -129,7 +138,7 @@ static void InterceptHooks();
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
} \
@ -137,7 +146,7 @@ static void InterceptHooks();
#define WRAP_W_V(name) \
extern "C" void *name() { \
typedef void *(*fntype)(); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
} \
@ -145,7 +154,7 @@ static void InterceptHooks();
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
typedef void *(*fntype)(void *arg); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
} \
@ -153,7 +162,7 @@ static void InterceptHooks();
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
typedef void *(*fntype)(void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
} \
@ -161,7 +170,7 @@ static void InterceptHooks();
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
} \
@ -169,7 +178,7 @@ static void InterceptHooks();
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
typedef void *(*fntype)(void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
} \
@ -178,7 +187,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
} \
@ -187,7 +196,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
} \
@ -198,9 +207,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
@ -214,7 +225,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
__asan_shadow_memory_dynamic_address =
(uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
@ -224,6 +236,7 @@ extern "C" void __asan_version_mismatch_check() {
}
INTERFACE_FUNCTION(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
@ -257,6 +270,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
INTERFACE_FUNCTION(__asan_set_shadow_00);
INTERFACE_FUNCTION(__asan_set_shadow_f1);
INTERFACE_FUNCTION(__asan_set_shadow_f2);
INTERFACE_FUNCTION(__asan_set_shadow_f3);
INTERFACE_FUNCTION(__asan_set_shadow_f5);
INTERFACE_FUNCTION(__asan_set_shadow_f8);
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
@ -306,17 +326,18 @@ INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_dump_coverage)
INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
INTERFACE_FUNCTION(__sanitizer_get_coverage_pc_buffer)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@ -327,6 +348,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
@ -347,6 +370,7 @@ INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
@ -368,6 +392,7 @@ WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
@ -444,4 +469,15 @@ static int call_asan_init() {
#pragma section(".CRT$XIB", long, read) // NOLINT
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
}
#pragma section(".CRT$XLAB", long, read) // NOLINT
__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
ASAN_LINK_GLOBALS_WIN()
#endif // ASAN_DLL_THUNK

View File

@ -1,4 +1,4 @@
//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@ -16,22 +16,25 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handlerx
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
#include "asan_globals_win.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
#pragma section(".CRT$XLAB", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
@ -42,14 +45,37 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the a variable that is
// constant after initialization anyways.
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return;
__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
void* __asan_shadow_memory_dynamic_address;
}
static int InitializeClonedVariables() {
__asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
__asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
return 0;
}
static void NTAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
}
// Our cloned variables must be initialized before C/C++ constructors. If TLS
// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
// initializer is needed as a backup.
__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
InitializeClonedVariables;
__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
@ -73,6 +99,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
@ -81,8 +108,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
} // namespace
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
@ -97,4 +122,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
ASAN_LINK_GLOBALS_WIN()
#endif // ASAN_DYNAMIC_RUNTIME_THUNK

View File

@ -15,16 +15,34 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_idivmod __rt_sdiv
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
#if __ARM_ARCH_ISA_THUMB == 1
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
muls r2, r2, r0 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
#else
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
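// Note: as __rt_sdiv (see the #define above) the Microsoft helper receives
// the divisor in r0 and the dividend in r1, so swap into EABI order here.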
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__divmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif // __ARM_ARCH_ISA_THUMB == 1
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE

View File

@ -16,6 +16,10 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_ldivmod __rt_sdiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
#if defined(__MINGW32__)
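// As __rt_sdiv64 the two 64-bit operands arrive in the opposite order, so
// exchange r0:r1 with r2:r3 before calling the EABI-style helper.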
mov r12, r0
mov r0, r2
mov r2, r12
mov r12, r1
mov r1, r3
mov r3, r12
#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]

View File

@ -16,16 +16,40 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uidivmod __rt_udiv
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
#if __ARM_ARCH_ISA_THUMB == 1
cmp r0, r1
bcc LOCAL_LABEL(case_denom_larger)
push {r0, r1, lr}
bl SYMBOL_NAME(__aeabi_uidiv)
pop {r1, r2, r3}
muls r2, r2, r0 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
LOCAL_LABEL(case_denom_larger):
movs r1, r0
movs r0, #0
JMP (lr)
#else
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
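// Same operand swap as in __aeabi_idivmod: as __rt_udiv the divisor comes in r0.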
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__udivmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
NO_EXEC_STACK_DIRECTIVE

View File

@ -16,6 +16,10 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uldivmod __rt_udiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
#if defined(__MINGW32__)
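// Same 64-bit operand swap as in __aeabi_ldivmod: __rt_udiv64 reverses the order.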
mov r12, r0
mov r0, r2
mov r2, r12
mov r12, r1
mov r1, r3
mov r3, r12
#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]

View File

@ -39,6 +39,9 @@
#include "../assembly.h"
.syntax unified
#if __ARM_ARCH_ISA_THUMB == 2
.thumb
#endif
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)

View File

@ -49,17 +49,37 @@ LOCAL_LABEL(divzero):
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
# if __ARM_ARCH_ISA_THUMB == 1
movs r4, r0
eors r4, r1
# else
eor r4, r0, r1
# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
# if __ARM_ARCH_ISA_THUMB == 1
asrs r2, r0, #31
asrs r3, r1, #31
eors r0, r2
eors r1, r3
subs r0, r0, r2
subs r1, r1, r3
# else
eor r2, r0, r0, asr #31
eor r3, r1, r1, asr #31
sub r0, r2, r0, asr #31
sub r1, r3, r1, asr #31
# endif
// abs(a) / abs(b)
bl SYMBOL_NAME(__udivsi3)
// Apply sign of quotient to result and return.
# if __ARM_ARCH_ISA_THUMB == 1
asrs r4, #31
eors r0, r4
subs r0, r0, r4
# else
eor r0, r0, r4, asr #31
sub r0, r0, r4, asr #31
# endif
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divsi3)

View File

@ -40,12 +40,26 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#else
cmp r1, #1
bcc LOCAL_LABEL(divby0)
#if __ARM_ARCH_ISA_THUMB == 1
bne LOCAL_LABEL(num_neq_denom)
JMP(lr)
LOCAL_LABEL(num_neq_denom):
#else
IT(eq)
JMPc(lr, eq)
#endif
cmp r0, r1
#if __ARM_ARCH_ISA_THUMB == 1
bhs LOCAL_LABEL(num_ge_denom)
movs r0, #0
JMP(lr)
LOCAL_LABEL(num_ge_denom):
#else
ITT(cc)
movcc r0, #0
JMPc(lr, cc)
#endif
/*
* Implement division using binary long division algorithm.
*
@ -62,7 +76,7 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
* that (r0 << shift) < 2 * r1. The quotient is stored in r3.
*/
# ifdef __ARM_FEATURE_CLZ
# if defined(__ARM_FEATURE_CLZ)
clz ip, r0
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
@ -77,49 +91,128 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
sub ip, ip, r3, lsl #3
mov r3, #0
bx ip
# else
# else /* No CLZ Feature */
# if __ARM_ARCH_ISA_THUMB == 2
# error THUMB mode requires CLZ or UDIV
# endif
# if __ARM_ARCH_ISA_THUMB == 1
# define BLOCK_SIZE 10
# else
# define BLOCK_SIZE 12
# endif
mov r2, r0
# if __ARM_ARCH_ISA_THUMB == 1
mov ip, r0
adr r0, LOCAL_LABEL(div0block)
adds r0, #1
# else
adr ip, LOCAL_LABEL(div0block)
lsr r3, r2, #16
# endif
lsrs r3, r2, #16
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_16)
movs r2, r3
subs r0, r0, #(16 * BLOCK_SIZE)
LOCAL_LABEL(skip_16):
# else
movhs r2, r3
subhs ip, ip, #(16 * 12)
subhs ip, ip, #(16 * BLOCK_SIZE)
# endif
lsr r3, r2, #8
lsrs r3, r2, #8
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_8)
movs r2, r3
subs r0, r0, #(8 * BLOCK_SIZE)
LOCAL_LABEL(skip_8):
# else
movhs r2, r3
subhs ip, ip, #(8 * 12)
subhs ip, ip, #(8 * BLOCK_SIZE)
# endif
lsr r3, r2, #4
lsrs r3, r2, #4
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_4)
movs r2, r3
subs r0, r0, #(4 * BLOCK_SIZE)
LOCAL_LABEL(skip_4):
# else
movhs r2, r3
subhs ip, #(4 * 12)
subhs ip, #(4 * BLOCK_SIZE)
# endif
lsr r3, r2, #2
lsrs r3, r2, #2
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_2)
movs r2, r3
subs r0, r0, #(2 * BLOCK_SIZE)
LOCAL_LABEL(skip_2):
# else
movhs r2, r3
subhs ip, ip, #(2 * 12)
subhs ip, ip, #(2 * BLOCK_SIZE)
# endif
/* Last block, no need to update r2 or r3. */
cmp r1, r2, lsr #1
subls ip, ip, #(1 * 12)
# if __ARM_ARCH_ISA_THUMB == 1
lsrs r3, r2, #1
cmp r3, r1
blo LOCAL_LABEL(skip_1)
subs r0, r0, #(1 * BLOCK_SIZE)
LOCAL_LABEL(skip_1):
movs r2, r0
mov r0, ip
movs r3, #0
JMP (r2)
mov r3, #0
# else
cmp r1, r2, lsr #1
subls ip, ip, #(1 * BLOCK_SIZE)
movs r3, #0
JMP(ip)
# endif
# endif
# endif /* __ARM_FEATURE_CLZ */
#define IMM #
/* Due to the branch range limit in Thumb1, we have to place the divby0
block closer. */
LOCAL_LABEL(divby0):
movs r0, #0
# if defined(__ARM_EABI__)
bl __aeabi_idiv0 // due to relocation limit, can't use b.
# endif
JMP(lr)
#if __ARM_ARCH_ISA_THUMB == 1
#define block(shift) \
lsls r2, r1, IMM shift; \
cmp r0, r2; \
blo LOCAL_LABEL(block_skip_##shift); \
subs r0, r0, r2; \
LOCAL_LABEL(block_skip_##shift) :; \
adcs r3, r3 /* same as ((r3 << 1) | Carry). Carry is set if r0 >= r2. */
/* TODO: if the current location counter is already word aligned, we don't
need the .p2align and nop */
/* Label div0block must be word-aligned. First align block 31 */
.p2align 2
nop /* Padding to align div0block as 31 blocks = 310 bytes */
#else
#define block(shift) \
cmp r0, r1, lsl IMM shift; \
ITT(hs); \
WIDE(addhs) r3, r3, IMM (1 << shift); \
WIDE(subhs) r0, r0, r1, lsl IMM shift
#endif
block(31)
block(30)
@ -159,12 +252,14 @@ LOCAL_LABEL(div0block):
JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */
#if __ARM_ARCH_EXT_IDIV__
LOCAL_LABEL(divby0):
mov r0, #0
#ifdef __ARM_EABI__
b __aeabi_idiv0
#else
JMP(lr)
mov r0, #0
# ifdef __ARM_EABI__
b __aeabi_idiv0
# else
JMP(lr)
# endif
#endif
END_COMPILERRT_FUNCTION(__udivsi3)
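
For readers tracing the unrolled division, a C-level model of what the chain of block() steps computes (a sketch for illustration, not part of the runtime):

  // Each block(shift) subtracts (denom << shift) from the numerator when it
  // fits and shifts the matching quotient bit into the result register.
  unsigned udivsi3_model(unsigned n, unsigned d) {
    unsigned q = 0;
    if (d == 0) return 0;            // the real code raises __aeabi_idiv0
    for (int shift = 31; shift >= 0; --shift) {
      if ((n >> shift) >= d) {       // overflow-safe form of n >= (d << shift)
        n -= d << shift;
        q |= 1u << shift;
      }
    }
    return q;                        // the remainder is left in n
  }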

View File

@ -70,7 +70,7 @@
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && \
#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
@ -149,6 +149,7 @@
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
#if defined(__ARM_EABI__)

View File

@ -229,13 +229,20 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
/* FIXME: __uint128_t isn't available on 32 bit platforms.
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\

View File

@ -110,10 +110,12 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__)
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
__asm __volatile("svc 0x0"
: "=r"(start_reg)
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg));
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
"r"(flags));
if (start_reg != 0) {
compilerrt_abort();
}

View File

@ -0,0 +1,82 @@
//===-- lib/floattitf.c - int128 -> quad-precision conversion -----*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements ti_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//
#define QUAD_PRECISION
#include "fp_lib.h"
#include "int_lib.h"
/* Returns: convert a ti_int to a fp_t, rounding toward even. */
/* Assumption: fp_t is an IEEE 128 bit floating point type
* ti_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floattitf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
* 1 = msb 1 bit
* P = bit LDBL_MANT_DIG-1 bits to the right of 1
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}
long_double_bits fb;
fb.u.high.all = (s & 0x8000000000000000LL) /* sign */
| (du_int)(e + 16383) << 48 /* exponent */
| ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
fb.u.low.all = (du_int)(a);
return fb.f;
}
#endif

View File

@ -0,0 +1,79 @@
//===-- lib/floatuntitf.c - uint128 -> quad-precision conversion --*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements tu_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//
#define QUAD_PRECISION
#include "fp_lib.h"
#include "int_lib.h"
/* Returns: convert a tu_int to a fp_t, rounding toward even. */
/* Assumption: fp_t is an IEEE 128 bit floating point type
* tu_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floatuntitf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
* 1 = msb 1 bit
* P = bit LDBL_MANT_DIG-1 bits to the right of 1
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}
long_double_bits fb;
fb.u.high.all = (du_int)(e + 16383) << 48 /* exponent */
| ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
fb.u.low.all = (du_int)(a);
return fb.f;
}
#endif

View File

@ -121,14 +121,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
#include <intrin.h>
uint32_t __inline __builtin_ctz(uint32_t value) {
uint32_t trailing_zero = 0;
unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value))
return trailing_zero;
return 32;
}
uint32_t __inline __builtin_clz(uint32_t value) {
uint32_t leading_zero = 0;
unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value))
return 31 - leading_zero;
return 32;
@ -136,7 +136,7 @@ uint32_t __inline __builtin_clz(uint32_t value) {
#if defined(_M_ARM) || defined(_M_X64)
uint32_t __inline __builtin_clzll(uint64_t value) {
uint32_t leading_zero = 0;
unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value))
return 63 - leading_zero;
return 64;

View File

@ -0,0 +1,36 @@
/* ===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===
*
* The LLVM Compiler Infrastructure
*
* This file is dual licensed under the MIT and the University of Illinois Open
* Source Licenses. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
COMPILER_RT_ABI di_int __fixdfdi(double a);
COMPILER_RT_ABI di_int __fixsfdi(float a);
COMPILER_RT_ABI du_int __fixunsdfdi(double a);
COMPILER_RT_ABI du_int __fixunssfdi(float a);
COMPILER_RT_ABI double __floatdidf(di_int a);
COMPILER_RT_ABI float __floatdisf(di_int a);
COMPILER_RT_ABI double __floatundidf(du_int a);
COMPILER_RT_ABI float __floatundisf(du_int a);
COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }

View File

@ -30,6 +30,8 @@ typedef ElfW(Ehdr) Elf_Ehdr;
#include "ubsan/ubsan_handlers.h"
#endif
using namespace __sanitizer;
namespace __cfi {
#define kCfiShadowLimitsStorageSize 4096 // 1 page

View File

@ -114,6 +114,26 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
// | reserved by kernel |
// +--------------------+ 0x0000000000
// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
//
// +--------------------+ 0x1000000000000 (top of memory)
// | application memory |
// +--------------------+ 0xffff00008000 (kAppAddr)
// | unused |
// +--------------------+ 0xaaaab0000000 (top of PIE address)
// | application PIE |
// +--------------------+ 0xaaaaa0000000 (top of PIE address)
// | |
// | unused |
// | |
// +--------------------+ 0x1200000000 (kUnusedAddr)
// | union table |
// +--------------------+ 0x8000000000 (kUnionTableAddr)
// | shadow memory |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
#ifdef DFSAN_RUNTIME_VMA
@ -372,11 +392,12 @@ static void InitializePlatformEarly() {
#ifdef DFSAN_RUNTIME_VMA
__dfsan::vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42) {
if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
__dfsan::vmaSize == 48) {
__dfsan_shadow_ptr_mask = ShadowMask();
} else {
Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
Printf("FATAL: Found %d - Supported 39 and 42\n", __dfsan::vmaSize);
Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
Die();
}
#endif

View File

@ -18,6 +18,9 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "dfsan_platform.h"
using __sanitizer::uptr;
using __sanitizer::u16;
// Copy declarations from public sanitizer/dfsan_interface.h header here.
typedef u16 dfsan_label;

View File

@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
using namespace __sanitizer;
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);

View File

@ -46,6 +46,13 @@ struct Mapping42 {
static const uptr kShadowMask = ~0x3c000000000;
};
struct Mapping48 {
static const uptr kShadowAddr = 0x10000;
static const uptr kUnionTableAddr = 0x8000000000;
static const uptr kAppAddr = 0xffff00008000;
static const uptr kShadowMask = ~0xfffff0000000;
};
extern int vmaSize;
# define DFSAN_RUNTIME_VMA 1
#else
@ -72,11 +79,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
if (vmaSize == 39)
return MappingImpl<Mapping39, Type>();
else
return MappingImpl<Mapping42, Type>();
switch (vmaSize) {
case 39: return MappingImpl<Mapping39, Type>();
case 42: return MappingImpl<Mapping42, Type>();
case 48: return MappingImpl<Mapping48, Type>();
}
DCHECK(0);
return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
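
A quick consistency check on the new 48-bit constants (a sketch; dfsan's shadow computation is (addr & ShadowMask()) << 1, the shift accounting for the two-byte dfsan_label):

  // The lowest application address maps exactly to the start of the shadow:
  //   (0xffff00008000 & ~0xfffff0000000) << 1 == 0x8000 << 1 == 0x10000
  static_assert(((Mapping48::kAppAddr & Mapping48::kShadowMask) << 1) ==
                    Mapping48::kShadowAddr,
                "Mapping48 shadow of kAppAddr must be kShadowAddr");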

View File

@ -266,6 +266,14 @@ fun:reflect.makeFuncStub=discard
# Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp
fun:__sanitizer_cov_trace_cmp=custom
fun:__sanitizer_cov_trace_cmp=uninstrumented
fun:__sanitizer_cov_trace_cmp1=custom
fun:__sanitizer_cov_trace_cmp1=uninstrumented
fun:__sanitizer_cov_trace_cmp2=custom
fun:__sanitizer_cov_trace_cmp2=uninstrumented
fun:__sanitizer_cov_trace_cmp4=custom
fun:__sanitizer_cov_trace_cmp4=uninstrumented
fun:__sanitizer_cov_trace_cmp8=custom
fun:__sanitizer_cov_trace_cmp8=uninstrumented
# Similar for __sanitizer_cov_trace_switch
fun:__sanitizer_cov_trace_switch=custom
fun:__sanitizer_cov_trace_switch=uninstrumented

View File

@ -94,8 +94,8 @@ static void reportStructCounter(StructHashMap::Handle &Handle) {
type = "struct";
start = &Struct->StructName[7];
}
// Remove the suffixes with '#' during print.
end = strchr(start, '#');
// Remove the suffixes with '$' during print.
end = strchr(start, '$');
CHECK(end != nullptr);
Report(" %s %.*s\n", type, end - start, start);
Report(" size = %u, count = %llu, ratio = %llu, array access = %llu\n",

View File

@ -141,9 +141,17 @@ static bool verifyShadowScheme() {
}
#endif
uptr VmaSize;
static void initializeShadow() {
verifyAddressSpace();
// This is based on the assumption that the initial stack is always allocated
// in the topmost segment of the user address space, an assumption that
// holds on all platforms currently supported.
VmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
DCHECK(verifyShadowScheme());
Mapping.initialize(ShadowScale[__esan_which_tool]);

View File

@ -34,6 +34,7 @@ namespace __esan {
extern bool EsanIsInitialized;
extern bool EsanDuringInit;
extern uptr VmaSize;
void initializeLibrary(ToolType Tool);
int finalizeLibrary();

View File

@ -17,6 +17,8 @@
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
using namespace __sanitizer;
namespace __esan {
static const char EsanOptsEnv[] = "ESAN_OPTIONS";

View File

@ -0,0 +1,381 @@
//===-- esan_hashtable.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of EfficiencySanitizer, a family of performance tuners.
//
// Generic resizing hashtable.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include <stddef.h>
namespace __esan {
//===----------------------------------------------------------------------===//
// Default hash and comparison functions
//===----------------------------------------------------------------------===//
template <typename T> struct DefaultHash {
size_t operator()(const T &Key) const {
return (size_t)Key;
}
};
template <typename T> struct DefaultEqual {
bool operator()(const T &Key1, const T &Key2) const {
return Key1 == Key2;
}
};
//===----------------------------------------------------------------------===//
// HashTable declaration
//===----------------------------------------------------------------------===//
// A simple resizing and mutex-locked hashtable.
//
// If the default hash functor is used, KeyTy must have an operator size_t().
// If the default comparison functor is used, KeyTy must have an operator ==.
//
// By default all operations are internally-synchronized with a mutex, with no
// synchronization for payloads once hashtable functions return. If
// ExternalLock is set to true, the caller should call the lock() and unlock()
// routines around all hashtable operations and subsequent manipulation of
// payloads.
template <typename KeyTy, typename DataTy, bool ExternalLock = false,
typename HashFuncTy = DefaultHash<KeyTy>,
typename EqualFuncTy = DefaultEqual<KeyTy> >
class HashTable {
public:
// InitialCapacity must be a power of 2.
// ResizeFactor must be between 1 and 99 and indicates the
// maximum percentage full that the table should ever be.
HashTable(u32 InitialCapacity = 2048, u32 ResizeFactor = 70);
~HashTable();
bool lookup(const KeyTy &Key, DataTy &Payload); // Const except for Mutex.
bool add(const KeyTy &Key, const DataTy &Payload);
bool remove(const KeyTy &Key);
u32 size(); // Const except for Mutex.
// If the table is internally-synchronized, this lock must not be held
// while a hashtable function is called as it will deadlock: the lock
// is not recursive. This is meant for use with externally-synchronized
// tables or with an iterator.
void lock();
void unlock();
private:
struct HashEntry {
KeyTy Key;
DataTy Payload;
HashEntry *Next;
};
public:
struct HashPair {
HashPair(KeyTy Key, DataTy Data) : Key(Key), Data(Data) {}
KeyTy Key;
DataTy Data;
};
// This iterator does not perform any synchronization.
// It expects the caller to lock the table across the whole iteration.
// Calling HashTable functions while using the iterator is not supported.
// The iterator returns copies of the keys and data.
class iterator {
public:
iterator(
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table);
iterator(const iterator &Src) = default;
iterator &operator=(const iterator &Src) = default;
HashPair operator*();
iterator &operator++();
iterator &operator++(int);
bool operator==(const iterator &Cmp) const;
bool operator!=(const iterator &Cmp) const;
private:
iterator(
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
int Idx);
friend HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>;
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table;
int Idx;
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashEntry
*Entry;
};
// No erase or insert iterator supported
iterator begin();
iterator end();
private:
void resize();
HashEntry **Table;
u32 Capacity;
u32 Entries;
const u32 ResizeFactor;
BlockingMutex Mutex;
const HashFuncTy HashFunc;
const EqualFuncTy EqualFunc;
};
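A minimal usage sketch of the interface above (editorial illustration, not part of this change; the table name and example function are invented, and uptr/u32 are the sanitizer integer types already in scope):

// Internally-synchronized table mapping addresses to counters.
HashTable<uptr, u32> Counts;

void exampleUse() {
  Counts.add(0x1000, 1);           // Returns true: the key was absent.
  Counts.add(0x1000, 2);           // Returns false: add() never overwrites.
  u32 Val;
  if (Counts.lookup(0x1000, Val))  // Val is now 1.
    Counts.remove(0x1000);
  // Iteration requires holding the lock for the whole loop; no other
  // HashTable calls may be made while it is held (the mutex is not
  // recursive).
  Counts.lock();
  for (auto It = Counts.begin(); It != Counts.end(); ++It) {
    HashTable<uptr, u32>::HashPair Pair = *It;
    (void)Pair.Key;
    (void)Pair.Data;
  }
  Counts.unlock();
}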
//===----------------------------------------------------------------------===//
// Hashtable implementation
//===----------------------------------------------------------------------===//
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashTable(
u32 InitialCapacity, u32 ResizeFactor)
: Capacity(InitialCapacity), Entries(0), ResizeFactor(ResizeFactor),
HashFunc(HashFuncTy()), EqualFunc(EqualFuncTy()) {
CHECK(IsPowerOfTwo(Capacity));
CHECK(ResizeFactor >= 1 && ResizeFactor <= 99);
Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::~HashTable() {
for (u32 i = 0; i < Capacity; ++i) {
HashEntry *Entry = Table[i];
while (Entry != nullptr) {
HashEntry *Next = Entry->Next;
Entry->Payload.~DataTy();
InternalFree(Entry);
Entry = Next;
}
}
InternalFree(Table);
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
u32 HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::size() {
u32 Res;
if (!ExternalLock)
Mutex.Lock();
Res = Entries;
if (!ExternalLock)
Mutex.Unlock();
return Res;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lookup(
const KeyTy &Key, DataTy &Payload) {
if (!ExternalLock)
Mutex.Lock();
bool Found = false;
size_t Hash = HashFunc(Key) % Capacity;
HashEntry *Entry = Table[Hash];
for (; Entry != nullptr; Entry = Entry->Next) {
if (EqualFunc(Entry->Key, Key)) {
Payload = Entry->Payload;
Found = true;
break;
}
}
if (!ExternalLock)
Mutex.Unlock();
return Found;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::resize() {
if (!ExternalLock)
Mutex.CheckLocked();
size_t OldCapacity = Capacity;
HashEntry **OldTable = Table;
Capacity *= 2;
Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
// Re-hash
for (u32 i = 0; i < OldCapacity; ++i) {
HashEntry *OldEntry = OldTable[i];
while (OldEntry != nullptr) {
HashEntry *Next = OldEntry->Next;
size_t Hash = HashFunc(OldEntry->Key) % Capacity;
OldEntry->Next = Table[Hash];
Table[Hash] = OldEntry;
OldEntry = Next;
}
}
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::add(
const KeyTy &Key, const DataTy &Payload) {
if (!ExternalLock)
Mutex.Lock();
bool Exists = false;
size_t Hash = HashFunc(Key) % Capacity;
HashEntry *Entry = Table[Hash];
for (; Entry != nullptr; Entry = Entry->Next) {
if (EqualFunc(Entry->Key, Key)) {
Exists = true;
break;
}
}
if (!Exists) {
Entries++;
if (Entries * 100 >= Capacity * ResizeFactor) {
resize();
Hash = HashFunc(Key) % Capacity;
}
HashEntry *Add = (HashEntry *)InternalAlloc(sizeof(*Add));
Add->Key = Key;
Add->Payload = Payload;
Add->Next = Table[Hash];
Table[Hash] = Add;
}
if (!ExternalLock)
Mutex.Unlock();
return !Exists;
}
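As a worked example of the growth check above: with the default InitialCapacity of 2048 and ResizeFactor of 70, the insertion that raises Entries to 1434 satisfies 1434 * 100 >= 2048 * 70 (143400 >= 143360), so the table doubles to 4096 buckets at roughly 70% occupancy and every chained entry is re-hashed under the new modulus before the new entry is linked in.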
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::remove(
const KeyTy &Key) {
if (!ExternalLock)
Mutex.Lock();
bool Found = false;
size_t Hash = HashFunc(Key) % Capacity;
HashEntry *Entry = Table[Hash];
HashEntry *Prev = nullptr;
for (; Entry != nullptr; Prev = Entry, Entry = Entry->Next) {
if (EqualFunc(Entry->Key, Key)) {
Found = true;
Entries--;
if (Prev == nullptr)
Table[Hash] = Entry->Next;
else
Prev->Next = Entry->Next;
Entry->Payload.~DataTy();
InternalFree(Entry);
break;
}
}
if (!ExternalLock)
Mutex.Unlock();
return Found;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lock() {
Mutex.Lock();
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::unlock() {
Mutex.Unlock();
}
//===----------------------------------------------------------------------===//
// Iterator implementation
//===----------------------------------------------------------------------===//
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
iterator(
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table)
: Table(Table), Idx(-1), Entry(nullptr) {
operator++();
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
iterator(
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
int Idx)
: Table(Table), Idx(Idx), Entry(nullptr) {
CHECK(Idx >= (int)Table->Capacity); // Only used to create end().
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
EqualFuncTy>::HashPair
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
operator*() {
CHECK(Idx >= 0 && Idx < (int)Table->Capacity);
CHECK(Entry != nullptr);
return HashPair(Entry->Key, Entry->Payload);
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
EqualFuncTy>::iterator &
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
operator++() {
if (Entry != nullptr)
Entry = Entry->Next;
while (Entry == nullptr) {
++Idx;
if (Idx >= (int)Table->Capacity)
break; // At end().
Entry = Table->Table[Idx];
}
return *this;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
EqualFuncTy>::iterator
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
operator++(int) {
iterator Temp(*this);
operator++();
return Temp;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
operator==(const iterator &Cmp) const {
return Cmp.Table == Table && Cmp.Idx == Idx && Cmp.Entry == Entry;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
operator!=(const iterator &Cmp) const {
return Cmp.Table != Table || Cmp.Idx != Idx || Cmp.Entry != Entry;
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
EqualFuncTy>::iterator
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::begin() {
return iterator(this);
}
template <typename KeyTy, typename DataTy, bool ExternalLock,
typename HashFuncTy, typename EqualFuncTy>
typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
EqualFuncTy>::iterator
HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::end() {
return iterator(this, Capacity);
}
} // namespace __esan

View File

@ -461,28 +461,35 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
// Malloc interceptors
//===----------------------------------------------------------------------===//
static char early_alloc_buf[128];
static bool used_early_alloc_buf;
static const uptr early_alloc_buf_size = 4096;
static uptr allocated_bytes;
static char early_alloc_buf[early_alloc_buf_size];
static bool isInEarlyAllocBuf(const void *ptr) {
return ((uptr)ptr >= (uptr)early_alloc_buf &&
((uptr)ptr - (uptr)early_alloc_buf) < sizeof(early_alloc_buf));
}
static void *handleEarlyAlloc(uptr size) {
// If esan is initialized during an interceptor (which happens with some
// tcmalloc implementations that call pthread_mutex_lock), the call from
// dlsym to calloc will deadlock. There is only one such calloc (dlsym
// allocates a single pthread key), so we work around it by using a
// static buffer for the calloc request. The loader currently needs
// 32 bytes but we size at 128 to allow for future changes.
// dlsym to calloc will deadlock.
// dlsym may also call malloc before REAL(malloc) is retrieved from dlsym.
// We work around it by using a static buffer for the early malloc/calloc
// requests.
// This solution will also allow us to deliberately intercept malloc & family
// in the future (to perform tool actions on each allocation, without
// replacing the allocator), as it also solves the problem of intercepting
// calloc when it is itself called before its REAL pointer has been
// initialized.
CHECK(!used_early_alloc_buf && size < sizeof(early_alloc_buf));
// We do not handle multiple threads here. This only happens at process init
// time, and while it's possible for a shared library to create early threads
// that race here, we consider that to be a corner case extreme enough that
// it's not worth the effort to handle.
used_early_alloc_buf = true;
return (void *)early_alloc_buf;
void *mem = (void *)&early_alloc_buf[allocated_bytes];
allocated_bytes += size;
CHECK_LT(allocated_bytes, early_alloc_buf_size);
return mem;
}
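To make the bump allocation above concrete (the sizes here are illustrative): if dlsym's single calloc request needs 32 bytes, handleEarlyAlloc returns &early_alloc_buf[0] and advances allocated_bytes to 32; a following early malloc of 24 bytes would get &early_alloc_buf[32]. Nothing is ever handed back: the free interceptor below detects such pointers with isInEarlyAllocBuf and skips REAL(free), which is acceptable because only a handful of requests can occur before initialization finishes and the 4096-byte buffer is simply never reclaimed.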
INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@ -496,14 +503,20 @@ INTERCEPTOR(void*, calloc, uptr size, uptr n) {
return res;
}
INTERCEPTOR(void*, malloc, uptr size) {
if (EsanDuringInit && REAL(malloc) == nullptr)
return handleEarlyAlloc(size);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, malloc, size);
return REAL(malloc)(size);
}
INTERCEPTOR(void, free, void *p) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, free, p);
if (p == (void *)early_alloc_buf) {
// We expect just a singleton use but we clear this for cleanliness.
used_early_alloc_buf = false;
// There are only a few early allocation requests, so we simply skip the free.
if (isInEarlyAllocBuf(p))
return;
}
COMMON_INTERCEPTOR_ENTER(ctx, free, p);
REAL(free)(p);
}
@ -534,6 +547,7 @@ void initializeInterceptors() {
ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK;
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
// TODO(bruening): intercept routines that other sanitizers intercept that

View File

@ -21,6 +21,9 @@
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __esan_.
using __sanitizer::uptr;
using __sanitizer::u32;
extern "C" {
// This should be kept consistent with LLVM's EfficiencySanitizerOptions.

View File

@ -25,7 +25,7 @@
namespace __esan {
void verifyAddressSpace() {
#if SANITIZER_LINUX && defined(__x86_64__)
#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
// The kernel determines its mmap base from the stack size limit.
// Our Linux 64-bit shadow mapping assumes the stack limit is less than a
// terabyte, which keeps the mmap region above 0x7e00'.

View File

@ -15,6 +15,7 @@
#ifndef ESAN_SHADOW_H
#define ESAN_SHADOW_H
#include "esan.h"
#include <sanitizer_common/sanitizer_platform.h>
#if SANITIZER_WORDSIZE != 64
@ -23,6 +24,12 @@
namespace __esan {
struct ApplicationRegion {
uptr Start;
uptr End;
bool ShadowMergedWithPrev;
};
#if SANITIZER_LINUX && defined(__x86_64__)
// Linux x86_64
//
@ -89,12 +96,6 @@ namespace __esan {
// [0x000015ff'ff601000, 0x00001600'00000000]
// [0x000015ff'ff600000, 0x000015ff'ff601000]
struct ApplicationRegion {
uptr Start;
uptr End;
bool ShadowMergedWithPrev;
};
static const struct ApplicationRegion AppRegions[] = {
{0x0000000000000000ull, 0x0000010000000000u, false},
{0x0000550000000000u, 0x0000570000000000u, false},
@ -105,6 +106,52 @@ static const struct ApplicationRegion AppRegions[] = {
{0x00007fffff601000u, 0x0000800000000000u, true},
{0xffffffffff600000u, 0xffffffffff601000u, true},
};
#elif SANITIZER_LINUX && SANITIZER_MIPS64
// Application memory falls into these 3 regions
//
// [0x00000001'00000000, 0x00000002'00000000) non-PIE + heap
// [0x000000aa'00000000, 0x000000ab'00000000) PIE
// [0x000000ff'00000000, 0x000000ff'ffffffff) libraries + stack
//
// This formula translates from application memory to shadow memory:
//
// shadow(app) = ((app & 0x00000f'ffffffff) + offset) >> scale
//
// Where the offset for 1:1 is 0x000013'00000000. For other scales, the
// offset is shifted left by the scale, except for scales of 1 and 2 where
// it must be tweaked in order to pass the double-shadow test
// (see the "shadow(shadow)" comments below):
// scale == 0: 0x000013'00000000
// scale == 1: 0x000022'00000000
// scale == 2: 0x000044'00000000
// scale >= 3: (0x000013'00000000 << scale)
//
// The resulting shadow memory regions for a 0 scaling are:
//
// [0x00000014'00000000, 0x00000015'00000000)
// [0x0000001d'00000000, 0x0000001e'00000000)
// [0x00000022'00000000, 0x00000022'ffffffff)
//
// We also want to ensure that a wild access by the application into the shadow
// regions will not corrupt our own shadow memory. shadow(shadow) ends up
// disjoint from shadow(app):
//
// [0x00000017'00000000, 0x00000018'00000000)
// [0x00000020'00000000, 0x00000021'00000000)
// [0x00000015'00000000, 0x00000015'ffffffff]
static const struct ApplicationRegion AppRegions[] = {
{0x0100000000u, 0x0200000000u, false},
{0xaa00000000u, 0xab00000000u, false},
{0xff00000000u, 0xffffffffffu, false},
};
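// Editor's worked check of the scale == 0 formula above (not part of the
// original change): 0x00000001'00000000 & 0xf'ffffffff is unchanged and
// + 0x13'00000000 yields shadow 0x00000014'00000000; 0x000000aa'00000000
// masks to 0x0a'00000000 and maps to 0x0000001d'00000000; feeding shadow
// 0x00000014'00000000 back through the formula gives 0x00000017'00000000,
// confirming that shadow(shadow) stays disjoint from shadow(app).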
#else
#error Platform not supported
#endif
static const u32 NumAppRegions = sizeof(AppRegions)/sizeof(AppRegions[0]);
// See the comment above: we do not currently support a stack size rlimit
@ -113,29 +160,59 @@ static const uptr MaxStackSize = (1ULL << 40) - 4096;
class ShadowMapping {
public:
static const uptr Mask = 0x00000fffffffffffu;
// The scale and offset vary by tool.
uptr Scale;
uptr Offset;
// TODO(sagar.thakur): Try to hardcode the mask as done in the compiler
// instrumentation to reduce the runtime cost of appToShadow.
struct ShadowMemoryMask40 {
static const uptr Mask = 0x0000000fffffffffu;
};
struct ShadowMemoryMask47 {
static const uptr Mask = 0x00000fffffffffffu;
};
void initialize(uptr ShadowScale) {
static const uptr OffsetArray[3] = {
0x0000130000000000u,
0x0000220000000000u,
0x0000440000000000u,
const uptr OffsetArray40[3] = {
0x0000001300000000u,
0x0000002200000000u,
0x0000004400000000u,
};
const uptr OffsetArray47[3] = {
0x0000130000000000u,
0x0000220000000000u,
0x0000440000000000u,
};
Scale = ShadowScale;
if (Scale <= 2)
Offset = OffsetArray[Scale];
else
Offset = OffsetArray[0] << Scale;
switch (VmaSize) {
case 40: {
if (Scale <= 2)
Offset = OffsetArray40[Scale];
else
Offset = OffsetArray40[0] << Scale;
}
break;
case 47: {
if (Scale <= 2)
Offset = OffsetArray47[Scale];
else
Offset = OffsetArray47[0] << Scale;
}
break;
default: {
Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
Die();
}
}
}
};
extern ShadowMapping Mapping;
#else
// We'll want to use templatized functions over the ShadowMapping once
// we support more platforms.
#error Platform not supported
#endif
static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
if (i >= NumAppRegions)
@ -154,9 +231,21 @@ bool isAppMem(uptr Mem) {
return false;
}
template<typename Params>
uptr appToShadowImpl(uptr App) {
return (((App & Params::Mask) + Mapping.Offset) >> Mapping.Scale);
}
ALWAYS_INLINE
uptr appToShadow(uptr App) {
return (((App & ShadowMapping::Mask) + Mapping.Offset) >> Mapping.Scale);
switch (VmaSize) {
case 40: return appToShadowImpl<ShadowMapping::ShadowMemoryMask40>(App);
case 47: return appToShadowImpl<ShadowMapping::ShadowMemoryMask47>(App);
default: {
Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
Die();
}
}
}
static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {

View File

@ -92,8 +92,8 @@ typedef __sanitizer::OFF64_T OFF64_T;
// Just a pair of pointers.
struct interpose_substitution {
const uptr replacement;
const uptr original;
const __sanitizer::uptr replacement;
const __sanitizer::uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
@ -158,10 +158,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
#endif // __APPLE__
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \

View File

@ -148,10 +148,16 @@ static void InterceptionFailed() {
}
static bool DistanceIsWithin2Gig(uptr from, uptr target) {
#if SANITIZER_WINDOWS64
if (from < target)
return target - from <= (uptr)0x7FFFFFFFU;
else
return from - target <= (uptr)0x80000000U;
#else
// In a 32-bit address space, the address calculation will wrap, so this check
// is unnecessary.
return true;
#endif
}
static uptr GetMmapGranularity() {
@ -167,6 +173,21 @@ static uptr RoundUpTo(uptr size, uptr boundary) {
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
static size_t _strlen(const char *str) {
const char* p = str;
while (*p != '\0') ++p;
return p - str;
}
static char* _strchr(char* str, char c) {
while (*str) {
if (*str == c)
return str;
++str;
}
return nullptr;
}
static void _memset(void *p, int value, size_t sz) {
for (size_t i = 0; i < sz; ++i)
((char*)p)[i] = (char)value;
@ -229,10 +250,6 @@ static void WritePadding(uptr from, uptr size) {
_memset((void*)from, 0xCC, (size_t)size);
}
static void CopyInstructions(uptr from, uptr to, uptr size) {
_memcpy((void*)from, (void*)to, (size_t)size);
}
static void WriteJumpInstruction(uptr from, uptr target) {
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
InterceptionFailed();
@ -294,7 +311,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
static const uptr kTrampolineScanLimitRange = 1 << 30; // 1 gig
static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
@ -384,7 +401,7 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
}
// Returns 0 on error.
static size_t GetInstructionSize(uptr address) {
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@ -410,7 +427,6 @@ static size_t GetInstructionSize(uptr address) {
case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
return 5;
// Cannot overwrite control-instruction. Return 0 to indicate failure.
@ -452,7 +468,18 @@ static size_t GetInstructionSize(uptr address) {
return 0;
}
switch (0x00FFFFFF & *(u32*)address) {
case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
return 7;
}
#if SANITIZER_WINDOWS64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 8;
}
switch (*(u16*)address) {
case 0x5040: // push rax
case 0x5140: // push rcx
@ -477,17 +504,20 @@ static size_t GetInstructionSize(uptr address) {
case 0xd9f748: // 48 f7 d9 : neg rcx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
case 0xc98548: // 48 85 C9 : test rcx, rcx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
case 0xdb3345: // 45 33 DB : xor r11d, r11d
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 4d 0b c0 : or r8, r8
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
case 0xdc8b4c: // 4c 8b dc : mov r11,rsp
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
return 3;
@ -496,11 +526,22 @@ static size_t GetInstructionSize(uptr address) {
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
return 4;
case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
return 7;
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
// Instructions having offset relative to 'rip' need offset adjustment.
if (rel_offset)
*rel_offset = 3;
return 7;
case 0x2444c7: // C7 44 24 XX YY YY YY YY
// mov dword ptr [rsp + XX], YYYYYYYY
return 8;
}
switch (*(u32*)(address)) {
@ -513,6 +554,10 @@ static size_t GetInstructionSize(uptr address) {
#else
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
return 5;
}
switch (*(u16*)address) {
case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
@ -566,6 +611,28 @@ static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
return cursor;
}
static bool CopyInstructions(uptr to, uptr from, size_t size) {
size_t cursor = 0;
while (cursor != size) {
size_t rel_offset = 0;
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
_memcpy((void*)(to + cursor), (void*)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
uptr delta = to - from;
uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
#if SANITIZER_WINDOWS64
if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
return false;
#endif
*(u32*)(to + cursor + rel_offset) = relocated_offset;
}
cursor += instruction_size;
}
return true;
}
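A worked illustration of the fixup above (editorial, with made-up addresses): copying the 7-byte instruction 48 8b 05 20 00 00 00 (mov rax, [rip+0x20]) to a trampoline 0x10000000 bytes above its original location must keep it addressing the same absolute target, so the stored displacement becomes 0x20 - 0x10000000, i.e. -0x0FFFFFE0, written back as the 32-bit value 0xF0000020; the SANITIZER_WINDOWS64 guard fails the copy only when the adjusted displacement no longer fits in a signed 32-bit field.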
#if !SANITIZER_WINDOWS64
bool OverrideFunctionWithDetour(
uptr old_func, uptr new_func, uptr *orig_old_func) {
@ -656,7 +723,8 @@ bool OverrideFunctionWithHotPatch(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
CopyInstructions(trampoline, old_func, instruction_size);
if (!CopyInstructions(trampoline, old_func, instruction_size))
return false;
WriteDirectBranch(trampoline + instruction_size,
old_func + instruction_size);
*orig_old_func = trampoline;
@ -705,7 +773,8 @@ bool OverrideFunctionWithTrampoline(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
CopyInstructions(trampoline, old_func, instructions_length);
if (!CopyInstructions(trampoline, old_func, instructions_length))
return false;
WriteDirectBranch(trampoline + instructions_length,
old_func + instructions_length);
*orig_old_func = trampoline;
@ -820,6 +889,32 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
if (!strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
// Handle forwarded functions.
DWORD offset = functions[index];
if (offset >= export_directory->VirtualAddress &&
offset < export_directory->VirtualAddress + export_directory->Size) {
// An entry for a forwarded function is a string with the following
// format: "<module>.<function_name>" that is stored in the
// export directory.
char function_name[256];
size_t function_name_length = _strlen(func);
if (function_name_length >= sizeof(function_name) - 1)
InterceptionFailed();
_memcpy(function_name, func, function_name_length);
function_name[function_name_length] = '\0';
char* separator = _strchr(function_name, '.');
if (!separator)
InterceptionFailed();
*separator = '\0';
void* redirected_module = GetModuleHandleA(function_name);
if (!redirected_module)
InterceptionFailed();
return InternalGetProcAddress(redirected_module, separator + 1);
}
return (uptr)(char *)func;
}
}
@ -827,19 +922,18 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
return 0;
}
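A concrete instance of such forwarding (a well-known Windows example, not taken from this patch): kernel32.dll exports HeapAlloc as the forwarder string "NTDLL.RtlAllocateHeap", so the code above copies that string, splits it at the '.', loads ntdll via GetModuleHandleA, and recurses to resolve RtlAllocateHeap there.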
static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
*func_addr = 0;
bool OverrideFunction(
const char *func_name, uptr new_func, uptr *orig_old_func) {
bool hooked = false;
void **DLLs = InterestingDLLsAvailable();
for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
*func_addr = InternalGetProcAddress(DLLs[i], func_name);
return (*func_addr != 0);
}
bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
uptr orig_func;
if (!GetFunctionAddressInDLLs(name, &orig_func))
return false;
return OverrideFunction(orig_func, new_func, orig_old_func);
for (size_t i = 0; DLLs[i]; ++i) {
uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
if (func_addr &&
OverrideFunction(func_addr, new_func, orig_old_func)) {
hooked = true;
}
}
return hooked;
}
bool OverrideImportedFunction(const char *module_to_patch,

View File

@ -43,10 +43,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x600000000000ULL;
static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
static const uptr kMetadataSize = sizeof(ChunkMetadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
@ -57,7 +64,9 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {

View File

@ -32,6 +32,7 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
@ -449,6 +450,8 @@ static bool CheckForLeaks() {
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
Report(
"HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();

View File

@ -71,7 +71,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
CHECK_LT(allocator_end, end);
CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);

View File

@ -36,7 +36,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}

View File

@ -42,27 +42,43 @@ struct MappingDesc {
#if SANITIZER_LINUX && defined(__mips64)
// Everything is above 0x00e000000000.
// MIPS64 maps:
// - 0x0000000000-0x0200000000: Program own segments
// - 0xa200000000-0xc000000000: PIE program segments
// - 0xe200000000-0xffffffffff: libraries segments.
const MappingDesc kMemoryLayout[] = {
{0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
{0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
{0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
{0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
{0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"},
{0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"},
{0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"},
{0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"},
{0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"},
{0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"},
{0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"},
{0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"},
{0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"},
{0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"},
{0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL)
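// Editor's worked check of the new constants (not part of the original
// change): app-1 at 0x0000'00000000 XOR 0x80'00000000 gives shadow-1 at
// 0x0080'00000000; app-3 at 0x00e2'00000000 has bit 39 set, so the XOR
// clears it and yields shadow-3 at 0x0062'00000000; and shadow-2 at
// 0x0022'00000000 plus 0x20'00000000 lands exactly on origin-2 at
// 0x0042'00000000.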
#elif SANITIZER_LINUX && defined(__aarch64__)
// The mapping describes both 39-bits and 42-bits. AArch64 maps:
// - 0x00000000000-0x00010000000: 39/42-bits program own segments
// - 0x05500000000-0x05600000000: 39-bits PIE program segments
// - 0x07f80000000-0x07fffffffff: 39-bits libraries segments
// - 0x2aa00000000-0x2ab00000000: 42-bits PIE program segments
// - 0x3ff00000000-0x3ffffffffff: 42-bits libraries segments
// The mapping describes the 39-bit, 42-bit, and 48-bit VMAs. AArch64
// maps:
// - 0x0000000000000-0x0000010000000: 39/42/48-bits program own segments
// - 0x0005500000000-0x0005600000000: 39-bits PIE program segments
// - 0x0007f80000000-0x0007fffffffff: 39-bits libraries segments
// - 0x002aa00000000-0x002ab00000000: 42-bits PIE program segments
// - 0x003ff00000000-0x003ffffffffff: 42-bits libraries segments
// - 0x0aaaaa0000000-0x0aaab00000000: 48-bits PIE program segments
// - 0xffff000000000-0x1000000000000: 48-bits libraries segments
// It is fragmented into multiple segments to increase the memory available
// on 42-bits (12.21% of the total VMA available for 42-bits and 13.28% for
// 39 bits).
// 39 bits). The 48-bits segments only cover the usual PIE/default segments
// plus some more segments (262144GB total, 0.39% of the total VMA).
const MappingDesc kMemoryLayout[] = {
{0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
{0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
@ -103,6 +119,42 @@ const MappingDesc kMemoryLayout[] = {
{0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
{0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
{0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
// The mappings below are used only for 48-bits VMA.
// TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE
// segments plus some more segments totaling 262144GB of VMA (which covers
// only 0.32% of all 48-bit VMA). Memory availability can be increased by
// adding multiple application segments as the 39- and 42-bit mappings do.
{0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
{0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
{0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
{0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
{0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
{0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
{0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
{0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
{0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
{0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
{0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
{0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
{0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
{0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
{0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
{0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
{0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
{0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
{0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
{0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
{0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
{0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
};
# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
@ -277,11 +329,20 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
common_flags()->fast_unwind_on_malloc)
// For platforms which support slow unwinder only, we restrict the store context
// size to 1, basically only storing the current pc. We do this because the slow
// unwinder which is based on libunwind is not async signal safe and causes
// random freezes in forking applications as well as in signal handlers.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
if (__msan_get_track_origins() > 1 && msan_inited) \
GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
common_flags()->fast_unwind_on_malloc)
if (__msan_get_track_origins() > 1 && msan_inited) { \
if (!SANITIZER_CAN_FAST_UNWIND) \
GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \
false); \
else \
GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
common_flags()->fast_unwind_on_malloc); \
}
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \

View File

@ -33,9 +33,12 @@ struct MsanMapUnmapCallback {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
if (__msan_get_track_origins())
FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
uptr shadow_p = MEM_TO_SHADOW(p);
ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
if (__msan_get_track_origins()) {
uptr origin_p = MEM_TO_ORIGIN(p);
ReleaseMemoryPagesToOS(origin_p, origin_p + size);
}
}
};
@ -56,23 +59,32 @@ struct MsanMapUnmapCallback {
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
static const uptr kAllocatorSize = 0x80000000000; // 8T.
static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
DefaultSizeClassMap,
MsanMapUnmapCallback> PrimaryAllocator;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
static const uptr kAllocatorSpace = 0x300000000000;
static const uptr kAllocatorSize = 0x020000000000; // 2T
static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
DefaultSizeClassMap,
MsanMapUnmapCallback> PrimaryAllocator;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = 0x300000000000;
static const uptr kSpaceSize = 0x020000000000; // 2T.
static const uptr kMetadataSize = sizeof(Metadata);
typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
@ -94,7 +106,9 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
allocator.Init(common_flags()->allocator_may_return_null);
allocator.Init(
common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@ -112,7 +126,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@ -170,7 +184,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}

View File

@ -45,6 +45,8 @@ using __sanitizer::atomic_uintptr_t;
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
#if SANITIZER_FREEBSD
#define __errno_location __error
@ -64,6 +66,23 @@ bool IsInInterceptorScope() {
return in_interceptor_scope;
}
static uptr allocated_for_dlsym;
static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < sizeof(alloc_memory_for_dlsym);
}
static void *AllocateFromLocalPool(uptr size_in_bytes) {
uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
allocated_for_dlsym += size_in_words;
CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
return mem;
}
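This is the same dlsym workaround used by the other runtimes in this commit, with one detail worth noting (editorial observation): the pool advances allocated_for_dlsym in uptr-sized words, so a 24-byte request on a 64-bit target consumes 3 of the 1024 slots and the next early allocation starts word-aligned.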
#define ENSURE_MSAN_INITED() do { \
CHECK(!msan_init_is_running); \
if (!msan_inited) { \
@ -135,10 +154,6 @@ INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
return res;
}
INTERCEPTOR(void *, memcpy, void *dest, const void *src, SIZE_T n) {
return __msan_memcpy(dest, src, n);
}
INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) {
return (char *)__msan_memcpy(dest, src, n) + n;
}
@ -153,14 +168,6 @@ INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) {
return res;
}
INTERCEPTOR(void *, memmove, void *dest, const void *src, SIZE_T n) {
return __msan_memmove(dest, src, n);
}
INTERCEPTOR(void *, memset, void *s, int c, SIZE_T n) {
return __msan_memset(s, c, n);
}
INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
return __msan_memmove(dest, src, n);
}
@ -227,14 +234,14 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
INTERCEPTOR(void, free, void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr) return;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void, cfree, void *ptr) {
GET_MALLOC_STACK_TRACE;
if (!ptr) return;
if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
@ -907,27 +914,35 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!msan_inited)) {
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const SIZE_T kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
static SIZE_T allocated;
SIZE_T size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
void *mem = (void*)&calloc_memory_for_dlsym[allocated];
allocated += size_in_words;
CHECK(allocated < kCallocPoolSize);
return mem;
}
return AllocateFromLocalPool(nmemb * size);
return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr;
if (UNLIKELY(!msan_inited)) {
new_ptr = AllocateFromLocalPool(copy_size);
} else {
copy_size = size;
new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
}
@ -1329,11 +1344,23 @@ int OnExit() {
*begin = *end = 0; \
}
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
{ \
(void)ctx; \
return __msan_memset(block, c, size); \
}
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
{ \
(void)ctx; \
return __msan_memmove(to, from, size); \
}
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
{ \
(void)ctx; \
return __msan_memcpy(to, from, size); \
}
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Msan needs custom handling of these:
#undef SANITIZER_INTERCEPT_MEMSET
#undef SANITIZER_INTERCEPT_MEMMOVE
#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s)
@ -1489,11 +1516,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(fread);
MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
INTERCEPT_FUNCTION(readlink);
INTERCEPT_FUNCTION(memcpy);
INTERCEPT_FUNCTION(memccpy);
INTERCEPT_FUNCTION(mempcpy);
INTERCEPT_FUNCTION(memset);
INTERCEPT_FUNCTION(memmove);
INTERCEPT_FUNCTION(bcopy);
INTERCEPT_FUNCTION(wmemset);
INTERCEPT_FUNCTION(wmemcpy);

View File

@ -37,6 +37,16 @@ void __msan_warning();
SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
void __msan_warning_noreturn();
using __sanitizer::uptr;
using __sanitizer::sptr;
using __sanitizer::uu64;
using __sanitizer::uu32;
using __sanitizer::uu16;
using __sanitizer::u64;
using __sanitizer::u32;
using __sanitizer::u16;
using __sanitizer::u8;
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_1(u8 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE

View File

@ -66,7 +66,8 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
}
if ((uptr)addr != beg) {
uptr end = beg + size - 1;
Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
name);
return false;
}
}

View File

@ -20,7 +20,6 @@
|*
\*===----------------------------------------------------------------------===*/
#include "InstrProfilingInternal.h"
#include "InstrProfilingPort.h"
#include "InstrProfilingUtil.h"
@ -35,6 +34,9 @@
#else
#include <sys/mman.h>
#include <sys/file.h>
#ifndef MAP_FILE
#define MAP_FILE 0
#endif
#endif
#if defined(__FreeBSD__) && defined(__i386__)

View File

@ -72,7 +72,7 @@
#endif
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
Inc->getHash()->getZExtValue()))
@ -204,7 +204,7 @@ COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
#else
COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
llvm::IndexedInstrProf::ComputeHash(NameValue)))
#endif
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
@ -603,7 +603,12 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
#define IR_LEVEL_PROF_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
/* The variable that holds the name of the profile data
* specified via command line. */
#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data

View File

@ -16,15 +16,26 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "InstrProfData.inc"
COMPILER_RT_VISIBILITY char *(*GetEnvHook)(const char *) = 0;
COMPILER_RT_WEAK uint64_t __llvm_profile_raw_version = INSTR_PROF_RAW_VERSION;
COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
}
static unsigned ProfileDumped = 0;
COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
return ProfileDumped;
}
COMPILER_RT_VISIBILITY void lprofSetProfileDumped() {
ProfileDumped = 1;
}
/* Return the number of bytes needed to add to SizeInBytes to make
* the result a multiple of 8.
*/
@ -66,4 +77,5 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
}
}
}
ProfileDumped = 0;
}

View File

@ -112,12 +112,33 @@ void INSTR_PROF_VALUE_PROF_FUNC(
* Writes to the file with the last name given to
* \a __llvm_profile_set_filename(),
* or if it hasn't been called, the \c LLVM_PROFILE_FILE environment variable,
* or if that's not set, the last name given to
* \a __llvm_profile_override_default_filename(), or if that's not set,
* \c "default.profraw".
* or if that's not set, the last name set to INSTR_PROF_PROFILE_NAME_VAR,
* or if that's not set, \c "default.profraw".
*/
int __llvm_profile_write_file(void);
/*!
* \brief This is a wrapper interface to \c __llvm_profile_write_file.
* After this interface is invoked, an already-dumped flag will be set
* so that the profile won't be dumped again during program exit.
* Invoking the interface __llvm_profile_reset_counters will clear
* the flag. This interface is designed to be used to collect profile
* data from user-selected hot regions. The use model is
* __llvm_profile_reset_counters();
* ... hot region 1
* __llvm_profile_dump();
* ... some other code
* __llvm_profile_reset_counters();
* ... hot region 2
* __llvm_profile_dump();
*
* It is expected that online profile merging is enabled, with the \c %m
* specifier used in the profile filename. If merging is not turned on, the
* user is expected to invoke __llvm_profile_set_filename to specify
* different profile names for different regions before dumping, to avoid
* clobbering previously written profile data.
*/
int __llvm_profile_dump(void);
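A compilable sketch of that use model (editorial illustration; hot_region_1/2 are placeholders, and it assumes the program was built with -fprofile-instr-generate and launched with LLVM_PROFILE_FILE containing %m so online merging is active):

/* Declared by the profile runtime; see the prototypes in this header. */
void __llvm_profile_reset_counters(void);
int __llvm_profile_dump(void);

extern void hot_region_1(void);
extern void hot_region_2(void);

void profile_hot_regions(void) {
  __llvm_profile_reset_counters(); /* discard counts accumulated so far */
  hot_region_1();
  __llvm_profile_dump();           /* merge region 1 into the %m profile */
  __llvm_profile_reset_counters(); /* also clears the already-dumped flag */
  hot_region_2();
  __llvm_profile_dump();
}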
/*!
* \brief Set the filename for writing instrumentation data.
*
@ -129,25 +150,22 @@ int __llvm_profile_write_file(void);
*/
void __llvm_profile_set_filename(const char *Name);
/*!
* \brief Set the filename for writing instrumentation data, unless the
* \c LLVM_PROFILE_FILE environment variable was set.
*
* Unless overridden, sets the filename to be used for subsequent calls to
* \a __llvm_profile_write_file().
*
* \c Name is not copied, so it must remain valid. Passing NULL resets the
* filename logic to the default behaviour (unless the \c LLVM_PROFILE_FILE
* was set in which case it has no effect).
*/
void __llvm_profile_override_default_filename(const char *Name);
/*! \brief Register to write instrumentation data to file at exit. */
int __llvm_profile_register_write_file_atexit(void);
/*! \brief Initialize file handling. */
void __llvm_profile_initialize_file(void);
/*!
* \brief Return path prefix (excluding the base filename) of the profile data.
* This is useful for users using \c -fprofile-generate=./path_prefix who do
* not care about the default raw profile name. It is also useful for
* collecting more than one profile data file dumped into the same directory
* (online merge mode is turned on for instrumented programs with shared
* libs). Side effect: this API call will invoke malloc to dynamically
* allocate memory.
*/
const char *__llvm_profile_get_path_prefix();
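For example (editorial sketch): a process started with LLVM_PROFILE_FILE=/data/profiles/run-%p.profraw can recover its output directory as follows; per the implementation later in this commit, the returned prefix keeps the trailing separator and is malloc'ed once, then cached.

void report_profile_dir(void) {
  /* Yields "/data/profiles/" for the pattern above. */
  const char *Prefix = __llvm_profile_get_path_prefix();
  (void)Prefix;
}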
/*! \brief Get the magic token for the file format. */
uint64_t __llvm_profile_get_magic(void);
@ -166,8 +184,8 @@ uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
* Note that this variable's visibility needs to be hidden so that the
* definition of this variable in an instrumented shared library won't
* affect runtime initialization decision of the main program.
*/
COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
* __llvm_profile_runtime. */
COMPILER_RT_VISIBILITY extern int INSTR_PROF_PROFILE_RUNTIME_VAR;
/*!
* This variable is defined in InstrProfiling.c. Its main purpose is to
@ -179,6 +197,13 @@ COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
* main program are expected to be instrumented in the same way), there is
* no need for this variable to be hidden.
*/
extern uint64_t __llvm_profile_raw_version;
extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
/*!
* This variable is a weak symbol defined in InstrProfiling.c. It allows
* compiler instrumentation to provide overriding definition with value
* from compiler command line. This variable has default visibility.
*/
extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
#endif /* PROFILE_INSTRPROFILING_H_ */

View File

@ -59,10 +59,14 @@ static const char *getPNSStr(ProfileNameSpecifier PNS) {
}
#define MAX_PID_SIZE 16
/* Data structure holding the result of parsed filename pattern. */
typedef struct lprofFilename {
/* File name string possibly with %p or %h specifiers. */
const char *FilenamePat;
/* A flag indicating if FilenamePat's memory is allocated
* by the runtime. */
unsigned OwnsFilenamePat;
const char *ProfilePathPrefix;
char PidChars[MAX_PID_SIZE];
char Hostname[COMPILER_RT_MAX_HOSTLEN];
unsigned NumPids;
@ -78,7 +82,8 @@ typedef struct lprofFilename {
ProfileNameSpecifier PNS;
} lprofFilename;
lprofFilename lprofCurFilename = {0, {0}, {0}, 0, 0, 0, PNS_unknown};
COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, {0}, {0},
0, 0, 0, PNS_unknown};
int getpid(void);
static int getCurFilenameLength();
@ -229,16 +234,17 @@ static void truncateCurrentFile(void) {
return;
/* Create the directory holding the file, if needed. */
if (strchr(Filename, DIR_SEPARATOR)
#if defined(DIR_SEPARATOR_2)
|| strchr(Filename, DIR_SEPARATOR_2)
#endif
) {
if (lprofFindFirstDirSeparator(Filename)) {
char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
strncpy(Copy, Filename, Length + 1);
__llvm_profile_recursive_mkdir(Copy);
}
/* Bypass file truncation to allow online raw profile
* merging. */
if (lprofCurFilename.MergePoolSize)
return;
/* Truncate the file. Later we'll reopen and append. */
File = fopen(Filename, "w");
if (!File)
@ -248,6 +254,9 @@ static void truncateCurrentFile(void) {
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
free((void *)lprofCurFilename.FilenamePat);
}
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
lprofCurFilename.FilenamePat = DefaultProfileName;
lprofCurFilename.PNS = PNS_default;
@ -263,31 +272,46 @@ static int containsMergeSpecifier(const char *FilenamePat, int I) {
/* Parses the pattern string \p FilenamePat and stores the result to
* lprofCurFilename structure. */
static int parseFilenamePattern(const char *FilenamePat) {
static int parseFilenamePattern(const char *FilenamePat,
unsigned CopyFilenamePat) {
int NumPids = 0, NumHosts = 0, I;
char *PidChars = &lprofCurFilename.PidChars[0];
char *Hostname = &lprofCurFilename.Hostname[0];
int MergingEnabled = 0;
lprofCurFilename.FilenamePat = FilenamePat;
/* Clean up cached prefix. */
if (lprofCurFilename.ProfilePathPrefix)
free((void *)lprofCurFilename.ProfilePathPrefix);
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
free((void *)lprofCurFilename.FilenamePat);
}
if (!CopyFilenamePat)
lprofCurFilename.FilenamePat = FilenamePat;
else {
lprofCurFilename.FilenamePat = strdup(FilenamePat);
lprofCurFilename.OwnsFilenamePat = 1;
}
/* Check the filename for "%p", which indicates a pid-substitution. */
for (I = 0; FilenamePat[I]; ++I)
if (FilenamePat[I] == '%') {
if (FilenamePat[++I] == 'p') {
if (!NumPids++) {
if (snprintf(PidChars, MAX_PID_SIZE, "%d", getpid()) <= 0) {
PROF_WARN(
"Unable to parse filename pattern %s. Using the default name.",
FilenamePat);
PROF_WARN("Unable to get pid for filename pattern %s. Using the "
"default name.",
FilenamePat);
return -1;
}
}
} else if (FilenamePat[I] == 'h') {
if (!NumHosts++)
if (COMPILER_RT_GETHOSTNAME(Hostname, COMPILER_RT_MAX_HOSTLEN)) {
PROF_WARN(
"Unable to parse filename pattern %s. Using the default name.",
FilenamePat);
PROF_WARN("Unable to get hostname for filename pattern %s. Using "
"the default name.",
FilenamePat);
return -1;
}
} else if (containsMergeSpecifier(FilenamePat, I)) {
@ -312,7 +336,8 @@ static int parseFilenamePattern(const char *FilenamePat) {
}
static void parseAndSetFilename(const char *FilenamePat,
ProfileNameSpecifier PNS) {
ProfileNameSpecifier PNS,
unsigned CopyFilenamePat) {
const char *OldFilenamePat = lprofCurFilename.FilenamePat;
ProfileNameSpecifier OldPNS = lprofCurFilename.PNS;
@ -323,33 +348,28 @@ static void parseAndSetFilename(const char *FilenamePat,
if (!FilenamePat)
FilenamePat = DefaultProfileName;
/* When -fprofile-instr-generate=<path> is specified on the
* command line, each module will be instrumented with runtime
* init call to __llvm_profile_init function which calls
* __llvm_profile_override_default_filename. In most of the cases,
* the path will be identical, so bypass the parsing completely.
*/
if (OldFilenamePat && !strcmp(OldFilenamePat, FilenamePat)) {
lprofCurFilename.PNS = PNS;
return;
}
/* When PNS >= OldPNS, the last one wins. */
if (!FilenamePat || parseFilenamePattern(FilenamePat))
if (!FilenamePat || parseFilenamePattern(FilenamePat, CopyFilenamePat))
resetFilenameToDefault();
lprofCurFilename.PNS = PNS;
if (!OldFilenamePat) {
PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
lprofCurFilename.FilenamePat, getPNSStr(PNS));
if (getenv("LLVM_PROFILE_VERBOSE"))
PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
lprofCurFilename.FilenamePat, getPNSStr(PNS));
} else {
PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
getPNSStr(PNS));
if (getenv("LLVM_PROFILE_VERBOSE"))
PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
getPNSStr(PNS));
}
if (!lprofCurFilename.MergePoolSize)
truncateCurrentFile();
truncateCurrentFile();
}
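The PROF_NOTE calls above are now gated on LLVM_PROFILE_VERBOSE. A hedged sketch of how a test harness might enable them before the runtime parses the filename; assumes a POSIX setenv:

#include <cstdlib>

// Call early (e.g. at the top of main) so the profile runtime sees the
// variable when parseAndSetFilename runs.
static void enableProfileNotes() {
  setenv("LLVM_PROFILE_VERBOSE", "1", /*overwrite=*/1);
}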
/* Return buffer length that is required to store the current profile
@ -429,16 +449,61 @@ static const char *getFilenamePatFromEnv(void) {
return Filename;
}
COMPILER_RT_VISIBILITY
const char *__llvm_profile_get_path_prefix(void) {
int Length;
char *FilenameBuf, *Prefix;
const char *Filename, *PrefixEnd;
if (lprofCurFilename.ProfilePathPrefix)
return lprofCurFilename.ProfilePathPrefix;
Length = getCurFilenameLength();
FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
Filename = getCurFilename(FilenameBuf);
if (!Filename)
return "\0";
PrefixEnd = lprofFindLastDirSeparator(Filename);
if (!PrefixEnd)
return "\0";
Length = PrefixEnd - Filename + 1;
Prefix = (char *)malloc(Length + 1);
if (!Prefix) {
PROF_ERR("Failed to %s\n", "allocate memory.");
return "\0";
}
memcpy(Prefix, Filename, Length);
Prefix[Length] = '\0';
lprofCurFilename.ProfilePathPrefix = Prefix;
return Prefix;
}
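A small usage sketch for the new __llvm_profile_get_path_prefix API. The prototype matches the definition above; note the returned prefix includes the trailing directory separator, and an empty string is returned when no separator is present:

#include <cstdio>

extern "C" const char *__llvm_profile_get_path_prefix(void);

int main() {
  // For LLVM_PROFILE_FILE=/tmp/profiles/default.profraw this is expected
  // to print "/tmp/profiles/" (path shown is illustrative).
  printf("profile dir: %s\n", __llvm_profile_get_path_prefix());
}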
/* This method is invoked by the runtime initialization hook
* InstrProfilingRuntime.o if it is linked in. Both the user-specified
* profile path (via -fprofile-instr-generate=) and the LLVM_PROFILE_FILE
* environment variable can override this default value. */
COMPILER_RT_VISIBILITY
void __llvm_profile_initialize_file(void) {
const char *FilenamePat;
const char *EnvFilenamePat;
const char *SelectedPat = NULL;
ProfileNameSpecifier PNS = PNS_unknown;
int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
FilenamePat = getFilenamePatFromEnv();
parseAndSetFilename(FilenamePat, FilenamePat ? PNS_environment : PNS_default);
EnvFilenamePat = getFilenamePatFromEnv();
if (EnvFilenamePat) {
SelectedPat = EnvFilenamePat;
PNS = PNS_environment;
} else if (hasCommandLineOverrider) {
SelectedPat = INSTR_PROF_PROFILE_NAME_VAR;
PNS = PNS_command_line;
} else {
SelectedPat = NULL;
PNS = PNS_default;
}
parseAndSetFilename(SelectedPat, PNS, 0);
}
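The selection above establishes a fixed precedence: environment variable, then compile-time name, then the built-in default. A condensed restatement (a sketch only; INSTR_PROF_PROFILE_NAME_VAR holds the -fprofile-instr-generate=<path> value baked in at compile time):

// Environment > command line > default, as in the function above.
static const char *selectPattern(const char *EnvPat, const char *CmdLinePat) {
  if (EnvPat)
    return EnvPat;              // LLVM_PROFILE_FILE
  if (CmdLinePat && CmdLinePat[0])
    return CmdLinePat;          // INSTR_PROF_PROFILE_NAME_VAR
  return nullptr;               // parseAndSetFilename falls back to default
}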
/* This API is directly called by the user application code. It has the
@ -447,18 +512,7 @@ void __llvm_profile_initialize_file(void) {
*/
COMPILER_RT_VISIBILITY
void __llvm_profile_set_filename(const char *FilenamePat) {
parseAndSetFilename(FilenamePat, PNS_runtime_api);
}
/*
* This API is invoked by the global initializers emitted by Clang/LLVM when
* -fprofile-instr-generate=<..> is specified (vs -fprofile-instr-generate
* without an argument). This option has lower precedence than the
* LLVM_PROFILE_FILE environment variable.
*/
COMPILER_RT_VISIBILITY
void __llvm_profile_override_default_filename(const char *FilenamePat) {
parseAndSetFilename(FilenamePat, PNS_command_line);
parseAndSetFilename(FilenamePat, PNS_runtime_api, 1);
}
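__llvm_profile_set_filename is now the single entry point left for user code. A usage sketch with the specifiers parsed earlier (%p for pid, %h for hostname); the runtime registers the call with PNS_runtime_api, which it treats as the strongest name specifier, so it wins over both the environment and the command line:

extern "C" void __llvm_profile_set_filename(const char *);
extern "C" int __llvm_profile_write_file(void);

int main() {
  __llvm_profile_set_filename("run-%p-%h.profraw");
  // ... exercise the instrumented code ...
  return __llvm_profile_write_file();
}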
/* The public API for writing profile data into the file with name
@ -471,6 +525,12 @@ int __llvm_profile_write_file(void) {
const char *Filename;
char *FilenameBuf;
if (lprofProfileDumped()) {
PROF_NOTE("Profile data not written to file: %s.\n",
"already written");
return 0;
}
Length = getCurFilenameLength();
FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
Filename = getCurFilename(FilenameBuf);
@ -497,6 +557,18 @@ int __llvm_profile_write_file(void) {
return rc;
}
COMPILER_RT_VISIBILITY
int __llvm_profile_dump(void) {
if (!doMerging())
PROF_WARN("Later invocation of __llvm_profile_dump can lead to clobbering "
" of previously dumped profile data : %s. Either use %%m "
"in profile name or change profile name before dumping.\n",
"online profile merging is not on");
int rc = __llvm_profile_write_file();
lprofSetProfileDumped();
return rc;
}
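Per the warning above, repeated calls to __llvm_profile_dump are only safe when online merging is on. A sketch that pairs the dump with a "%m" merge-pool pattern so successive dumps merge rather than clobber (pattern value illustrative):

extern "C" void __llvm_profile_set_filename(const char *);
extern "C" int __llvm_profile_dump(void);

void checkpointProfile() {
  // "%m" turns on profile merging, silencing the warning above.
  __llvm_profile_set_filename("checkpoint-%m.profraw");
  __llvm_profile_dump();
}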
static void writeFileWithoutReturn(void) { __llvm_profile_write_file(); }
COMPILER_RT_VISIBILITY

View File

@ -163,21 +163,13 @@ void lprofSetupValueProfiler();
* to dump merged profile data into its own profile file. */
uint64_t lprofGetLoadModuleSignature();
/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
/* Return the path prefix specified by GCOV_PREFIX environment variable.
* If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
* is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
/*
* Return a nonzero value if the profile data has already been
* dumped to the file.
*/
const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
* and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip
* is not zero, path prefixes are stripped from \c PathStr (the level of
* stripping is specified by \c PrefixStrip) before \c Prefix is added.
*/
void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
size_t PrefixLen, int PrefixStrip);
unsigned lprofProfileDumped();
void lprofSetProfileDumped();
COMPILER_RT_VISIBILITY extern char *(*GetEnvHook)(const char *);
COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *);
COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer;
COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize;

View File

@ -40,14 +40,14 @@
#endif
#define COMPILER_RT_MAX_HOSTLEN 128
#ifdef _MSC_VER
#define COMPILER_RT_GETHOSTNAME(Name, Len) gethostname(Name, Len)
#elif defined(__ORBIS__)
#ifdef __ORBIS__
#define COMPILER_RT_GETHOSTNAME(Name, Len) ((void)(Name), (void)(Len), (-1))
#else
#define COMPILER_RT_GETHOSTNAME(Name, Len) lprofGetHostName(Name, Len)
#ifndef _MSC_VER
#define COMPILER_RT_HAS_UNAME 1
#endif
#endif
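Whichever branch is selected, callers use the macro uniformly. An illustrative fragment, assuming this header is included (on __ORBIS__ the macro is hardwired to fail, so "%h" falls back to the default name):

void printHost() {
  char Hostname[COMPILER_RT_MAX_HOSTLEN];
  if (COMPILER_RT_GETHOSTNAME(Hostname, COMPILER_RT_MAX_HOSTLEN) != 0) {
    // Hostname unavailable; the runtime keeps the default filename.
  }
}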
#if COMPILER_RT_HAS_ATOMICS == 1
#ifdef _MSC_VER

View File

@ -11,7 +11,8 @@ extern "C" {
#include "InstrProfiling.h"
COMPILER_RT_VISIBILITY int __llvm_profile_runtime;
/* int __llvm_profile_runtime */
COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR;
}
namespace {

View File

@ -35,7 +35,7 @@ void __llvm_profile_recursive_mkdir(char *path) {
for (i = 1; path[i] != '\0'; ++i) {
char save = path[i];
if (!(path[i] == '/' || path[i] == '\\'))
if (!IS_DIR_SEPARATOR(path[i]))
continue;
path[i] = '\0';
#ifdef _WIN32
@ -66,7 +66,19 @@ void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
#endif
#ifdef COMPILER_RT_HAS_UNAME
#ifdef _MSC_VER
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
DWORD BufferSize = sizeof(Buffer);
BOOL Result =
GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
if (!Result)
return -1;
if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
return -1;
return 0;
}
#elif defined(COMPILER_RT_HAS_UNAME)
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
struct utsname N;
int R;
@ -184,3 +196,26 @@ lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
}
COMPILER_RT_VISIBILITY const char *
lprofFindFirstDirSeparator(const char *Path) {
const char *Sep;
Sep = strchr(Path, DIR_SEPARATOR);
if (Sep)
return Sep;
#if defined(DIR_SEPARATOR_2)
Sep = strchr(Path, DIR_SEPARATOR_2);
#endif
return Sep;
}
COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
const char *Sep;
Sep = strrchr(Path, DIR_SEPARATOR);
if (Sep)
return Sep;
#if defined(DIR_SEPARATOR_2)
Sep = strrchr(Path, DIR_SEPARATOR_2);
#endif
return Sep;
}
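These helpers exist because Windows paths may use either separator, hence the optional DIR_SEPARATOR_2 probe. An illustrative call; the prototype matches the definition above and the path value is hypothetical:

const char *lprofFindLastDirSeparator(const char *Path);

void splitExample() {
  const char *Sep = lprofFindLastDirSeparator("/tmp/profiles/default.profraw");
  // Sep points at the last '/'; everything up to and including it is
  // the prefix cached by __llvm_profile_get_path_prefix.
  (void)Sep;
}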

View File

@ -25,6 +25,27 @@ FILE *lprofOpenFileEx(const char *Filename);
static inline char *getenv(const char *name) { return NULL; }
#endif /* #if __ORBIS__ */
/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
/* Return the path prefix specified by GCOV_PREFIX environment variable.
* If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
* is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
*/
const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
* and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip
* is not zero, path prefixes are stripped from \c PathStr (the level of
* stripping is specified by \c PrefixStrip) before \c Prefix is added.
*/
void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
size_t PrefixLen, int PrefixStrip);
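A worked example of the prefix logic documented above, assuming this header is included; the resulting path is hedged, since the exact stripping behavior is defined by the implementation:

#include <cstring>

void prefixExample() {
  char Dest[4096];
  // With PrefixStrip == 2, the leading "/a/b" is removed from the path
  // before the prefix is applied, giving roughly "/new/root/c/foo.gcda".
  lprofApplyPathPrefix(Dest, "/a/b/c/foo.gcda", "/new/root",
                       strlen("/new/root"), 2);
}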
/* Returns a pointer to the first occurrence of \c DIR_SEPARATOR char in
* the string \c Path, or NULL if the char is not found. */
const char *lprofFindFirstDirSeparator(const char *Path);
/* Returns a pointer to the last occurrence of \c DIR_SEPARATOR char in
* the string \c Path, or NULL if the char is not found. */
const char *lprofFindLastDirSeparator(const char *Path);
int lprofGetHostName(char *Name, int Len);
unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);

View File

@ -192,7 +192,7 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
* the runtime can wipe out more than one of the lowest-count entries
* to make room for hot targets.
*/
if (!(--MinCountVNode->Count)) {
if (!MinCountVNode->Count || !(--MinCountVNode->Count)) {
CurVNode = MinCountVNode;
CurVNode->Value = TargetValue;
CurVNode->Count++;
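The extra !MinCountVNode->Count test guards against decrementing a counter that is already zero: Count is unsigned, so the decrement would wrap around and pin the entry as maximally hot. A two-line illustration:

#include <cstdint>

void wrapDemo() {
  uint64_t Count = 0;
  --Count;  // wraps to 0xFFFFFFFFFFFFFFFF: the entry would never look cold
  (void)Count;
}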

View File

@ -20,6 +20,9 @@
#include "WindowsMMap.h"
#include "InstrProfiling.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#ifdef __USE_FILE_OFFSET64
# define DWORD_HI(x) (x >> 32)
# define DWORD_LO(x) ((x) & 0xffffffff)

View File

@ -92,6 +92,8 @@ static __thread void *unsafe_stack_start = nullptr;
static __thread size_t unsafe_stack_size = 0;
static __thread size_t unsafe_stack_guard = 0;
using namespace __sanitizer;
static inline void *unsafe_stack_alloc(size_t size, size_t guard) {
CHECK_GE(size + guard, size);
void *addr = MmapOrDie(size + guard, "unsafe_stack_alloc");

View File

@ -0,0 +1,60 @@
//===-- sancov_flags.cc -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#include "sancov_flags.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_platform.h"
#if !SANITIZER_LINUX
// Other platforms do not provide weak symbols out of the box.
extern "C" const char* __sancov_default_options() { return ""; }
#endif
using namespace __sanitizer;
namespace __sancov {
SancovFlags sancov_flags_dont_use_directly; // use via flags();
void SancovFlags::SetDefaults() {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sancov_flags.inc"
#undef SANCOV_FLAG
}
static void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \
RegisterFlag(parser, #Name, Description, &f->Name);
#include "sancov_flags.inc"
#undef SANCOV_FLAG
}
static const char *MaybeCallSancovDefaultOptions() {
return (&__sancov_default_options) ? __sancov_default_options() : "";
}
void InitializeSancovFlags() {
SancovFlags *f = sancov_flags();
f->SetDefaults();
FlagParser parser;
RegisterSancovFlags(&parser, f);
parser.ParseString(MaybeCallSancovDefaultOptions());
parser.ParseString(GetEnv("SANCOV_OPTIONS"));
ReportUnrecognizedFlags();
if (f->help) parser.PrintFlagDescriptions();
}
} // namespace __sancov
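Applications can bake in their own defaults by defining the weak __sancov_default_options hook declared in sancov_flags.h (below); SANCOV_OPTIONS from the environment still wins because it is parsed afterwards. A sketch, with a flag name taken from sancov_flags.inc:

// Defined in the instrumented application, not in the runtime.
extern "C" const char *__sancov_default_options() {
  return "symbolize=0";
}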

View File

@ -0,0 +1,40 @@
//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAGS_H
#define SANCOV_FLAGS_H
#include "sanitizer_flag_parser.h"
#include "sanitizer_internal_defs.h"
namespace __sancov {
struct SancovFlags {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sancov_flags.inc"
#undef SANCOV_FLAG
void SetDefaults();
};
extern SancovFlags sancov_flags_dont_use_directly;
inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
void InitializeSancovFlags();
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
__sancov_default_options();
} // namespace __sancov
#endif

View File

@ -0,0 +1,21 @@
//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAG
#error "Defnine SANCOV_FLAG prior to including this file!"
#endif
SANCOV_FLAG(bool, symbolize, true,
"If set, coverage information will be symbolized by the sancov tool "
"after dumping.")
SANCOV_FLAG(bool, help, false, "Print flags help.")

View File

@ -73,6 +73,8 @@ class AddrHashMap {
~Handle();
T *operator->();
T &operator*();
const T &operator*() const;
bool created() const;
bool exists() const;
@ -136,6 +138,16 @@ T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
return cell_->val;
}
template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
return cell_->val;
}
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
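The point of adding a const overload of operator* is to let read-only code dereference a const Handle without a cast. A generic standalone analogue of the pattern (names here are hypothetical, not part of AddrHashMap):

#include <cassert>

template <typename T> class HandleDemo {
  T *val_;
 public:
  explicit HandleDemo(T *v) : val_(v) {}
  T &operator*() { return *val_; }
  const T &operator*() const { return *val_; }  // mirrors the new overload
};

int main() {
  int x = 7;
  const HandleDemo<int> h(&x);
  assert(*h == 7);  // compiles only because the const overload exists
}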

View File

@ -13,27 +13,33 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
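posix_memalign, used by the fallback above, only accepts alignments that are powers of two and multiples of sizeof(void *). A hedged validity check illustrating the constraint:

#include <cstddef>

static bool validPosixAlignment(size_t Alignment) {
  return Alignment >= sizeof(void *) && (Alignment & (Alignment - 1)) == 0;
}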
@ -41,10 +47,20 @@ static void *__libc_memalign(uptr alignment, uptr size) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
#else
// Windows does not provide __libc_memalign/posix_memalign. It provides
// _aligned_malloc, but blocks allocated with it cannot be passed to free();
// they must be passed to _aligned_free, and the InternalAlloc interface does
// not account for that requirement. Alignment does not seem to be used
// anywhere in the runtime, so just call __libc_malloc for now.
DCHECK_EQ(alignment, 0);
return __libc_malloc(size);
#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
@ -62,7 +78,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@ -78,7 +94,8 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
internal_allocator_instance->Init(/* may_return_null*/ false);
internal_allocator_instance->Init(
/* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@ -115,7 +132,7 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
@ -145,7 +162,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
return internal_allocator()->ReturnNullOrDie();
return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@ -192,7 +209,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
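The division above is the standard overflow-safe way to validate calloc-style sizes without resorting to a wider integer type. Equivalently:

#include <cstdint>

// n * size overflows size_t exactly when n > SIZE_MAX / size (size != 0).
static bool mulWouldOverflow(size_t n, size_t size) {
  return size != 0 && n > SIZE_MAX / size;
}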
void NORETURN ReportAllocatorCannotReturnNull() {
static atomic_uint8_t reporting_out_of_memory = {0};
bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff