Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to
4.0.0 (branches/release_40 296509).  The release will follow soon.

Please note that from 3.5.0 onwards, clang, llvm and lldb require C++11
support to build; see UPDATING for more information.

Also note that as of 4.0.0, lld should be able to link the base system
on amd64 and aarch64.  See the WITH_LLD_IS_LD setting in src.conf(5).
Please be aware, however, that this is still a work in progress.
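
For example, a minimal /etc/src.conf for experimenting with this on
amd64 might contain just the knob below (a sketch, not a recommendation
for production builds; the option is documented in src.conf(5)):

WITH_LLD_IS_LD=yes

With that set, the cross-tools stage additionally bootstraps
usr.bin/clang/lld, as the Makefile.inc1 hunks below show.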

Release notes for llvm, clang and lld will be available here:
<http://releases.llvm.org/4.0.0/docs/ReleaseNotes.html>
<http://releases.llvm.org/4.0.0/tools/clang/docs/ReleaseNotes.html>
<http://releases.llvm.org/4.0.0/tools/lld/docs/ReleaseNotes.html>

Thanks to Ed Maste, Jan Beich, Antoine Brodin and Eric Fiselier for
their help.

Relnotes:	yes
Exp-run:	antoine
PR:		215969, 216008
MFC after:	1 month
Dimitry Andric 2017-03-02 20:49:40 +00:00
commit ed085b68ab
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=314564
5348 changed files with 784938 additions and 625076 deletions

View File

@@ -513,7 +513,7 @@ TMAKE= MAKEOBJDIRPREFIX=${OBJTREE} \
# cross-tools stage
XMAKE= TOOLS_PREFIX=${WORLDTMP} ${BMAKE} \
TARGET=${TARGET} TARGET_ARCH=${TARGET_ARCH} \
MK_GDB=no MK_TESTS=no MK_LLD_IS_LD=no
MK_GDB=no MK_TESTS=no
# kernel-tools stage
KTMAKEENV= INSTALL="sh ${.CURDIR}/tools/install.sh" \
@@ -1825,6 +1825,9 @@ _elftctools= lib/libelftc \
# cross-build on a FreeBSD 10 host:
_elftctools+= usr.bin/addr2line
.endif
.if ${MK_LLD_IS_LD} != "no"
_lld= usr.bin/clang/lld
.endif
.elif ${TARGET_ARCH} != ${MACHINE_ARCH} && ${MK_ELFTOOLCHAIN_BOOTSTRAP} != "no"
# If cross-building with an external binutils we still need to build strip for
# the target (for at least crunchide).
@@ -1849,6 +1852,7 @@ cross-tools: .MAKE .PHONY
${LOCAL_XTOOL_DIRS} \
${_clang_libs} \
${_clang} \
${_lld} \
${_binutils} \
${_elftctools} \
${_dtrace_tools} \

View File

@@ -38,6 +38,119 @@
# xargs -n1 | sort | uniq -d;
# done
# 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/include/c++/v1/__undef___deallocate
OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate
# 20170302: new clang import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.1/include
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.1/lib
OLD_DIRS+=usr/lib/clang/3.9.1
# 20170226: SVR4 compatibility removed
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/streams.4

View File

@@ -51,6 +51,11 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 12.x IS SLOW:
****************************** SPECIAL WARNING: ******************************
20170302:
Clang, llvm, lldb, compiler-rt and libc++ have been upgraded to 4.0.0.
Please see the 20141231 entry below for information about prerequisites
and upgrading, if you are not already using clang 3.5.0 or higher.
20170221:
The code that provides support for ZFS .zfs/ directory functionality
has been reimplemented. It's not possible now to create a snapshot

View File

@@ -117,6 +117,16 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
// Symbolizes the supplied 'pc' using the format string 'fmt'.
// Outputs at most 'out_buf_size' bytes into 'out_buf'.
// The format syntax is described in
// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
size_t out_buf_size);
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
@@ -169,7 +179,16 @@ extern "C" {
// use-after-return detection.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
const void *bottom, size_t size);
void __sanitizer_finish_switch_fiber(void *fake_stack_save);
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
// Get full module name and calculate pc offset within it.
// Returns 1 if pc belongs to some module, 0 if module was not found.
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
size_t module_path_len,
void **pc_offset);
#ifdef __cplusplus
} // extern "C"
#endif
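
As a quick illustration of the new symbolization entry points, an
instrumented program could symbolize its own caller roughly like this
(a sketch; the %p/%F/%L directives are those of
lib/sanitizer_common/sanitizer_stacktrace_printer.h):

// Build with a sanitizer-enabled clang, e.g. clang++ -fsanitize=address
#include <sanitizer/common_interface_defs.h>
#include <cstdio>

void WhereAmI() {
  char buf[256];
  // __builtin_return_address(0) yields a pc inside our caller.
  __sanitizer_symbolize_pc(__builtin_return_address(0), "%p %F %L",
                           buf, sizeof(buf));
  std::printf("called from: %s\n", buf);
}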

View File

@@ -23,6 +23,11 @@ extern "C" {
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Dump collected coverage info. Sorts pcs by module into individual
// .sancov files.
void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
// Open <name>.sancov.packed in the coverage directory and return the file
// descriptor. Returns -1 on failure, or if coverage dumping is disabled.
// This is intended for use by sandboxing code.
@@ -41,13 +46,6 @@ extern "C" {
// Some of the entries in *data will be zero.
uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
// Set *data to the growing buffer with covered PCs and return the size
// of the buffer. The entries are never zero.
// When only unique pcs are collected, the size is equal to
// __sanitizer_get_total_unique_coverage.
// WARNING: EXPERIMENTAL API.
uintptr_t __sanitizer_get_coverage_pc_buffer(uintptr_t **data);
// The coverage instrumentation may optionally provide imprecise counters.
// Rather than exposing the counter values to the user we instead map
// the counters to a bitset.
@@ -65,6 +63,7 @@ extern "C" {
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
#ifdef __cplusplus
} // extern "C"
#endif
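
For completeness, a minimal sketch of forcing a coverage dump from
application code (assumes the binary was built with -fsanitize-coverage
and runs with coverage=1 in its sanitizer options):

#include <sanitizer/coverage_interface.h>

// Flush whatever coverage has been collected so far into .sancov files,
// e.g. right before entering a sandbox.
void CoverageCheckpoint() {
  __sanitizer_cov_dump();
}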

View File

@@ -0,0 +1,65 @@
//===-- xray_interface.h ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// APIs for controlling XRay functionality explicitly.
//===----------------------------------------------------------------------===//
#ifndef XRAY_XRAY_INTERFACE_H
#define XRAY_XRAY_INTERFACE_H
#include <cstdint>
extern "C" {
enum XRayEntryType { ENTRY = 0, EXIT = 1, TAIL = 2 };
// Provide a function to invoke for when instrumentation points are hit. This is
// a user-visible control surface that overrides the default implementation. The
// function provided should take the following arguments:
//
// - function id: an identifier that indicates the id of a function; this id
// is generated by xray; the mapping between the function id
// and the actual function pointer is available through
// __xray_table.
// - entry type: identifies what kind of instrumentation point was encountered
// (function entry, function exit, etc.). See the enum
// XRayEntryType for more details.
//
// The user handler must handle correctly spurious calls after this handler is
// removed or replaced with another handler, because it would be too costly for
// XRay runtime to avoid spurious calls.
// To prevent circular calling, the handler function itself and all its
// direct&indirect callees must not be instrumented with XRay, which can be
// achieved by marking them all with: __attribute__((xray_never_instrument))
//
// Returns 1 on success, 0 on error.
extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType));
// This removes whatever the currently provided handler is. Returns 1 on
// success, 0 on error.
extern int __xray_remove_handler();
enum XRayPatchingStatus {
NOT_INITIALIZED = 0,
SUCCESS = 1,
ONGOING = 2,
FAILED = 3,
};
// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
// for possible result values.
extern XRayPatchingStatus __xray_patch();
// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
// result values.
extern XRayPatchingStatus __xray_unpatch();
}
#endif
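
Taken together, the interface above is used roughly as follows (a
sketch; assumes a binary built with clang's -fxray-instrument, and the
include path is illustrative):

#include "xray_interface.h"
#include <cstdint>
#include <cstdio>

// The handler itself must never be instrumented, to avoid recursion.
__attribute__((xray_never_instrument))
void MyHandler(int32_t FuncId, XRayEntryType Type) {
  std::printf("func %d: %s\n", FuncId, Type == ENTRY ? "entry" : "exit/tail");
}

int main() {
  __xray_set_handler(MyHandler);  // install the hook
  __xray_patch();                 // splice calls into the instrumentation sleds
  // ... instrumented work happens here ...
  __xray_unpatch();               // restore the original code
  __xray_remove_handler();
}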

View File

@@ -0,0 +1,80 @@
//===-- xray_records.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This header exposes some record types useful for the XRay in-memory logging
// implementation.
//
//===----------------------------------------------------------------------===//
#ifndef XRAY_XRAY_RECORDS_H
#define XRAY_XRAY_RECORDS_H
namespace __xray {
enum FileTypes {
NAIVE_LOG = 0,
};
// This data structure is used to describe the contents of the file. We use this
// for versioning the supported XRay file formats.
struct alignas(32) XRayFileHeader {
uint16_t Version = 0;
// The type of file we're writing out. See the FileTypes enum for more
// information. This allows different implementations of the XRay logging to
// have different files for different information being stored.
uint16_t Type = 0;
// What follows are a set of flags that indicate useful things for when
// reading the data in the file.
bool ConstantTSC : 1;
bool NonstopTSC : 1;
// The frequency by which TSC increases per-second.
alignas(8) uint64_t CycleFrequency = 0;
} __attribute__((packed));
static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
enum RecordTypes {
NORMAL = 0,
};
struct alignas(32) XRayRecord {
// This is the type of the record being written. We use 16 bits to allow us to
// treat this as a discriminant, and so that the first 4 bytes get packed
// properly. See RecordTypes for more supported types.
uint16_t RecordType = 0;
// The CPU where the thread is running. We assume number of CPUs <= 256.
uint8_t CPU = 0;
// The type of the event. Usually either ENTER = 0 or EXIT = 1.
uint8_t Type = 0;
// The function ID for the record.
int32_t FuncId = 0;
// Get the full 8 bytes of the TSC when we get the log record.
uint64_t TSC = 0;
// The thread ID for the currently running thread.
uint32_t TId = 0;
// Use some bytes in the end of the record for buffers.
char Buffer[4] = {};
} __attribute__((packed));
static_assert(sizeof(XRayRecord) == 32, "XRayRecord != 32 bytes");
} // namespace __xray
#endif // XRAY_XRAY_RECORDS_H
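
Because both structures are packed to exactly 32 bytes (enforced by the
static_asserts above), a NAIVE_LOG file can be read as one XRayFileHeader
followed by an array of XRayRecord entries; a hypothetical reader:

#include <cstdio>

// Sketch: walk a naive-log file; assumes the definitions above are in scope.
void DumpLog(const char *path) {
  FILE *f = fopen(path, "rb");
  if (!f) return;
  __xray::XRayFileHeader header;
  if (fread(&header, sizeof(header), 1, f) == 1 &&
      header.Type == __xray::NAIVE_LOG) {
    __xray::XRayRecord rec;
    while (fread(&rec, sizeof(rec), 1, f) == 1)
      std::printf("cpu=%u func=%d tsc=%llu tid=%u\n", (unsigned)rec.CPU,
                  rec.FuncId, (unsigned long long)rec.TSC, rec.TId);
  }
  fclose(f);
}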

View File

@@ -77,13 +77,16 @@ static struct AsanDeactivatedFlags {
void Print() {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
"quarantine_size_mb %d, thread_local_quarantine_size_kb %d, "
"max_redzone %d, poison_heap %d, malloc_context_size %d, "
"alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, "
"coverage_dir %s, allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb,
allocator_options.thread_local_quarantine_size_kb,
allocator_options.max_redzone, poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir);
allocator_options.may_return_null, coverage, coverage_dir,
allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
@@ -107,6 +110,7 @@ void AsanDeactivate() {
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;

View File

@@ -24,6 +24,7 @@
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)
@@ -33,3 +34,4 @@ COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
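
Both new knobs are ordinary runtime flags as well, so outside of the
activation path they can simply be set at process start; a hypothetical
invocation:

ASAN_OPTIONS=thread_local_quarantine_size_kb=256:allocator_release_to_os_interval_ms=5000 ./a.out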

View File

@@ -207,25 +207,27 @@ QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
@@ -254,7 +256,7 @@ struct Allocator {
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
kMaxThreadLocalQuarantine);
(uptr)options.thread_local_quarantine_size_kb << 10);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
@@ -262,22 +264,59 @@
}
void Initialize(const AllocatorOptions &options) {
allocator.Init(options.may_return_null);
allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
void RePoisonChunk(uptr chunk) {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end &&
ac->chunk_state == CHUNK_ALLOCATED) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
// This is either not an AsanChunk or freed or quarantined AsanChunk.
// In either case, poison everything.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
}
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
// Poison all existing allocation's redzones.
if (CanPoisonMemory()) {
allocator.ForceLock();
allocator.ForEachChunk(
[](uptr chunk, void *alloc) {
((Allocator *)alloc)->RePoisonChunk(chunk);
},
this);
allocator.ForceUnlock();
}
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
@@ -356,7 +395,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -373,8 +412,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
if (!allocated)
return allocator.ReturnNullOrDie();
if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -530,7 +568,7 @@ struct Allocator {
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
QuarantineChunk(m, ptr, stack, alloc_type);
@@ -563,7 +601,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
return allocator.ReturnNullOrDie();
return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -643,6 +681,7 @@ struct Allocator {
void PrintStats() {
allocator.PrintStats();
quarantine.PrintStats();
}
void ForceLock() {
@@ -662,17 +701,23 @@ static AsanAllocator &get_allocator() {
return instance.allocator;
}
bool AsanChunkView::IsValid() {
bool AsanChunkView::IsValid() const {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
bool AsanChunkView::IsAllocated() {
bool AsanChunkView::IsAllocated() const {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
bool AsanChunkView::IsQuarantined() const {
return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
@@ -681,14 +726,14 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
StackTrace AsanChunkView::GetAllocStack() {
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
}
StackTrace AsanChunkView::GetFreeStack() {
StackTrace AsanChunkView::GetFreeStack() const {
return GetStackTraceFromId(GetFreeStackId());
}
@@ -707,6 +752,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);

View File

@@ -33,10 +33,12 @@ struct AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
@@ -49,27 +51,29 @@ void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
bool IsValid(); // Checks if AsanChunkView points to a valid allocated
// or quarantined chunk.
bool IsAllocated(); // Checks if the memory is currently allocated.
uptr Beg(); // First byte of user memory.
uptr End(); // Last byte of user memory.
uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool IsValid() const; // Checks if AsanChunkView points to a valid
// allocated or quarantined chunk.
bool IsAllocated() const; // Checks if the memory is currently allocated.
bool IsQuarantined() const; // Checks if the memory is currently quarantined.
uptr Beg() const; // First byte of user memory.
uptr End() const; // Last byte of user memory.
uptr UsedSize() const; // Size requested by the user.
uptr AllocTid() const;
uptr FreeTid() const;
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId();
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
u32 GetAllocStackId() const;
u32 GetFreeStackId() const;
StackTrace GetAllocStack() const;
StackTrace GetFreeStack() const;
AllocType GetAllocType() const;
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
return true;
}
return false;
}
bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
(void)access_size;
if (addr < Beg()) {
*offset = Beg() - addr;
@@ -77,7 +81,7 @@ class AsanChunkView {
}
return false;
}
bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
if (addr + access_size > End()) {
*offset = addr - End();
return true;
@@ -90,6 +94,7 @@ class AsanChunkView {
};
AsanChunkView FindHeapChunkByAddress(uptr address);
AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -117,18 +122,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__aarch64__) && SANITIZER_ANDROID
const uptr kAllocatorSpace = 0x3000000000ULL;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
typedef DefaultSizeClassMap SizeClassMap;
# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# endif
typedef DefaultSizeClassMap SizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
# endif
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
typedef AsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;

View File

@@ -14,74 +14,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
namespace __asan {
namespace {
using namespace __asan;
void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
descr->region_kind = "stack";
AsanThread::StackFrameAccess access;
if (!t->GetStackFrameAccessByAddr(addr, &access))
return;
static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
char *name, uptr name_size,
uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(access.frame_descr, &vars)) {
if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
if (access.offset <= vars[i].beg + vars[i].size) {
internal_strncat(descr->name, vars[i].name_pos,
Min(descr->name_size, vars[i].name_len));
descr->region_address = addr - (access.offset - vars[i].beg);
descr->region_size = vars[i].size;
if (offset <= vars[i].beg + vars[i].size) {
// We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
// if we're limiting the copy due to name_len, we add 1 to ensure we copy
// the whole name and then terminate with '\0'.
internal_strlcpy(name, vars[i].name_pos,
Min(name_size, vars[i].name_len + 1));
region_address = addr - (offset - vars[i].beg);
region_size = vars[i].size;
return;
}
}
}
void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
descr->name[0] = 0;
descr->region_address = 0;
descr->region_size = 0;
if (!chunk.IsValid()) {
descr->region_kind = "heap-invalid";
return;
}
descr->region_address = chunk.Beg();
descr->region_size = chunk.UsedSize();
descr->region_kind = "heap";
}
void AsanLocateAddress(uptr addr, AddressDescription *descr) {
if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
return;
}
if (GetInfoForAddressIfGlobal(addr, descr)) {
return;
}
asanThreadRegistry().Lock();
AsanThread *thread = FindThreadByStackAddress(addr);
asanThreadRegistry().Unlock();
if (thread) {
GetInfoForStackVar(addr, descr, thread);
return;
}
GetInfoForHeapAddress(addr, descr);
}
static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
@@ -108,18 +73,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
} // namespace __asan
using namespace __asan;
} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
uptr *region_address, uptr *region_size) {
AddressDescription descr = { name, name_size, 0, 0, nullptr };
AsanLocateAddress(addr, &descr);
if (region_address) *region_address = descr.region_address;
if (region_size) *region_size = descr.region_size;
return descr.region_kind;
uptr *region_address_ptr,
uptr *region_size_ptr) {
AddressDescription descr(addr);
uptr region_address = 0;
uptr region_size = 0;
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
if (auto shadow = descr.AsShadow()) {
// region_{address,size} are already 0
switch (shadow->kind) {
case kShadowKindLow:
region_kind = "low shadow";
break;
case kShadowKindGap:
region_kind = "shadow gap";
break;
case kShadowKindHigh:
region_kind = "high shadow";
break;
}
} else if (auto heap = descr.AsHeap()) {
region_kind = "heap";
region_address = heap->chunk_access.chunk_begin;
region_size = heap->chunk_access.chunk_size;
} else if (auto stack = descr.AsStack()) {
region_kind = "stack";
if (!stack->frame_descr) {
// region_{address,size} are already 0
} else {
FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
name_size, region_address, region_size);
}
} else if (auto global = descr.AsGlobal()) {
region_kind = "global";
auto &g = global->globals[0];
internal_strlcpy(name, g.name, name_size);
region_address = g.beg;
region_size = g.size;
} else {
// region_{address,size} are already 0
region_kind = "heap-invalid";
}
CHECK(region_kind);
if (region_address_ptr) *region_address_ptr = region_address;
if (region_size_ptr) *region_size_ptr = region_size;
return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
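
The public shape of __asan_locate_address is unchanged; a client such as
a debugger helper might call it like this (a sketch; assumes an
ASan-instrumented process and the declaration from
<sanitizer/asan_interface.h>):

#include <sanitizer/asan_interface.h>
#include <cstdio>

void Describe(void *p) {
  char name[128];
  void *region = nullptr;
  size_t size = 0;
  const char *kind =
      __asan_locate_address(p, name, sizeof(name), &region, &size);
  std::printf("%p: %s '%s' region=%p size=%zu\n", p, kind, name, region, size);
}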

View File

@@ -0,0 +1,486 @@
//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan functions for getting information about an address and/or printing it.
//===----------------------------------------------------------------------===//
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
// Return " (thread_name) " or an empty string if the name is empty.
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len) {
const char *name = t->name;
if (name[0] == '\0') return "";
buff[0] = 0;
internal_strncat(buff, " (", 3);
internal_strncat(buff, name, buff_len - 4);
internal_strncat(buff, ")", 2);
return buff;
}
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
if (tid == kInvalidTid) return "";
asanThreadRegistry().CheckLocked();
AsanThreadContext *t = GetThreadContextByTidLocked(tid);
return ThreadNameWithParenthesis(t, buff, buff_len);
}
void DescribeThread(AsanThreadContext *context) {
CHECK(context);
asanThreadRegistry().CheckLocked();
// No need to announce the main thread.
if (context->tid == 0 || context->announced) {
return;
}
context->announced = true;
char tname[128];
InternalScopedString str(1024);
str.append("Thread T%d%s", context->tid,
ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
str.append(
" created by T%d%s here:\n", context->parent_tid,
ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
if (flags()->print_full_thread_history) {
AsanThreadContext *parent_context =
GetThreadContextByTidLocked(context->parent_tid);
DescribeThread(parent_context);
}
}
// Shadow descriptions
static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
CHECK(!AddrIsInMem(addr));
if (AddrIsInShadowGap(addr)) {
*shadow_kind = kShadowKindGap;
} else if (AddrIsInHighShadow(addr)) {
*shadow_kind = kShadowKindHigh;
} else if (AddrIsInLowShadow(addr)) {
*shadow_kind = kShadowKindLow;
} else {
CHECK(0 && "Address is not in memory and not in shadow?");
return false;
}
return true;
}
bool DescribeAddressIfShadow(uptr addr) {
ShadowAddressDescription descr;
if (!GetShadowAddressInformation(addr, &descr)) return false;
descr.Print();
return true;
}
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
if (AddrIsInMem(addr)) return false;
ShadowKind shadow_kind;
if (!GetShadowKind(addr, &shadow_kind)) return false;
if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
descr->addr = addr;
descr->kind = shadow_kind;
return true;
}
// Heap descriptions
static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
AsanChunkView chunk, uptr addr,
uptr access_size) {
descr->bad_addr = addr;
if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeLeft;
} else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeRight;
if (descr->offset < 0) {
descr->bad_addr -= descr->offset;
descr->offset = 0;
}
} else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
descr->access_type = kAccessTypeInside;
} else {
descr->access_type = kAccessTypeUnknown;
}
descr->chunk_begin = chunk.Beg();
descr->chunk_size = chunk.UsedSize();
descr->alloc_type = chunk.GetAllocType();
}
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
InternalScopedString str(4096);
str.append("%s", d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
str.append("%p is located %zd bytes to the left of",
(void *)descr.bad_addr, descr.offset);
break;
case kAccessTypeRight:
str.append("%p is located %zd bytes to the right of",
(void *)descr.bad_addr, descr.offset);
break;
case kAccessTypeInside:
str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
descr.offset);
break;
case kAccessTypeUnknown:
str.append(
"%p is located somewhere around (this is AddressSanitizer bug!)",
(void *)descr.bad_addr);
}
str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
(void *)descr.chunk_begin,
(void *)(descr.chunk_begin + descr.chunk_size));
str.append("%s", d.EndLocation());
Printf("%s", str.data());
}
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) {
return false;
}
descr->addr = addr;
GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
access_size);
CHECK_NE(chunk.AllocTid(), kInvalidTid);
descr->alloc_tid = chunk.AllocTid();
descr->alloc_stack_id = chunk.GetAllocStackId();
descr->free_tid = chunk.FreeTid();
if (descr->free_tid != kInvalidTid)
descr->free_stack_id = chunk.GetFreeStackId();
return true;
}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
StackTrace res = StackDepotGet(id);
CHECK(res.trace);
return res;
}
bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
HeapAddressDescription descr;
if (!GetHeapAddressInformation(addr, access_size, &descr)) {
Printf(
"AddressSanitizer can not describe address in more detail "
"(wild memory access suspected).\n");
return false;
}
descr.Print();
return true;
}
// Stack descriptions
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr) {
AsanThread *t = FindThreadByStackAddress(addr);
if (!t) return false;
descr->addr = addr;
descr->tid = t->tid();
// Try to fetch precise stack frame for this access.
AsanThread::StackFrameAccess access;
if (!t->GetStackFrameAccessByAddr(addr, &access)) {
descr->frame_descr = nullptr;
return true;
}
descr->offset = access.offset;
descr->access_size = access_size;
descr->frame_pc = access.frame_pc;
descr->frame_descr = access.frame_descr;
#if SANITIZER_PPC64V1
// On PowerPC64 ELFv1, the address of a function actually points to a
// three-doubleword data structure with the first field containing
// the address of the function's code.
descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
#endif
descr->frame_pc += 16;
return true;
}
static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
uptr access_size, uptr prev_var_end,
uptr next_var_beg) {
uptr var_end = var.beg + var.size;
uptr addr_end = addr + access_size;
const char *pos_descr = nullptr;
// If the variable [var.beg, var_end) is the nearest variable to the
// current memory access, indicate it in the log.
if (addr >= var.beg) {
if (addr_end <= var_end)
pos_descr = "is inside"; // May happen if this is a use-after-return.
else if (addr < var_end)
pos_descr = "partially overflows";
else if (addr_end <= next_var_beg &&
next_var_beg - addr_end >= addr - var_end)
pos_descr = "overflows";
} else {
if (addr_end > var.beg)
pos_descr = "partially underflows";
else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
InternalScopedString str(1024);
str.append(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
str.append(" '");
for (uptr i = 0; i < var.name_len; ++i) {
str.append("%c", var.name_pos[i]);
}
str.append("'");
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
} else {
str.append("\n");
}
Printf("%s", str.data());
}
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
StackAddressDescription descr;
if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
descr.Print();
return true;
}
// Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
InternalScopedString str(4096);
Decorator d;
str.append("%s", d.Location());
if (addr < g.beg) {
str.append("%p is located %zd bytes to the left", (void *)addr,
g.beg - addr);
} else if (addr + access_size > g.beg + g.size) {
if (addr < g.beg + g.size) addr = g.beg + g.size;
str.append("%p is located %zd bytes to the right", (void *)addr,
addr - (g.beg + g.size));
} else {
// Can it happen?
str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
str.append(" of global variable '%s' defined in '",
MaybeDemangleGlobalName(g.name));
PrintGlobalLocation(&str, g);
str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
str.append("%s", d.EndLocation());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr) {
descr->addr = addr;
int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
ARRAY_SIZE(descr->globals));
descr->size = globals_num;
descr->access_size = access_size;
return globals_num != 0;
}
bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
const char *bug_type) {
GlobalAddressDescription descr;
if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
descr.Print(bug_type);
return true;
}
void ShadowAddressDescription::Print() const {
Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
}
void GlobalAddressDescription::Print(const char *bug_type) const {
for (int i = 0; i < size; i++) {
DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
if (bug_type &&
0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
reg_sites[i]) {
Printf(" registered at:\n");
StackDepotGet(reg_sites[i]).Print();
}
}
}
void StackAddressDescription::Print() const {
Decorator d;
char tname[128];
Printf("%s", d.Location());
Printf("Address %p is located in stack of thread T%d%s", addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
if (!frame_descr) {
Printf("%s\n", d.EndLocation());
return;
}
Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// The symbolizer may print more than one frame if inlining was involved.
// The frame numbers may be different than those in the stack trace printed
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from entirely different place
// (e.g. use-after-scope, or different thread's stack).
Printf("%s", d.EndLocation());
StackTrace alloca_stack(&frame_pc, 1);
alloca_stack.Print();
InternalMmapVector<StackVarDescr> vars(16);
if (!ParseFrameDescription(frame_descr, &vars)) {
Printf(
"AddressSanitizer can't parse the stack frame "
"descriptor: |%s|\n",
frame_descr);
// 'addr' is a stack address, so return true even if we can't parse frame
return;
}
uptr n_objects = vars.size();
// Report the number of stack objects.
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
for (uptr i = 0; i < n_objects; i++) {
uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
next_var_beg);
}
Printf(
"HINT: this may be a false positive if your program uses "
"some custom stack unwind mechanism or swapcontext\n");
if (SANITIZER_WINDOWS)
Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
else
Printf(" (longjmp and C++ exceptions *are* supported)\n");
DescribeThread(GetThreadContextByTidLocked(tid));
}
void HeapAddressDescription::Print() const {
PrintHeapChunkAccess(addr, chunk_access);
asanThreadRegistry().CheckLocked();
AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
char tname[128];
Decorator d;
AsanThreadContext *free_thread = nullptr;
if (free_tid != kInvalidTid) {
free_thread = GetThreadContextByTidLocked(free_tid);
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
StackTrace free_stack = GetStackTraceFromId(free_stack_id);
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
} else {
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
}
alloc_stack.Print();
DescribeThread(GetCurrentThread());
if (free_thread) DescribeThread(free_thread);
DescribeThread(alloc_thread);
}
AddressDescription::AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry) {
if (GetShadowAddressInformation(addr, &data.shadow)) {
data.kind = kAddressKindShadow;
return;
}
if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
data.kind = kAddressKindHeap;
return;
}
bool isStackMemory = false;
if (shouldLockThreadRegistry) {
ThreadRegistryLock l(&asanThreadRegistry());
isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
} else {
isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
}
if (isStackMemory) {
data.kind = kAddressKindStack;
return;
}
if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
data.kind = kAddressKindGlobal;
return;
}
data.kind = kAddressKindWild;
addr = 0;
}
void PrintAddressDescription(uptr addr, uptr access_size,
const char *bug_type) {
ShadowAddressDescription shadow_descr;
if (GetShadowAddressInformation(addr, &shadow_descr)) {
shadow_descr.Print();
return;
}
GlobalAddressDescription global_descr;
if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
global_descr.Print(bug_type);
return;
}
StackAddressDescription stack_descr;
if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
stack_descr.Print();
return;
}
HeapAddressDescription heap_descr;
if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
heap_descr.Print();
return;
}
// We exhausted our possibilities. Bail out.
Printf(
"AddressSanitizer can not describe address in more detail "
"(wild memory access suspected).\n");
}
} // namespace __asan

View File

@@ -0,0 +1,253 @@
//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_descriptions.cc.
// TODO(filcab): Most struct definitions should move to the interface headers.
//===----------------------------------------------------------------------===//
#ifndef ASAN_DESCRIPTIONS_H
#define ASAN_DESCRIPTIONS_H
#include "asan_allocator.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
namespace __asan {
void DescribeThread(AsanThreadContext *context);
static inline void DescribeThread(AsanThread *t) {
if (t) DescribeThread(t->context());
}
const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
uptr buff_len);
const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
const char *EndLocation() { return Default(); }
const char *Allocation() { return Magenta(); }
const char *EndAllocation() { return Default(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
case kAsanStackLeftRedzoneMagic:
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
return Red();
case kAsanStackAfterReturnMagic:
return Magenta();
case kAsanInitializationOrderMagic:
return Cyan();
case kAsanUserPoisonedMemoryMagic:
case kAsanContiguousContainerOOBMagic:
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
return Blue();
case kAsanStackUseAfterScopeMagic:
return Magenta();
case kAsanGlobalRedzoneMagic:
return Red();
case kAsanInternalHeapMagic:
return Yellow();
case kAsanIntraObjectRedzone:
return Yellow();
default:
return Default();
}
}
const char *EndShadowByte() { return Default(); }
const char *MemoryByte() { return Magenta(); }
const char *EndMemoryByte() { return Default(); }
};
enum ShadowKind : u8 {
kShadowKindLow,
kShadowKindGap,
kShadowKindHigh,
};
static const char *const ShadowNames[] = {"low shadow", "shadow gap",
"high shadow"};
struct ShadowAddressDescription {
uptr addr;
ShadowKind kind;
u8 shadow_byte;
void Print() const;
};
bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
bool DescribeAddressIfShadow(uptr addr);
enum AccessType {
kAccessTypeLeft,
kAccessTypeRight,
kAccessTypeInside,
kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
};
struct ChunkAccess {
uptr bad_addr;
sptr offset;
uptr chunk_begin;
uptr chunk_size;
u32 access_type : 2;
u32 alloc_type : 2;
};
struct HeapAddressDescription {
uptr addr;
uptr alloc_tid;
uptr free_tid;
u32 alloc_stack_id;
u32 free_stack_id;
ChunkAccess chunk_access;
void Print() const;
};
bool GetHeapAddressInformation(uptr addr, uptr access_size,
HeapAddressDescription *descr);
bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
struct StackAddressDescription {
uptr addr;
uptr tid;
uptr offset;
uptr frame_pc;
uptr access_size;
const char *frame_descr;
void Print() const;
};
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
static const int kMaxGlobals = 4;
__asan_global globals[kMaxGlobals];
u32 reg_sites[kMaxGlobals];
uptr access_size;
u8 size;
void Print(const char *bug_type = "") const;
};
bool GetGlobalAddressInformation(uptr addr, uptr access_size,
GlobalAddressDescription *descr);
bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
// General function to describe an address. Will try to describe the address as
// a shadow, global (variable), stack, or heap address.
// bug_type is optional and is used for checking if we're reporting an
// initialization-order-fiasco
// The proper access_size should be passed for stack, global, and heap
// addresses. Defaults to 1.
// Each of the *AddressDescription functions has its own Print() member, which
// may take access_size and bug_type parameters if needed.
void PrintAddressDescription(uptr addr, uptr access_size = 1,
const char *bug_type = "");
enum AddressKind {
kAddressKindWild,
kAddressKindShadow,
kAddressKindHeap,
kAddressKindStack,
kAddressKindGlobal,
};
class AddressDescription {
struct AddressDescriptionData {
AddressKind kind;
union {
ShadowAddressDescription shadow;
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
uptr addr;
};
};
AddressDescriptionData data;
public:
AddressDescription() = default;
// shouldLockThreadRegistry allows us to skip locking if we're sure we already
// have done it.
AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
: AddressDescription(addr, 1, shouldLockThreadRegistry) {}
AddressDescription(uptr addr, uptr access_size,
bool shouldLockThreadRegistry = true);
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
return data.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
return data.heap.addr;
case kAddressKindStack:
return data.stack.addr;
case kAddressKindGlobal:
return data.global.addr;
}
UNREACHABLE("AddressInformation kind is invalid");
}
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
Printf("Address %p is a wild pointer.\n", data.addr);
return;
case kAddressKindShadow:
return data.shadow.Print();
case kAddressKindHeap:
return data.heap.Print();
case kAddressKindStack:
return data.stack.Print();
case kAddressKindGlobal:
// initialization-order-fiasco has a special Print()
return data.global.Print(bug_descr);
}
UNREACHABLE("AddressInformation kind is invalid");
}
void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
const ShadowAddressDescription *AsShadow() const {
return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
}
const HeapAddressDescription *AsHeap() const {
return data.kind == kAddressKindHeap ? &data.heap : nullptr;
}
const StackAddressDescription *AsStack() const {
return data.kind == kAddressKindStack ? &data.stack : nullptr;
}
const GlobalAddressDescription *AsGlobal() const {
return data.kind == kAddressKindGlobal ? &data.global : nullptr;
}
};
} // namespace __asan
#endif // ASAN_DESCRIPTIONS_H
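
With this in place, reporting code can classify and describe any address
through a single object, e.g. (an internal sketch, inside namespace
__asan):

// Classifies addr as shadow/heap/stack/global/wild, then dispatches to
// the matching Print() implementation.
void DescribeSketch(uptr addr) {
  AddressDescription descr(addr);
  descr.Print();
}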

View File

@@ -0,0 +1,503 @@
//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan implementation for error structures.
//===----------------------------------------------------------------------===//
#include "asan_errors.h"
#include <signal.h>
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
void ErrorStackOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s on address %p"
" (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(),
(void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
InternalScopedString str(1024);
str.append("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
}
str.append("\n");
} else {
str.append("unaccessible\n");
}
Report("%s", str.data());
}
static void MaybeDumpRegisters(void *context) {
if (!flags()->dump_registers) return;
SignalContext::DumpAllRegisters(context);
}
void ErrorDeadlySignal::Print() {
Decorator d;
Printf("%s", d.Warning());
const char *description = DescribeSignalOrException(signo);
Report(
"ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
"T%d)\n",
description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
if (is_memory_access) {
const char *access_type =
write_flag == SignalContext::WRITE
? "WRITE"
: (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
Report("The signal is caused by a %s memory access.\n", access_type);
if (addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
}
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
MaybeDumpInstructionBytes(pc);
MaybeDumpRegisters(context);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary(description, &stack);
}
void ErrorDoubleFree::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting %s on %p in "
"thread T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
second_free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorNewDeleteSizeMismatch::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: %s on %p in thread "
"T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
addr_description.chunk_access.chunk_size, delete_size);
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=new_delete_type_mismatch=0\n");
}
void ErrorFreeNotMalloced::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
Report(
"ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p in thread T%d%s\n",
addr_description.Address(), tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorAllocTypeMismatch::Print() {
static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
"operator new []"};
static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
"operator delete []"};
CHECK_NE(alloc_type, dealloc_type);
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
scariness.GetDescription(),
alloc_names[alloc_type], dealloc_names[dealloc_type],
addr_description.addr);
Printf("%s", d.EndWarning());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
stack.Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
}
void ErrorMallocUsableSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorStringFunctionMemoryRangesOverlap::Print() {
Decorator d;
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
"overlap\n",
bug_type, addr1_description.Address(),
addr1_description.Address() + length1, addr2_description.Address(),
addr2_description.Address() + length2);
Printf("%s", d.EndWarning());
scariness.Print();
stack->Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(bug_type, stack);
}
void ErrorStringFunctionSizeOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
scariness.GetDescription(), size);
Printf("%s", d.EndWarning());
scariness.Print();
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorBadParamsToAnnotateContiguousContainer::Print() {
Report(
"ERROR: AddressSanitizer: bad parameters to "
"__sanitizer_annotate_contiguous_container:\n"
" beg : %p\n"
" end : %p\n"
" old_mid : %p\n"
" new_mid : %p\n",
beg, end, old_mid, new_mid);
uptr granularity = SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorODRViolation::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.EndWarning());
InternalScopedString g1_loc(256), g2_loc(256);
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
MaybeDemangleGlobalName(global1.name), g1_loc.data());
Printf(" [2] size=%zd '%s' %s\n", global2.size,
MaybeDemangleGlobalName(global2.name), g2_loc.data());
if (stack_id1 && stack_id2) {
Printf("These globals were registered at these points:\n");
Printf(" [1]:\n");
StackDepotGet(stack_id1).Print();
Printf(" [2]:\n");
StackDepotGet(stack_id2).Print();
}
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg(256);
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
void ErrorInvalidPointerPair::Print() {
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
addr1_description.Address(), addr2_description.Address());
Printf("%s", d.EndWarning());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
addr1_description.Print();
addr2_description.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
return s[-1] > 127 && s[1] > 127;
}
ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
bool is_write_, uptr access_size_)
: ErrorBase(tid),
addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
pc(pc_),
bp(bp_),
sp(sp_),
access_size(access_size_),
is_write(is_write_),
shadow_val(0) {
scariness.Clear();
if (access_size) {
if (access_size <= 9) {
char desr[] = "?-byte";
desr[0] = '0' + access_size;
scariness.Scare(access_size + access_size / 2, desr);
} else if (access_size >= 10) {
scariness.Scare(15, "multi-byte");
}
is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
// Determine the error type.
bug_descr = "unknown-crash";
if (AddrIsInMem(addr)) {
u8 *shadow_addr = (u8 *)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
// If we are in the partial right redzone, look at the next shadow byte.
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
bool far_from_bounds = false;
shadow_val = *shadow_addr;
int bug_type_score = 0;
// For use-after-frees reads are almost as bad as writes.
int read_after_free_bonus = 0;
switch (shadow_val) {
case kAsanHeapLeftRedzoneMagic:
case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanHeapFreeMagic:
bug_descr = "heap-use-after-free";
bug_type_score = 20;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanStackLeftRedzoneMagic:
bug_descr = "stack-buffer-underflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanInitializationOrderMagic:
bug_descr = "initialization-order-fiasco";
bug_type_score = 1;
break;
case kAsanStackMidRedzoneMagic:
case kAsanStackRightRedzoneMagic:
bug_descr = "stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanStackAfterReturnMagic:
bug_descr = "stack-use-after-return";
bug_type_score = 30;
if (!is_write) read_after_free_bonus = 18;
break;
case kAsanUserPoisonedMemoryMagic:
bug_descr = "use-after-poison";
bug_type_score = 20;
break;
case kAsanContiguousContainerOOBMagic:
bug_descr = "container-overflow";
bug_type_score = 10;
break;
case kAsanStackUseAfterScopeMagic:
bug_descr = "stack-use-after-scope";
bug_type_score = 10;
break;
case kAsanGlobalRedzoneMagic:
bug_descr = "global-buffer-overflow";
bug_type_score = 10;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
case kAsanIntraObjectRedzone:
bug_descr = "intra-object-overflow";
bug_type_score = 10;
break;
case kAsanAllocaLeftMagic:
case kAsanAllocaRightMagic:
bug_descr = "dynamic-stack-buffer-overflow";
bug_type_score = 25;
far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
break;
}
scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
}
}
}
static void PrintContainerOverflowHint() {
Printf("HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_container_overflow=0.\n"
"If you suspect a false positive see also: "
"https://github.com/google/sanitizers/wiki/"
"AddressSanitizerContainerOverflow.\n");
}
static void PrintShadowByte(InternalScopedString *str, const char *before,
u8 byte, const char *after = "\n") {
PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
}
static void PrintLegend(InternalScopedString *str) {
str->append(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
(int)SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
str->append(" Partially addressable: ");
for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
str->append("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
PrintShadowByte(str, " Stack left redzone: ",
kAsanStackLeftRedzoneMagic);
PrintShadowByte(str, " Stack mid redzone: ",
kAsanStackMidRedzoneMagic);
PrintShadowByte(str, " Stack right redzone: ",
kAsanStackRightRedzoneMagic);
PrintShadowByte(str, " Stack after return: ",
kAsanStackAfterReturnMagic);
PrintShadowByte(str, " Stack use after scope: ",
kAsanStackUseAfterScopeMagic);
PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
PrintShadowByte(str, " Global init order: ",
kAsanInitializationOrderMagic);
PrintShadowByte(str, " Poisoned by user: ",
kAsanUserPoisonedMemoryMagic);
PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
PrintShadowByte(str, " Array cookie: ",
kAsanArrayCookieMagic);
PrintShadowByte(str, " Intra object redzone: ",
kAsanIntraObjectRedzone);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
}
static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
if (before) str->append("%s%p:", before, bytes);
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before =
p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
const char *after = p == guilty ? "]" : "";
PrintShadowByte(str, before, *p, after);
}
str->append("\n");
}
static void PrintShadowMemoryForAddress(uptr addr) {
if (!AddrIsInMem(addr)) return;
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str(4096 * 8);
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
const char *prefix = (i == 0) ? "=>" : " ";
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
(u8 *)shadow_addr, n_bytes_per_row);
}
if (flags()->print_legend) PrintLegend(&str);
Printf("%s", str.data());
}
void ErrorGeneric::Print() {
Decorator d;
Printf("%s", d.Warning());
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
bug_descr, (void *)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
(void *)addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
scariness.Print();
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
// Pass bug_descr because we have a special case for
// initialization-order-fiasco
addr_description.Print(bug_descr);
if (shadow_val == kAsanContiguousContainerOOBMagic)
PrintContainerOverflowHint();
ReportErrorSummary(bug_descr, &stack);
PrintShadowMemoryForAddress(addr);
}
} // namespace __asan
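The ErrorGeneric constructor above turns the faulting access into a "scariness" score by summing small heuristics: a 1-9-byte access contributes size + size/2, a wider one a flat 15, a write adds 20 versus 1 for a read, the shadow-derived bug type adds its own weight, reads of freed or returned stack memory get an extra 18, and a far-from-bounds access adds 10 more. A compressed standalone restatement of that arithmetic, simplified to three of the bug kinds from the switch above:

#include <cstdio>

// Illustrative subset of the bug-type weights used in ErrorGeneric.
enum Bug { HeapOverflow = 10, UseAfterFree = 20, UseAfterReturn = 30 };

int Scariness(unsigned access_size, bool is_write, Bug bug) {
  int score = 0;
  if (access_size >= 1 && access_size <= 9)
    score += access_size + access_size / 2;   // "N-byte"
  else if (access_size >= 10)
    score += 15;                              // "multi-byte"
  score += is_write ? 20 : 1;                 // writes are scarier than reads
  score += bug;                               // base weight of the bug type
  // For use-after-free-style bugs, reads are almost as bad as writes.
  if (!is_write && (bug == UseAfterFree || bug == UseAfterReturn))
    score += 18;
  return score;
}

int main() {
  // 8-byte read of freed heap memory: 12 (size) + 1 (read) + 20 + 18 = 51.
  std::printf("%d\n", Scariness(8, /*is_write=*/false, UseAfterFree));
}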

View File

@ -0,0 +1,390 @@
//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for error structures.
//===----------------------------------------------------------------------===//
#ifndef ASAN_ERRORS_H
#define ASAN_ERRORS_H
#include "asan_descriptions.h"
#include "asan_scariness_score.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __asan {
struct ErrorBase {
ErrorBase() = default;
explicit ErrorBase(u32 tid_) : tid(tid_) {}
ScarinessScoreBase scariness;
u32 tid;
};
struct ErrorStackOverflow : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorStackOverflow never owns the context.
void *context;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStackOverflow() = default;
ErrorStackOverflow(u32 tid, const SignalContext &sig)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context) {
scariness.Clear();
scariness.Scare(10, "stack-overflow");
}
void Print();
};
struct ErrorDeadlySignal : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorDeadlySignal never owns the context.
void *context;
int signo;
SignalContext::WriteFlag write_flag;
bool is_memory_access;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDeadlySignal() = default;
ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context),
signo(signo_),
write_flag(sig.write_flag),
is_memory_access(sig.is_memory_access) {
scariness.Clear();
if (is_memory_access) {
if (addr < GetPageSizeCached()) {
scariness.Scare(10, "null-deref");
} else if (addr == pc) {
scariness.Scare(60, "wild-jump");
} else if (write_flag == SignalContext::WRITE) {
scariness.Scare(30, "wild-addr-write");
} else if (write_flag == SignalContext::READ) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
}
} else {
scariness.Scare(10, "signal");
}
}
void Print();
};
struct ErrorDoubleFree : ErrorBase {
// ErrorDoubleFree doesn't own the stack trace.
const BufferedStackTrace *second_free_stack;
HeapAddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDoubleFree() = default;
ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid), second_free_stack(stack) {
CHECK_GT(second_free_stack->size, 0);
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(42, "double-free");
}
void Print();
};
struct ErrorNewDeleteSizeMismatch : ErrorBase {
// ErrorNewDeleteSizeMismatch doesn't own the stack trace.
const BufferedStackTrace *free_stack;
HeapAddressDescription addr_description;
uptr delete_size;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorNewDeleteSizeMismatch() = default;
ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
uptr delete_size_)
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "new-delete-type-mismatch");
}
void Print();
};
struct ErrorFreeNotMalloced : ErrorBase {
// ErrorFreeNotMalloced doesn't own the stack trace.
const BufferedStackTrace *free_stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorFreeNotMalloced() = default;
ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
: ErrorBase(tid),
free_stack(stack),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(40, "bad-free");
}
void Print();
};
struct ErrorAllocTypeMismatch : ErrorBase {
// ErrorAllocTypeMismatch doesn't own the stack trace.
const BufferedStackTrace *dealloc_stack;
HeapAddressDescription addr_description;
AllocType alloc_type, dealloc_type;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorAllocTypeMismatch() = default;
ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
AllocType alloc_type_, AllocType dealloc_type_)
: ErrorBase(tid),
dealloc_stack(stack),
alloc_type(alloc_type_),
dealloc_type(dealloc_type_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "alloc-dealloc-mismatch");
};
void Print();
};
struct ErrorMallocUsableSizeNotOwned : ErrorBase {
// ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorMallocUsableSizeNotOwned() = default;
ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-malloc_usable_size");
}
void Print();
};
struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
// ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorSanitizerGetAllocatedSizeNotOwned() = default;
ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
uptr addr)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
}
void Print();
};
struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
// ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr length1, length2;
AddressDescription addr1_description;
AddressDescription addr2_description;
const char *function;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionMemoryRangesOverlap() = default;
ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
uptr addr1, uptr length1_, uptr addr2,
uptr length2_, const char *function_)
: ErrorBase(tid),
stack(stack_),
length1(length1_),
length2(length2_),
addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
function(function_) {
char bug_type[100];
internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
scariness.Clear();
scariness.Scare(10, bug_type);
}
void Print();
};
struct ErrorStringFunctionSizeOverflow : ErrorBase {
// ErrorStringFunctionSizeOverflow doesn't own the stack trace.
const BufferedStackTrace *stack;
AddressDescription addr_description;
uptr size;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStringFunctionSizeOverflow() = default;
ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
uptr addr, uptr size_)
: ErrorBase(tid),
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false),
size(size_) {
scariness.Clear();
scariness.Scare(10, "negative-size-param");
}
void Print();
};
struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
// ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
const BufferedStackTrace *stack;
uptr beg, end, old_mid, new_mid;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorBadParamsToAnnotateContiguousContainer() = default;
// PS4: Do we want an AddressDescription for beg?
ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
BufferedStackTrace *stack_,
uptr beg_, uptr end_,
uptr old_mid_, uptr new_mid_)
: ErrorBase(tid),
stack(stack_),
beg(beg_),
end(end_),
old_mid(old_mid_),
new_mid(new_mid_) {
scariness.Clear();
scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
}
void Print();
};
struct ErrorODRViolation : ErrorBase {
__asan_global global1, global2;
u32 stack_id1, stack_id2;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorODRViolation() = default;
ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
const __asan_global *g2, u32 stack_id2_)
: ErrorBase(tid),
global1(*g1),
global2(*g2),
stack_id1(stack_id1_),
stack_id2(stack_id2_) {
scariness.Clear();
scariness.Scare(10, "odr-violation");
}
void Print();
};
struct ErrorInvalidPointerPair : ErrorBase {
uptr pc, bp, sp;
AddressDescription addr1_description;
AddressDescription addr2_description;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorInvalidPointerPair() = default;
ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
uptr p2)
: ErrorBase(tid),
pc(pc_),
bp(bp_),
sp(sp_),
addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
scariness.Scare(10, "invalid-pointer-pair");
}
void Print();
};
struct ErrorGeneric : ErrorBase {
AddressDescription addr_description;
uptr pc, bp, sp;
uptr access_size;
const char *bug_descr;
bool is_write;
u8 shadow_val;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorGeneric() = default;
ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
uptr access_size_);
void Print();
};
// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
macro(StackOverflow) \
macro(DeadlySignal) \
macro(DoubleFree) \
macro(NewDeleteSizeMismatch) \
macro(FreeNotMalloced) \
macro(AllocTypeMismatch) \
macro(MallocUsableSizeNotOwned) \
macro(SanitizerGetAllocatedSizeNotOwned) \
macro(StringFunctionMemoryRangesOverlap) \
macro(StringFunctionSizeOverflow) \
macro(BadParamsToAnnotateContiguousContainer) \
macro(ODRViolation) \
macro(InvalidPointerPair) \
macro(Generic)
// clang-format on
#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
case kErrorKind##name: \
return name.Print();
enum ErrorKind {
kErrorKindInvalid = 0,
ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
};
struct ErrorDescription {
ErrorKind kind;
// We're using a tagged union because it allows us to have a trivially
// copyable type and use the same structures as the public interface.
//
// We can add a wrapper around it to make it "more c++-like", but that would
// add a lot of code and the benefit wouldn't be that big.
union {
ErrorBase Base;
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
};
ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
bool IsValid() { return kind != kErrorKindInvalid; }
void Print() {
switch (kind) {
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
case kErrorKindInvalid:
CHECK(0);
}
CHECK(0);
}
};
#undef ASAN_FOR_EACH_ERROR_KIND
#undef ASAN_DEFINE_ERROR_KIND
#undef ASAN_ERROR_DESCRIPTION_MEMBER
#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
#undef ASAN_ERROR_DESCRIPTION_PRINT
} // namespace __asan
#endif // ASAN_ERRORS_H
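ASAN_FOR_EACH_ERROR_KIND above is an X-macro: one master list of error names is expanded several times to generate the enum, the union members, and the Print() dispatch, so adding an error kind is a one-line change to the list. The same trick in miniature, with hypothetical event kinds in place of ASan's errors:

#include <cstdio>

// One master list; each expansion below passes a different `macro`.
#define FOR_EACH_EVENT(macro) \
  macro(Open)                 \
  macro(Close)

#define DEFINE_STRUCT(name) struct Event##name { int fd; };
#define DEFINE_KIND(name) kEvent##name,
#define DEFINE_MEMBER(name) Event##name name;
#define DISPATCH(name) \
  case kEvent##name:   \
    std::printf(#name " fd=%d\n", name.fd); return;

FOR_EACH_EVENT(DEFINE_STRUCT)                       // the payload structs
enum EventKind { kEventInvalid = 0, FOR_EACH_EVENT(DEFINE_KIND) };

struct Event {
  EventKind kind;
  union { FOR_EACH_EVENT(DEFINE_MEMBER) };          // one member per entry
  void Print() {
    switch (kind) {
      FOR_EACH_EVENT(DISPATCH)                      // one case per entry
      case kEventInvalid: std::printf("invalid\n"); return;
    }
  }
};

int main() {
  Event e;
  e.kind = kEventClose;
  e.Close.fd = 3;
  e.Print();   // -> Close fd=3
}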

View File

@ -100,7 +100,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
// with regular non-atimic load and store (at least I was not able to make
// with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;

View File

@ -52,7 +52,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
// frames in round robin fasion to maximize the delay between a deallocation
// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
@ -99,12 +99,12 @@ class FakeStack {
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
// Divide n by the numbe of frames in size class.
// Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
// The the pointer to the flags of the given class_id.
// The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);

View File

@ -156,9 +156,24 @@ void InitializeFlags() {
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
(ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
if (f->thread_local_quarantine_size_kb < 0) {
const u32 kDefaultThreadLocalQuarantineSizeKb =
// It is not advised to go lower than 64Kb, otherwise quarantine batches
// pushed from thread local quarantine to global one will create too
// much overhead. One quarantine batch size is 8Kb and it holds up to
// 1021 chunks, which amounts to 1/8 memory overhead per batch when
// thread local quarantine is set to 64Kb.
(ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
}
if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) {
Report("%s: thread_local_quarantine_size_kb can be set to 0 only when "
"quarantine_size_mb is set to 0\n", SanitizerToolName);
Die();
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");

View File

@ -23,6 +23,12 @@ ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
"Size (in Kb) of thread local quarantine used to detect "
"use-after-free errors. Lower value may reduce memory usage but "
"increase the chance of false negatives. It is not advised to go "
"lower than 64Kb, otherwise frequent transfers to global quarantine "
"might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
@ -102,7 +108,7 @@ ASAN_FLAG(bool, poison_array_cookie, true,
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
(SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
!SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
@ -133,6 +139,9 @@ ASAN_FLAG(int, detect_odr_violation, 2,
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(bool, dump_registers, true,
"If true, dump values of CPU registers when SEGV happens. Only "
"available on OS X for now.")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "

View File

@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@ -123,18 +124,6 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
Global g = {};
if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
internal_strncpy(descr->name, g.name, descr->name_size);
descr->region_address = g.beg;
descr->region_size = g.size;
descr->region_kind = "global";
return true;
}
return false;
}
enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
@ -279,6 +268,46 @@ void StopInitOrderChecking() {
}
}
static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
const char *MaybeDemangleGlobalName(const char *name) {
// We can spoil names of globals with C linkage, so use a heuristic
// approach to check if the name should be demangled.
bool should_demangle = false;
if (name[0] == '_' && name[1] == 'Z')
should_demangle = true;
else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
should_demangle = true;
return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
unsigned char c = *(unsigned char *)p;
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char *)(g.beg + g.size - 1) != '\0') return;
str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
(char *)g.beg);
}
static const char *GlobalFilename(const __asan_global &g) {
const char *res = g.module_name;
// Prefer the filename from source location, if it is available.
if (g.location) res = g.location->filename;
CHECK(res);
return res;
}
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
str->append("%s", GlobalFilename(g));
if (!g.location) return;
if (g.location->line_no) str->append(":%d", g.location->line_no);
if (g.location->column_no) str->append(":%d", g.location->column_no);
}
} // namespace __asan
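MaybeDemangleGlobalName above only demangles when the name looks mangled ("_Z" prefix for Itanium, "\01?" for MSVC), since demangling a plain C identifier could corrupt it. Outside the sanitizer runtime the same Itanium check can be done with the standard ABI hook; a sketch, assuming a GCC/Clang toolchain that provides <cxxabi.h> and a POSIX strdup:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cxxabi.h>

// Returns a malloc'd demangled string, or a copy of `name` if it does not
// look Itanium-mangled. Caller frees.
char *MaybeDemangle(const char *name) {
  if (name[0] == '_' && name[1] == 'Z') {       // Itanium mangling prefix
    int status = 0;
    if (char *d = abi::__cxa_demangle(name, nullptr, nullptr, &status))
      return d;                                  // status == 0 on success
  }
  return strdup(name);                           // C linkage: leave as-is
}

int main() {
  char *s = MaybeDemangle("_ZN4asan6globalE");
  std::printf("%s\n", s);                        // -> asan::global
  free(s);
}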
// ---------------------- Interface ---------------- {{{1
@ -319,6 +348,20 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
// The MSVC incremental linker may pad globals out to 256 bytes. As long
// as __asan_global is less than 256 bytes large and its size is a power
// of two, we can skip over the padding.
static_assert(
sizeof(__asan_global) < 256 &&
(sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
"sizeof(__asan_global) incompatible with incremental linker padding");
// If these are padding bytes, the rest of the global should be zero.
CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
globals[i].name == nullptr && globals[i].module_name == nullptr &&
globals[i].odr_indicator == 0);
continue;
}
RegisterGlobal(&globals[i]);
}
}
@ -329,6 +372,11 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
// Skip globals that look like padding from the MSVC incremental linker.
// See comment in __asan_register_globals.
continue;
}
UnregisterGlobal(&globals[i]);
}
}
@ -339,10 +387,10 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
!CanPoisonMemory())
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
@ -365,7 +413,8 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
!CanPoisonMemory())
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);

View File

@ -0,0 +1,62 @@
//===-- asan_globals_win.cc -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Global registration code that is linked into every Windows DLL and EXE.
//
//===----------------------------------------------------------------------===//
#include "asan_interface_internal.h"
#if SANITIZER_WINDOWS
namespace __asan {
#pragma section(".ASAN$GA", read, write) // NOLINT
#pragma section(".ASAN$GZ", read, write) // NOLINT
extern "C" __declspec(allocate(".ASAN$GA"))
__asan_global __asan_globals_start = {};
extern "C" __declspec(allocate(".ASAN$GZ"))
__asan_global __asan_globals_end = {};
#pragma comment(linker, "/merge:.ASAN=.data")
static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
__asan_global *start = &__asan_globals_start + 1;
__asan_global *end = &__asan_globals_end;
uptr bytediff = (uptr)end - (uptr)start;
if (bytediff % sizeof(__asan_global) != 0) {
#ifdef ASAN_DLL_THUNK
__debugbreak();
#else
CHECK("corrupt asan global array");
#endif
}
// We know end >= start because the linker sorts the portion after the dollar
// sign alphabetically.
uptr n = end - start;
hook(start, n);
}
static void register_dso_globals() {
call_on_globals(&__asan_register_globals);
}
static void unregister_dso_globals() {
call_on_globals(&__asan_unregister_globals);
}
// Register globals
#pragma section(".CRT$XCU", long, read) // NOLINT
#pragma section(".CRT$XTX", long, read) // NOLINT
extern "C" __declspec(allocate(".CRT$XCU"))
void (*const __asan_dso_reg_hook)() = &register_dso_globals;
extern "C" __declspec(allocate(".CRT$XTX"))
void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
} // namespace __asan
#endif // SANITIZER_WINDOWS
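asan_globals_win.cc relies on the MSVC linker sorting section fragments alphabetically by the suffix after the '$': data placed in .ASAN$GA sorts before everything else in .ASAN$G*, and .ASAN$GZ after, so the two dummy globals bracket whatever the compiler emitted in between. A minimal MSVC-only sketch of the same bracketing trick, with hypothetical section names (the pointer walk across distinct objects is formally unspecified but is exactly how CRT initializer tables work in practice):

// Compile with MSVC (cl.exe). The linker merges fragments whose names share
// a prefix, ordered alphabetically by the suffix after '$'.
#include <cstdio>

#pragma section(".mylist$a", read)
#pragma section(".mylist$m", read)
#pragma section(".mylist$z", read)

__declspec(allocate(".mylist$a")) const int list_start = 0;  // sorts first
__declspec(allocate(".mylist$m")) const int entry1 = 42;     // payload
__declspec(allocate(".mylist$m")) const int entry2 = 43;
__declspec(allocate(".mylist$z")) const int list_end = 0;    // sorts last

int main() {
  // Walk everything between the two sentinels; linker padding, if any,
  // reads as zero, so skip it -- the same reason __asan_register_globals
  // above skips all-zero __asan_global entries.
  for (const int *p = &list_start + 1; p < &list_end; ++p)
    if (*p) std::printf("%d\n", *p);   // -> 42, 43
}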

View File

@ -0,0 +1,34 @@
//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Interface to the Windows-specific global management code. Separated into a
// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
// which defines symbols that clash with other sanitizer headers.
//
//===----------------------------------------------------------------------===//
#ifndef ASAN_GLOBALS_WIN_H
#define ASAN_GLOBALS_WIN_H
#if !defined(_MSC_VER)
#error "this file is Windows-only, and uses MSVC pragmas"
#endif
#if defined(_WIN64)
#define SANITIZER_SYM_PREFIX
#else
#define SANITIZER_SYM_PREFIX "_"
#endif
// Use this macro to force linking asan_globals_win.cc into the DSO.
#define ASAN_LINK_GLOBALS_WIN() \
__pragma( \
comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))
#endif // ASAN_GLOBALS_WIN_H

View File

@ -81,6 +81,51 @@ struct AsanInterceptorContext {
} \
} while (0)
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
@ -198,10 +243,25 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
// Asan needs custom handling of these:
#undef SANITIZER_INTERCEPT_MEMSET
#undef SANITIZER_INTERCEPT_MEMMOVE
#undef SANITIZER_INTERCEPT_MEMCPY
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
} while (false)
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
ASAN_MEMCPY_IMPL(ctx, to, from, size); \
} while (false)
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
do { \
ASAN_INTERCEPTOR_ENTER(ctx, memset); \
ASAN_MEMSET_IMPL(ctx, block, c, size); \
} while (false)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
@ -389,90 +449,18 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
if (UNLIKELY(!asan_inited)) \
return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memmove);
ASAN_MEMMOVE_IMPL(ctx, to, from, size);
}
INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
#if !SANITIZER_MAC
ASAN_MEMCPY_IMPL(ctx, to, from, size);
#else
// At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
// with WRAP(memcpy). As a result, false positives are reported for memmove()
// calls. If we just disable error reporting with
// ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
// internal_memcpy(), which may lead to crashes, see
// http://llvm.org/bugs/show_bug.cgi?id=16362.
ASAN_MEMMOVE_IMPL(ctx, to, from, size);
#endif // !SANITIZER_MAC
}
INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, memset);
ASAN_MEMSET_IMPL(ctx, block, c, size);
}
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@ -720,19 +708,10 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(was_called_once == false);
CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
// Intercept mem* functions.
ASAN_INTERCEPT_FUNC(memcpy);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
// In asan, REAL(memmove) is not used, but it is used in msan.
ASAN_INTERCEPT_FUNC(memmove);
}
CHECK(REAL(memcpy));
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT

View File

@ -23,6 +23,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
using __sanitizer::u64;
using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
@ -79,6 +81,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
// Sets bytes of the given range of the shadow memory into specific value.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_00(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f1(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f2(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f3(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f5(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f8(uptr addr, uptr size);
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
@ -156,6 +172,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;

View File

@ -36,7 +36,7 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
# if SANITIZER_IOS || SANITIZER_ANDROID
# define ASAN_LOW_MEMORY 1
# else
# define ASAN_LOW_MEMORY 0
@ -65,6 +65,9 @@ void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
// asan_win.cc / asan_posix.cc
const char *DescribeSignalOrException(int signo);
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@ -100,17 +103,6 @@ void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
#elif SANITIZER_WINDOWS64
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
@ -132,12 +124,10 @@ extern bool asan_init_is_running;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;

View File

@ -49,15 +49,6 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
// See also https://github.com/google/sanitizers/issues/34.
// TODO(glider): need to check dynamically that memcpy() and memmove() are
// actually the same function.
return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;

View File

@ -80,7 +80,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr = asan_malloc(size, &stack);
void *new_ptr;
if (UNLIKELY(!asan_inited)) {
new_ptr = AllocateFromLocalPool(size);
} else {
copy_size = size;
new_ptr = asan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}

View File

@ -119,6 +119,11 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
return realloc(p, size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc_base(void *p, size_t n, size_t elem_size) {
return _recalloc(p, n, elem_size);
}
ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
@ -218,6 +223,7 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_realloc_base", (uptr)realloc);
TryToOverrideFunction("_realloc_crt", (uptr)realloc);
TryToOverrideFunction("_recalloc", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
TryToOverrideFunction("_msize", (uptr)_msize);
TryToOverrideFunction("_expand", (uptr)_expand);

View File

@ -125,6 +125,7 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
@ -140,7 +141,6 @@ static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
#define SHADOW_SCALE kDefaultShadowScale
@ -168,7 +168,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# else
# define SHADOW_OFFSET kIosShadowOffset64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# endif
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
@ -183,7 +183,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_WINDOWS64
# define SHADOW_OFFSET kWindowsShadowOffset64
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
@ -269,9 +269,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
(flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@ -295,21 +311,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
if (SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;

View File

@ -32,9 +32,56 @@ struct AllocationSite {
class HeapProfile {
public:
HeapProfile() : allocations_(1024) {}
void ProcessChunk(const AsanChunkView& cv) {
if (cv.IsAllocated()) {
total_allocated_user_size_ += cv.UsedSize();
total_allocated_count_++;
u32 id = cv.GetAllocStackId();
if (id)
Insert(id, cv.UsedSize());
} else if (cv.IsQuarantined()) {
total_quarantined_user_size_ += cv.UsedSize();
total_quarantined_count_++;
} else {
total_other_count_++;
}
}
void Print(uptr top_percent) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
});
CHECK(total_allocated_user_size_);
uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
"%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
"showing top %zd%%\n",
total_allocated_user_size_, total_allocated_count_,
total_quarantined_user_size_, total_quarantined_count_,
total_other_count_, total_allocated_count_ +
total_quarantined_count_ + total_other_count_, top_percent);
for (uptr i = 0; i < allocations_.size(); i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
a.total_size * 100 / total_allocated_user_size_, a.count);
StackDepotGet(a.id).Print();
total_shown += a.total_size;
if (total_shown * 100 / total_allocated_user_size_ > top_percent)
break;
}
}
private:
uptr total_allocated_user_size_ = 0;
uptr total_allocated_count_ = 0;
uptr total_quarantined_user_size_ = 0;
uptr total_quarantined_count_ = 0;
uptr total_other_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
void Insert(u32 id, uptr size) {
total_allocated_ += size;
total_count_++;
// Linear lookup will be good enough for most cases (although not all).
for (uptr i = 0; i < allocations_.size(); i++) {
if (allocations_[i].id == id) {
@ -45,40 +92,11 @@ class HeapProfile {
}
allocations_.push_back({id, size, 1});
}
void Print(uptr top_percent) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
});
CHECK(total_allocated_);
uptr total_shown = 0;
Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
"showing top %zd%%\n", total_allocated_, total_count_, top_percent);
for (uptr i = 0; i < allocations_.size(); i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
a.total_size * 100 / total_allocated_, a.count);
StackDepotGet(a.id).Print();
total_shown += a.total_size;
if (total_shown * 100 / total_allocated_ > top_percent)
break;
}
}
private:
uptr total_allocated_ = 0;
uptr total_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
};
static void ChunkCallback(uptr chunk, void *arg) {
HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
AsanChunkView cv = FindHeapChunkByAddress(chunk);
if (!cv.IsAllocated()) return;
u32 id = cv.GetAllocStackId();
if (!id) return;
hp->Insert(id, cv.UsedSize());
reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
FindHeapChunkByAllocBeg(chunk));
}
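HeapProfile::Print above sorts allocation sites by total size and keeps printing until the cumulative share exceeds the requested top percentage, so a handful of hot call stacks summarize the heap. The same cut-off loop in isolation, with made-up sites:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Site { const char *name; size_t total_size; };

void PrintTop(std::vector<Site> sites, size_t top_percent, size_t total) {
  std::sort(sites.begin(), sites.end(),
            [](const Site &a, const Site &b) {
              return a.total_size > b.total_size;
            });
  size_t shown = 0;
  for (const Site &s : sites) {
    std::printf("%zu byte(s) (%zu%%) at %s\n", s.total_size,
                s.total_size * 100 / total, s.name);
    shown += s.total_size;
    if (shown * 100 / total > top_percent) break;  // covered enough of the heap
  }
}

int main() {
  std::vector<Site> sites = {{"parse", 700}, {"cache", 200}, {"misc", 100}};
  PrintTop(sites, /*top_percent=*/80, /*total=*/1000);  // prints parse, cache
}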
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,

View File

@ -45,26 +45,6 @@
using namespace __asan; // NOLINT
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t to avoid including <new>.
namespace std {
struct nothrow_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of those'll have its implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
// http://svnweb.freebsd.org/base?view=revision&revision=232261
#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
@ -74,6 +54,30 @@ struct nothrow_t {};
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
struct nothrow_t {};
enum class align_val_t: size_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
#define OPERATOR_NEW_BODY_ALIGN(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign((uptr)align, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// runtime dylib, and the main executable will depend on both the runtime
// dylib and libstdc++, each of those'll have its implementation of new and
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
@ -84,6 +88,18 @@ void *operator new(size_t size, std::nothrow_t const&)
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
@ -131,6 +147,32 @@ void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {

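The interceptors added above cover the C++17 aligned allocation overloads taking std::align_val_t. A hedged illustration of user code that now reaches asan_memalign through OPERATOR_NEW_BODY_ALIGN; it assumes a C++17 build (older clangs also need -faligned-new):

    #include <new>

    struct alignas(64) CacheLine { char data[64]; };

    int main() {
      CacheLine *p = new CacheLine;                 // operator new(size_t, align_val_t)
      delete p;                                     // operator delete(void *, align_val_t)
      CacheLine *q = new (std::nothrow) CacheLine;  // nothrow aligned overload
      delete q;
      return 0;
    }
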
View File

@ -64,12 +64,9 @@ struct ShadowSegmentEndpoint {
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
// Since asan's mapping is compacting, the shadow chunk may not be
// page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@ -117,9 +114,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@ -131,7 +128,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@ -157,9 +154,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
CHECK(beg.offset < end.offset);
CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
CHECK(value == end.value);
CHECK_EQ(value, end.value);
// We unpoison memory bytes up to end.offset if it is not
// unpoisoned already.
if (value != 0) {
@ -167,7 +164,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
CHECK(beg.chunk < end.chunk);
CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@ -314,6 +311,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
void __asan_set_shadow_00(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0, size);
}
void __asan_set_shadow_f1(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf1, size);
}
void __asan_set_shadow_f2(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf2, size);
}
void __asan_set_shadow_f3(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf3, size);
}
void __asan_set_shadow_f5(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf5, size);
}
void __asan_set_shadow_f8(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf8, size);
}
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
@ -388,7 +409,7 @@ const void *__sanitizer_contiguous_container_find_bad_address(
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
uptr r1_end = Min(end + kMaxRangeToCheck, mid);
uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);

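The entry points touched above are part of ASan's manual poisoning interface. A minimal usage sketch, assuming a build with -fsanitize=address; the macros come from the public sanitizer/asan_interface.h header:

    #include <sanitizer/asan_interface.h>
    #include <cstdlib>

    int main() {
      char *buf = static_cast<char *>(std::malloc(128));
      ASAN_POISON_MEMORY_REGION(buf + 64, 64);    // make the tail unaddressable
      buf[0] = 1;                                 // still fine
      // buf[100] = 1;                            // would produce an ASan report
      ASAN_UNPOISON_MEMORY_REGION(buf + 64, 64);  // unpoison before free()
      std::free(buf);
      return 0;
    }
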
View File

@ -86,8 +86,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
// Calls __sanitizer::FlushUnneededShadowMemory() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
// Calls __sanitizer::ReleaseMemoryPagesToOS() on
// [MemToShadow(p), MemToShadow(p+size)].
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan

View File

@ -33,6 +33,19 @@
namespace __asan {
const char *DescribeSignalOrException(int signo) {
switch (signo) {
case SIGFPE:
return "FPE";
case SIGILL:
return "ILL";
case SIGABRT:
return "ABRT";
default:
return "SEGV";
}
}
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
@ -84,12 +97,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
else if (signo == SIGFPE)
ReportDeadlySignal("FPE", sig);
else if (signo == SIGILL)
ReportDeadlySignal("ILL", sig);
else
ReportDeadlySignal("SEGV", sig);
ReportDeadlySignal(signo, sig);
}
// ---------------------- TSD ---------------- {{{1

File diff suppressed because it is too large

View File

@ -25,35 +25,29 @@ struct StackVarDescr {
uptr name_len;
};
struct AddressDescription {
char *name;
uptr name_size;
uptr region_address;
uptr region_size;
const char *region_kind;
};
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after = "\n");
// The following functions print an address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
void ReportDeadlySignal(const char *description, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);

View File

@ -32,6 +32,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@ -45,6 +46,7 @@ static void AsanDie() {
// Don't die twice - run a busy loop.
while (1) { }
}
if (common_flags()->print_module_map >= 1) PrintModuleMap();
if (flags()->sleep_before_dying) {
Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
SleepForSeconds(flags()->sleep_before_dying);
@ -263,6 +265,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
// clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@ -302,7 +305,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
case 40: __asan_set_shadow_00(0, 0); break;
case 41: __asan_set_shadow_f1(0, 0); break;
case 42: __asan_set_shadow_f2(0, 0); break;
case 43: __asan_set_shadow_f3(0, 0); break;
case 44: __asan_set_shadow_f5(0, 0); break;
case 45: __asan_set_shadow_f8(0, 0); break;
}
// clang-format on
}
static void asan_atexit() {
@ -326,8 +336,21 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap)
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
// is actually using this memory. Which means it needs a shadow...
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
uptr GapShadowEnd =
RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
if (Verbosity())
Printf("protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
@ -388,6 +411,8 @@ static void PrintAddressSpaceLayout() {
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
Printf("thread_local_quarantine_size_kb=%zuK\n",
(uptr)flags()->thread_local_quarantine_size_kb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@ -401,6 +426,79 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
static void InitializeShadowMemory() {
// Set the shadow memory address to uninitialized.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
uptr shadow_start = kLowShadowBeg;
// Detect if a dynamic shadow address must be used and find an available
// location when necessary. When a dynamic address is used, the macro
// |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|, which is
// |kDefaultShadowSentinel|.
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
uptr granularity = GetMmapGranularity();
uptr alignment = 8 * granularity;
uptr left_padding = granularity;
uptr space_size = kHighShadowEnd + left_padding;
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
CHECK_NE((uptr)0, shadow_start);
CHECK(IsAligned(shadow_start, alignment));
}
// Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
}
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@ -434,7 +532,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
// Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@ -453,61 +550,9 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
uptr shadow_start = kLowShadowBeg;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#elif SANITIZER_WINDOWS64
// Disable the "mid mem" shadow layout.
if (!full_shadow_is_available) {
kMidMemBeg = 0;
kMidMemEnd = 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
DisableCoreDumperIfNecessary();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);
@ -599,6 +644,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
if (asan_init_is_running)
return;
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();

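The rewritten InitializeShadowMemory above lets the shadow base be chosen at startup (__asan_shadow_memory_dynamic_address) instead of being fixed at compile time. The mapping itself is unchanged; a self-contained sketch of it, with an illustrative offset value rather than any particular platform's:

    #include <cstdint>
    #include <cstdio>

    // Shadow granularity is 8 bytes, so addresses are scaled down by 3 bits.
    // With a dynamic shadow the offset is discovered at startup rather than
    // baked into the instrumentation; 0x7fff8000 here is only illustrative.
    constexpr unsigned kShadowScale = 3;
    uint64_t g_shadow_offset = 0x7fff8000ULL;

    uint64_t MemToShadow(uint64_t addr) {
      return (addr >> kShadowScale) + g_shadow_offset;
    }

    int main() {
      unsigned long long a = 0x602000000010ULL;
      std::printf("shadow of 0x%llx is 0x%llx\n", a,
                  (unsigned long long)MemToShadow(a));
      return 0;
    }
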
View File

@ -34,10 +34,10 @@
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
class ScarinessScore {
public:
ScarinessScore() {
struct ScarinessScoreBase {
void Clear() {
descr[0] = 0;
score = 0;
}
void Scare(int add_to_score, const char *reason) {
if (descr[0])
@ -52,16 +52,23 @@ class ScarinessScore {
Printf("SCARINESS: %d (%s)\n", score, descr);
}
static void PrintSimple(int score, const char *descr) {
ScarinessScore SS;
SS.Scare(score, descr);
SS.Print();
ScarinessScoreBase SSB;
SSB.Clear();
SSB.Scare(score, descr);
SSB.Print();
}
private:
int score = 0;
int score;
char descr[1024];
};
struct ScarinessScore : ScarinessScoreBase {
ScarinessScore() {
Clear();
}
};
} // namespace __asan
#endif // ASAN_SCARINESS_SCORE_H

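The refactor above splits out a constructor-free ScarinessScoreBase that is cleared explicitly, with a convenience subclass whose constructor calls Clear(). That lets a zero-initialized global hold a score without running a static constructor; a reduced sketch of the pattern:

    struct ScoreBase {
      int score;
      char descr[64];
      void Clear() { score = 0; descr[0] = 0; }  // explicit init, no constructor
    };

    struct Score : ScoreBase {
      Score() { Clear(); }  // convenient for locals
    };

    ScoreBase g_score;  // zero-initialized at load time; no static ctor needed

    int main() {
      Score local;           // Clear() runs via the constructor
      g_score.Clear();       // globals are cleared explicitly before first use
      return g_score.score;  // 0
    }
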
View File

@ -141,7 +141,9 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
@ -152,6 +154,10 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
fake_stack_ = fake_stack_save;
}
if (bottom_old)
*bottom_old = stack_bottom_;
if (size_old)
*size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
@ -345,7 +351,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
if (ThreadStackContainsAddress(tctx, &context)) {
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
@ -447,12 +453,16 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack) {
void __sanitizer_finish_switch_fiber(void* fakestack,
const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
t->FinishSwitchFiber((FakeStack*)fakestack);
t->FinishSwitchFiber((FakeStack*)fakestack,
(uptr*)bottom_old,
(uptr*)size_old);
}
}

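The extended __sanitizer_finish_switch_fiber above reports the previous stack's bounds so a coroutine library can hand them back on the return switch. A hedged usage sketch; the declarations live in the public sanitizer/common_interface_defs.h, and the actual context switch is elided:

    #include <sanitizer/common_interface_defs.h>
    #include <cstddef>

    void SwitchToFiber(const void *new_bottom, size_t new_size) {
      void *fake_stack = nullptr;
      __sanitizer_start_switch_fiber(&fake_stack, new_bottom, new_size);
      // ... the real swapcontext/longjmp/assembly switch would go here ...
      const void *old_bottom = nullptr;
      size_t old_size = 0;
      __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
      // old_bottom/old_size now describe the stack we switched away from and
      // can be passed to start_switch_fiber when switching back.
    }

    int main() { return 0; }
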
View File

@ -94,7 +94,8 @@ class AsanThread {
}
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
void FinishSwitchFiber(FakeStack *fake_stack_save);
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old);
bool has_fake_stack() {
return !atomic_load(&stack_switching_, memory_order_relaxed) &&

View File

@ -19,6 +19,7 @@
#include <stdlib.h>
#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@ -37,7 +38,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}
// -------------------- A workaround for the abscence of weak symbols ----- {{{
SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_get_shadow_memory_dynamic_address() {
__asan_init();
return __asan_shadow_memory_dynamic_address;
}
// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
@ -64,14 +71,22 @@ void __asan_default_on_error() {}
// }}}
} // extern "C"
// ---------------------- Windows-specific inteceptors ---------------- {{{
// ---------------------- Windows-specific interceptors ---------------- {{{
INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
CHECK(REAL(RtlRaiseException));
// This is a noreturn function, unless it's one of the exceptions raised to
// communicate with the debugger, such as the one from OutputDebugString.
if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
__asan_handle_no_return();
REAL(RtlRaiseException)(ExceptionRecord);
}
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
#ifdef _WIN64
INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
@ -123,44 +138,12 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}
namespace {
BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
void EnsureWorkerThreadRegistered() {
// FIXME: GetCurrentThread relies on TSD, which might not play well with
// system thread pools. We might want to use something like reference
// counting to zero out GetCurrentThread() underlying storage when the last
// work item finishes? Or can we disable reclaiming of threads in the pool?
BlockingMutexLock l(&mu_for_thread_tracking);
if (__asan::GetCurrentThread())
return;
AsanThread *t = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr,
/* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), 0, 0);
SetCurrentThread(t);
}
} // namespace
INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
// NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
// query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
// System worker pool threads are created at an arbitrary point in time and
// without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
// instead and don't register a specific parent_tid/stack.
EnsureWorkerThreadRegistered();
return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
}
// }}}
namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
ASAN_INTERCEPT_FUNC(RaiseException);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
@ -169,11 +152,15 @@ void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(_except_handler4);
#endif
// NtWaitForWorkViaWorkerFactory is always linked dynamically.
CHECK(::__interception::OverrideFunction(
"NtWaitForWorkViaWorkerFactory",
(uptr)WRAP(NtWaitForWorkViaWorkerFactory),
(uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
// Try to intercept kernel32!RaiseException, and if that fails, intercept
// ntdll!RtlRaiseException instead.
if (!::__interception::OverrideFunction("RaiseException",
(uptr)WRAP(RaiseException),
(uptr *)&REAL(RaiseException))) {
CHECK(::__interception::OverrideFunction("RtlRaiseException",
(uptr)WRAP(RtlRaiseException),
(uptr *)&REAL(RtlRaiseException)));
}
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@ -229,8 +216,7 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
static uptr page_size = GetPageSizeCached();
static uptr alloc_granularity = GetMmapGranularity();
uptr page_size = GetPageSizeCached();
// Only handle access violations.
if (exception_pointers->ExceptionRecord->ExceptionCode !=
EXCEPTION_ACCESS_VIOLATION) {
@ -276,22 +262,57 @@ void InitializePlatformExceptionHandlers() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
// Check based on flags if we should report this exception.
static bool ShouldReportDeadlyException(unsigned code) {
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
case EXCEPTION_IN_PAGE_ERROR:
return common_flags()->handle_segv;
case EXCEPTION_BREAKPOINT:
case EXCEPTION_ILLEGAL_INSTRUCTION: {
return common_flags()->handle_sigill;
}
}
return false;
}
// Return the textual name for this exception.
const char *DescribeSignalOrException(int signo) {
unsigned code = signo;
// Get the string description of the exception if this is a known deadly
// exception.
switch (code) {
case EXCEPTION_ACCESS_VIOLATION:
return "access-violation";
case EXCEPTION_IN_PAGE_ERROR:
return "in-page-error";
case EXCEPTION_BREAKPOINT:
return "breakpoint";
case EXCEPTION_ILLEGAL_INSTRUCTION:
return "illegal-instruction";
}
return nullptr;
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
const char *description =
(exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
? "access-violation"
: "in-page-error";
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(description, sig);
}
// Continue the search if the signal wasn't deadly.
if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
return EXCEPTION_CONTINUE_SEARCH;
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(exception_record->ExceptionCode, sig);
UNREACHABLE("returned from reporting deadly signal");
}
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
__asan_unhandled_exception_filter(info);
// Bubble out to the default exception filter.
return default_seh_handler(info);
}
@ -331,10 +352,25 @@ int __asan_set_seh_filter() {
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read) // NOLINT
__declspec(allocate(".CRT$XCAB"))
int (*__intercept_seh)() = __asan_set_seh_filter;
__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
__asan_set_seh_filter;
// Piggyback on the TLS initialization callback directory to initialize asan as
// early as possible. Initializers in .CRT$XL* are called directly by ntdll,
// which run before the CRT. Users also add code to .CRT$XLC, so it's important
// to run our initializers first.
static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
if (reason == DLL_PROCESS_ATTACH) __asan_init();
}
#pragma section(".CRT$XLAB", long, read) // NOLINT
__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
#endif
ASAN_LINK_GLOBALS_WIN()
// }}}
} // namespace __asan
#endif // _WIN32
#endif // SANITIZER_WINDOWS

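The Windows refactor above funnels every deadly exception through one filter that first consults the handle_segv/handle_sigill runtime flags. A reduced sketch of that dispatch; the Flags struct is illustrative, while the exception codes are the usual <windows.h> constants:

    #include <windows.h>

    struct Flags { bool handle_segv; bool handle_sigill; };

    bool ShouldReport(const Flags &f, unsigned code) {
      switch (code) {
        case EXCEPTION_ACCESS_VIOLATION:
        case EXCEPTION_IN_PAGE_ERROR:
          return f.handle_segv;
        case EXCEPTION_BREAKPOINT:
        case EXCEPTION_ILLEGAL_INSTRUCTION:
          return f.handle_sigill;
      }
      return false;  // unknown codes fall through to the next handler
    }

    int main() {
      Flags f = {true, false};
      return ShouldReport(f, EXCEPTION_ACCESS_VIOLATION) ? 0 : 1;
    }
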
View File

@ -15,21 +15,30 @@
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dll_thunk.lib
// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "asan_globals_win.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#ifdef _M_IX86
#define WINAPI __stdcall
#else
#define WINAPI
#endif
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
void *__stdcall GetModuleHandleA(const char *module_name);
void *__stdcall GetProcAddress(void *module, const char *proc_name);
void *WINAPI GetModuleHandleA(const char *module_name);
void *WINAPI GetProcAddress(void *module, const char *proc_name);
void abort();
}
using namespace __sanitizer;
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
@ -105,7 +114,7 @@ static void InterceptHooks();
// ---------- Function wrapping helpers ----------------------------------- {{{1
#define WRAP_V_V(name) \
extern "C" void name() { \
typedef void (*fntype)(); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
} \
@ -113,7 +122,7 @@ static void InterceptHooks();
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
typedef void (*fntype)(void *arg); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
} \
@ -121,7 +130,7 @@ static void InterceptHooks();
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
typedef void (*fntype)(void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
} \
@ -129,7 +138,7 @@ static void InterceptHooks();
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
} \
@ -137,7 +146,7 @@ static void InterceptHooks();
#define WRAP_W_V(name) \
extern "C" void *name() { \
typedef void *(*fntype)(); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
} \
@ -145,7 +154,7 @@ static void InterceptHooks();
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
typedef void *(*fntype)(void *arg); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
} \
@ -153,7 +162,7 @@ static void InterceptHooks();
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
typedef void *(*fntype)(void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
} \
@ -161,7 +170,7 @@ static void InterceptHooks();
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
typedef void *(*fntype)(void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
} \
@ -169,7 +178,7 @@ static void InterceptHooks();
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
typedef void *(*fntype)(void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
} \
@ -178,7 +187,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
} \
@ -187,7 +196,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
} \
@ -198,9 +207,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
@ -214,7 +225,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
__asan_shadow_memory_dynamic_address =
(uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
@ -224,6 +236,7 @@ extern "C" void __asan_version_mismatch_check() {
}
INTERFACE_FUNCTION(__asan_handle_no_return)
INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
@ -257,6 +270,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
INTERFACE_FUNCTION(__asan_set_shadow_00);
INTERFACE_FUNCTION(__asan_set_shadow_f1);
INTERFACE_FUNCTION(__asan_set_shadow_f2);
INTERFACE_FUNCTION(__asan_set_shadow_f3);
INTERFACE_FUNCTION(__asan_set_shadow_f5);
INTERFACE_FUNCTION(__asan_set_shadow_f8);
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
@ -306,17 +326,18 @@ INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_dump_coverage)
INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
INTERFACE_FUNCTION(__sanitizer_get_coverage_pc_buffer)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@ -327,6 +348,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
@ -347,6 +370,7 @@ INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
@ -368,6 +392,7 @@ WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
@ -444,4 +469,15 @@ static int call_asan_init() {
#pragma section(".CRT$XIB", long, read) // NOLINT
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
static void WINAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
}
#pragma section(".CRT$XLAB", long, read) // NOLINT
__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
ASAN_LINK_GLOBALS_WIN()
#endif // ASAN_DLL_THUNK

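One detail worth noting in the thunk changes above: every WRAP_* macro now derives its trampoline type with typedef decltype(name) *fntype, so the pointer type always matches the wrapped declaration instead of restating the signature by hand. The idiom in isolation:

    #include <cstdio>

    void greet(const char *who) { std::printf("hi %s\n", who); }

    int main() {
      typedef decltype(greet) *fntype;  // deduced as void (*)(const char *)
      fntype fn = &greet;               // stays in sync if greet's signature changes
      fn("world");
      return 0;
    }
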
View File

@ -1,4 +1,4 @@
//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@ -16,22 +16,25 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handlerx
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
#include "asan_globals_win.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
#pragma section(".CRT$XLAB", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
@ -42,14 +45,37 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
// just to work around this issue, let's clone the a variable that is
// constant after initialization anyways.
// just to work around this issue, let's clone the variable that is constant
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return;
__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
void* __asan_shadow_memory_dynamic_address;
}
static int InitializeClonedVariables() {
__asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
__asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
return 0;
}
static void NTAPI asan_thread_init(void *mod, unsigned long reason,
void *reserved) {
if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
}
// Our cloned variables must be initialized before C/C++ constructors. If TLS
// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
// initializer is needed as a backup.
__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
InitializeClonedVariables;
__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
unsigned long, void *) = asan_thread_init;
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
@ -73,6 +99,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
@ -81,8 +108,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
} // namespace
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
@ -97,4 +122,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
ASAN_LINK_GLOBALS_WIN()
#endif // ASAN_DYNAMIC_RUNTIME_THUNK

View File

@ -0,0 +1,3 @@
___asan_default_options
___asan_default_suppressions
___asan_on_error

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vadd.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__adddf3vfp)

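This and the following VFP files all gain the same guard: on hard-float targets (COMPILER_RT_ARMHF_TARGET) the operands already arrive in VFP registers, while soft-float ABIs pass them in r0-r3 and need the vmov shuffles. The C-level semantics are unchanged; a sketch for the addition case:

    // Sketch of __adddf3vfp's semantics. With -mfloat-abi=hard the arguments
    // of a function like this arrive in d0/d1; with -mfloat-abi=softfp they
    // arrive in r0..r3, which is what the #else path's vmov instructions
    // unpack.
    extern "C" double vfp_double_add(double a, double b) { return a + b; }
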
View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vadd.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__addsf3vfp)

View File

@ -26,10 +26,10 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond) \
bl SYMBOL_NAME(__ ## cond ## sf2) SEPARATOR \
cmp r0, #0 SEPARATOR \
b ## cond 1f SEPARATOR \
mov r0, #0 SEPARATOR \
movs r0, #0 SEPARATOR \
pop { r4, pc } SEPARATOR \
1: SEPARATOR \
mov r0, #1 SEPARATOR \
movs r0, #1 SEPARATOR \
pop { r4, pc } SEPARATOR \
END_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond)

View File

@ -15,16 +15,34 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_idivmod __rt_sdiv
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
#if __ARM_ARCH_ISA_THUMB == 1
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)
pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
muls r2, r2, r0 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
#else
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__divmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif // __ARM_ARCH_ISA_THUMB == 1
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE

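The Thumb-1 path added above derives the remainder from the quotient (rem = num - quot * denom) rather than calling a combined divmod helper. The contract, in portable C++ (quotient in r0, remainder in r1 in the AEABI convention):

    #include <cstdio>

    struct IDivMod { int quot; int rem; };

    IDivMod idivmod(int num, int denom) {
      int quot = num / denom;               // __divsi3 in the assembly path
      return { quot, num - quot * denom };  // the muls/subs in the Thumb-1 hunk
    }

    int main() {
      IDivMod r = idivmod(-7, 3);
      std::printf("%d %d\n", r.quot, r.rem);  // -2 -1 (truncated division)
      return 0;
    }
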
View File

@ -16,6 +16,10 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_ldivmod __rt_sdiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
#if defined(__MINGW32__)
mov r12, r0
mov r0, r2
mov r2, r12
mov r12, r1
mov r1, r3
mov r3, r12
#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]

View File

@ -16,16 +16,40 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uidivmod __rt_udiv
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
#if __ARM_ARCH_ISA_THUMB == 1
cmp r0, r1
bcc LOCAL_LABEL(case_denom_larger)
push {r0, r1, lr}
bl SYMBOL_NAME(__aeabi_uidiv)
pop {r1, r2, r3}
muls r2, r2, r0 // r2 = quot * denom
subs r1, r1, r2
JMP (r3)
LOCAL_LABEL(case_denom_larger):
movs r1, r0
movs r0, #0
JMP (lr)
#else
push { lr }
sub sp, sp, #4
mov r2, sp
#if defined(__MINGW32__)
mov r3, r0
mov r0, r1
mov r1, r3
#endif
bl SYMBOL_NAME(__udivmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
#endif
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
NO_EXEC_STACK_DIRECTIVE

View File

@ -16,6 +16,10 @@
// return {quot, rem};
// }
#if defined(__MINGW32__)
#define __aeabi_uldivmod __rt_udiv64
#endif
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
#if defined(__MINGW32__)
mov r12, r0
mov r0, r2
mov r2, r12
mov r12, r1
mov r1, r3
mov r3, r12
#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]

View File

@ -39,32 +39,64 @@
#include "../assembly.h"
.syntax unified
#if __ARM_ARCH_ISA_THUMB == 2
.thumb
#endif
.p2align 2
@ int __eqsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
#if __ARM_ARCH_ISA_THUMB == 1
push {r6, lr}
lsls r2, r0, #1
lsls r3, r1, #1
#else
mov r2, r0, lsl #1
mov r3, r1, lsl #1
#endif
// We do the comparison in three stages (ignoring NaN values for the time
// being). First, we orr the absolute values of a and b; this sets the Z
// flag if both a and b are zero (of either sign). The shift of r3 doesn't
// affect this at all, but it *does* make sure that the C flag is clear for
// the subsequent operations.
#if __ARM_ARCH_ISA_THUMB == 1
lsrs r6, r3, #1
orrs r6, r2, r6
#else
orrs r12, r2, r3, lsr #1
#endif
// Next, we check if a and b have the same or different signs. If they have
// opposite signs, this eor will set the N flag.
#if __ARM_ARCH_ISA_THUMB == 1
beq 1f
movs r6, r0
eors r6, r1
1:
#else
it ne
eorsne r12, r0, r1
#endif
// If a and b are equal (either both zeros or bit identical; again, we're
// ignoring NaNs for now), this subtract will zero out r0. If they have the
// same sign, the flags are updated as they would be for a comparison of the
// absolute values of a and b.
#if __ARM_ARCH_ISA_THUMB == 1
bmi 1f
subs r0, r2, r3
1:
#else
it pl
subspl r0, r2, r3
#endif
// If a is smaller in magnitude than b and both have the same sign, place
// the negation of the sign of b in r0. Thus, if both are negative and
@ -76,41 +108,126 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2)
// still clear from the shift argument in orrs; if a is positive and b
// negative, this places 0 in r0; if a is negative and b positive, -1 is
// placed in r0.
#if __ARM_ARCH_ISA_THUMB == 1
bhs 1f
// Here if a and b have the same sign and absA < absB, the result is thus
// b < 0 ? 1 : -1. Same if a and b have the opposite sign (ignoring NaN).
movs r0, #1
lsrs r1, #31
bne LOCAL_LABEL(CHECK_NAN)
negs r0, r0
b LOCAL_LABEL(CHECK_NAN)
1:
#else
it lo
mvnlo r0, r1, asr #31
#endif
// If a is greater in magnitude than b and both have the same sign, place
// the sign of b in r0. Thus, if both are negative and a < b, -1 is placed
// in r0, which is the desired result. Conversely, if both are positive
// and a > b, zero is placed in r0.
#if __ARM_ARCH_ISA_THUMB == 1
bls 1f
// Here both have the same sign and absA > absB.
movs r0, #1
lsrs r1, #31
beq LOCAL_LABEL(CHECK_NAN)
negs r0, r0
1:
#else
it hi
movhi r0, r1, asr #31
#endif
// If you've been keeping track, at this point r0 contains -1 if a < b and
// 0 if a >= b. All that remains to be done is to set it to 1 if a > b.
// If a == b, then the Z flag is set, so we can get the correct final value
// into r0 by simply or'ing with 1 if Z is clear.
// For Thumb-1, r0 contains -1 if a < b, 0 if a > b and 0 if a == b.
#if __ARM_ARCH_ISA_THUMB != 1
it ne
orrne r0, r0, #1
#endif
// Finally, we need to deal with NaNs. If either argument is NaN, replace
// the value in r0 with 1.
#if __ARM_ARCH_ISA_THUMB == 1
LOCAL_LABEL(CHECK_NAN):
movs r6, #0xff
lsls r6, #24
cmp r2, r6
bhi 1f
cmp r3, r6
1:
bls 2f
movs r0, #1
2:
pop {r6, pc}
#else
cmp r2, #0xff000000
ite ls
cmpls r3, #0xff000000
movhi r0, #1
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__lesf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__ltsf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__nesf2, __eqsf2)
.p2align 2
@ int __gtsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2)
// Identical to the preceding except that we return -1 for NaN values.
// Given that the two paths share so much code, one might be tempted to
// unify them; however, the extra code needed to do so makes the code size
// to performance tradeoff very hard to justify for such small functions.
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
#if __ARM_ARCH_ISA_THUMB == 1
push {r6, lr}
lsls r2, r0, #1
lsls r3, r1, #1
lsrs r6, r3, #1
orrs r6, r2, r6
beq 1f
movs r6, r0
eors r6, r1
1:
bmi 2f
subs r0, r2, r3
2:
bhs 3f
movs r0, #1
lsrs r1, #31
bne LOCAL_LABEL(CHECK_NAN_2)
negs r0, r0
b LOCAL_LABEL(CHECK_NAN_2)
3:
bls 4f
movs r0, #1
lsrs r1, #31
beq LOCAL_LABEL(CHECK_NAN_2)
negs r0, r0
4:
LOCAL_LABEL(CHECK_NAN_2):
movs r6, #0xff
lsls r6, #24
cmp r2, r6
bhi 5f
cmp r3, r6
5:
bls 6f
movs r0, #1
negs r0, r0
6:
pop {r6, pc}
#else
mov r2, r0, lsl #1
mov r3, r1, lsl #1
orrs r12, r2, r3, lsr #1
@ -129,23 +246,51 @@ DEFINE_COMPILERRT_FUNCTION(__gtsf2)
cmpls r3, #0xff000000
movhi r0, #-1
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__gtsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__gesf2, __gtsf2)
.p2align 2
@ int __unordsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Return 1 for NaN values, 0 otherwise.
mov r2, r0, lsl #1
mov r3, r1, lsl #1
mov r0, #0
lsls r2, r0, #1
lsls r3, r1, #1
movs r0, #0
#if __ARM_ARCH_ISA_THUMB == 1
movs r1, #0xff
lsls r1, #24
cmp r2, r1
bhi 1f
cmp r3, r1
1:
bls 2f
movs r0, #1
2:
#else
cmp r2, #0xff000000
ite ls
cmpls r3, #0xff000000
movhi r0, #1
#endif
JMP(lr)
END_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmpum)
vmov s0, r0
vmov s1, r1
b SYMBOL_NAME(__unordsf2)
END_COMPILERRT_FUNCTION(__aeabi_fcmpum)
#else
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fcmpun, __unordsf2)
#endif
NO_EXEC_STACK_DIRECTIVE

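The Thumb-1 additions above implement the same soft-float comparison as the ARM path: treat the floats as sign-magnitude integers with the sign bit shifted off, then patch up signs and NaNs. A portable C++ sketch of the technique; it returns -1/0/+1 for ordered inputs and 2 for unordered, which is deliberately simpler than the per-entry-point NaN conventions of __eqsf2/__gtsf2:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int fcmp_bits(float fa, float fb) {
      uint32_t a, b;
      std::memcpy(&a, &fa, sizeof a);
      std::memcpy(&b, &fb, sizeof b);
      uint32_t absa = a << 1, absb = b << 1;  // shift the sign bit off the top
      if (absa > 0xff000000u || absb > 0xff000000u) return 2;  // NaN: unordered
      if (absa == 0 && absb == 0) return 0;   // +0 and -0 compare equal
      // Map the bit patterns so a plain unsigned compare orders the floats.
      uint32_t ka = (a >> 31) ? ~a : (a | 0x80000000u);
      uint32_t kb = (b >> 31) ? ~b : (b | 0x80000000u);
      return (ka > kb) - (ka < kb);
    }

    int main() {
      std::printf("%d %d %d\n", fcmp_bits(1.0f, 2.0f), fcmp_bits(-1.0f, 1.0f),
                  fcmp_bits(0.0f, -0.0f));  // -1 -1 0
      return 0;
    }
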
View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vdiv.f64 d5, d6, d7
vmov r0, r1, d5 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__divdf3vfp)

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vdiv.f32 s13, s14, s15
vmov r0, s13 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__divsf3vfp)

View File

@ -49,17 +49,37 @@ LOCAL_LABEL(divzero):
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
# if __ARM_ARCH_ISA_THUMB == 1
movs r4, r0
eors r4, r1
# else
eor r4, r0, r1
# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
# if __ARM_ARCH_ISA_THUMB == 1
asrs r2, r0, #31
asrs r3, r1, #31
eors r0, r2
eors r1, r3
subs r0, r0, r2
subs r1, r1, r3
# else
eor r2, r0, r0, asr #31
eor r3, r1, r1, asr #31
sub r0, r2, r0, asr #31
sub r1, r3, r1, asr #31
# endif
// abs(a) / abs(b)
bl SYMBOL_NAME(__udivsi3)
// Apply sign of quotient to result and return.
# if __ARM_ARCH_ISA_THUMB == 1
asrs r4, #31
eors r0, r4
subs r0, r0, r4
# else
eor r0, r0, r4, asr #31
sub r0, r0, r4, asr #31
# endif
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divsi3)

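The Thumb-1 path above spells out the branch-free absolute value named in its comment, abs(x) = (x ^ (x >> 31)) - (x >> 31). The identity in C++, assuming the arithmetic right shift that mainstream ARM compilers provide for signed int:

    #include <cstdio>

    int iabs(int x) {
      int m = x >> 31;     // 0 when x >= 0, -1 when x < 0
      return (x ^ m) - m;  // leaves x unchanged, or computes ~x + 1 == -x
    }

    int main() {
      std::printf("%d %d\n", iabs(5), iabs(-5));  // 5 5
      return 0;
    }
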
View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f64.f32 d0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.f64.f32 d7, s15 // convert single to double
vmov r0, r1, d7 // return result in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__extendsfdf2vfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.s32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixdfsivfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.s32.f32 s15, s15 // convert single to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixsfsivfp)

View File

@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.u32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunsdfsivfp)

View File

@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.u32.f32 s15, s15 // convert single to 32-bit unsigned into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunssfsivfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.s32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.s32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsidfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.s32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.s32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsisfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.u32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.u32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssidfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.u32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.u32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssisfvfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vmul.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__muldf3vfp)

View File

@ -18,9 +18,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vmul.f32 s13, s14, s15
#endif
vmov r0, s13 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__mulsf3vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0

View File

@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f64 d0, d0
#else
eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__negdf2vfp)
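
The soft-float path here never touches the VFP unit: negation is just an XOR of the IEEE-754 sign bit, bit 31 of the high word of the double. A hedged C sketch of the same trick:

#include <stdint.h>
#include <string.h>

/* Negate a double by flipping its IEEE-754 sign bit, mirroring the eor
 * on the r0/r1 pair above; memcpy type-puns without undefined behavior. */
static double negate(double x) {
    uint64_t bits;
    memcpy(&bits, &x, sizeof bits);
    bits ^= UINT64_C(1) << 63;   /* flip the sign bit */
    memcpy(&x, &bits, sizeof x);
    return x;
}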

View File

@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f32 s0, s0
#else
eor r0, r0, #-2147483648 // flip sign bit on float in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__negsf2vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vsub.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__subdf3vfp)

View File

@ -12,17 +12,21 @@
//
// extern float __subsf3vfp(float a, float b);
//
// Returns the difference between two single precision floating point numbers
// using the Darwin calling convention where single arguments are passed
// like 32-bit ints.
//
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vsub.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__subsf3vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__truncdfsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f32.f64 s0, d0
#else
vmov d7, r0, r1 // load double from r0/r1 pair
vcvt.f32.f64 s15, d7 // convert double to single (truncate precision)
vmov r0, s15 // return result in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__truncdfsf2vfp)

View File

@ -40,12 +40,26 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#else
cmp r1, #1
bcc LOCAL_LABEL(divby0)
#if __ARM_ARCH_ISA_THUMB == 1
bne LOCAL_LABEL(num_neq_denom)
JMP(lr)
LOCAL_LABEL(num_neq_denom):
#else
IT(eq)
JMPc(lr, eq)
#endif
cmp r0, r1
#if __ARM_ARCH_ISA_THUMB == 1
bhs LOCAL_LABEL(num_ge_denom)
movs r0, #0
JMP(lr)
LOCAL_LABEL(num_ge_denom):
#else
ITT(cc)
movcc r0, #0
JMPc(lr, cc)
#endif
/*
* Implement division using binary long division algorithm.
*
@ -62,7 +76,7 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
* that (r0 << shift) < 2 * r1. The quotient is stored in r3.
*/
# ifdef __ARM_FEATURE_CLZ
# if defined(__ARM_FEATURE_CLZ)
clz ip, r0
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
@ -77,49 +91,128 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
sub ip, ip, r3, lsl #3
mov r3, #0
bx ip
# else
# else /* No CLZ Feature */
# if __ARM_ARCH_ISA_THUMB == 2
# error THUMB mode requires CLZ or UDIV
# endif
# if __ARM_ARCH_ISA_THUMB == 1
# define BLOCK_SIZE 10
# else
# define BLOCK_SIZE 12
# endif
mov r2, r0
# if __ARM_ARCH_ISA_THUMB == 1
mov ip, r0
adr r0, LOCAL_LABEL(div0block)
adds r0, #1
# else
adr ip, LOCAL_LABEL(div0block)
lsr r3, r2, #16
# endif
lsrs r3, r2, #16
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_16)
movs r2, r3
subs r0, r0, #(16 * BLOCK_SIZE)
LOCAL_LABEL(skip_16):
# else
movhs r2, r3
subhs ip, ip, #(16 * 12)
subhs ip, ip, #(16 * BLOCK_SIZE)
# endif
lsr r3, r2, #8
lsrs r3, r2, #8
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_8)
movs r2, r3
subs r0, r0, #(8 * BLOCK_SIZE)
LOCAL_LABEL(skip_8):
# else
movhs r2, r3
subhs ip, ip, #(8 * 12)
subhs ip, ip, #(8 * BLOCK_SIZE)
# endif
lsr r3, r2, #4
lsrs r3, r2, #4
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_4)
movs r2, r3
subs r0, r0, #(4 * BLOCK_SIZE)
LOCAL_LABEL(skip_4):
# else
movhs r2, r3
subhs ip, #(4 * 12)
subhs ip, #(4 * BLOCK_SIZE)
# endif
lsr r3, r2, #2
lsrs r3, r2, #2
cmp r3, r1
# if __ARM_ARCH_ISA_THUMB == 1
blo LOCAL_LABEL(skip_2)
movs r2, r3
subs r0, r0, #(2 * BLOCK_SIZE)
LOCAL_LABEL(skip_2):
# else
movhs r2, r3
subhs ip, ip, #(2 * 12)
subhs ip, ip, #(2 * BLOCK_SIZE)
# endif
/* Last block, no need to update r2 or r3. */
cmp r1, r2, lsr #1
subls ip, ip, #(1 * 12)
# if __ARM_ARCH_ISA_THUMB == 1
lsrs r3, r2, #1
cmp r3, r1
blo LOCAL_LABEL(skip_1)
subs r0, r0, #(1 * BLOCK_SIZE)
LOCAL_LABEL(skip_1):
movs r2, r0
mov r0, ip
movs r3, #0
JMP (r2)
mov r3, #0
# else
cmp r1, r2, lsr #1
subls ip, ip, #(1 * BLOCK_SIZE)
movs r3, #0
JMP(ip)
# endif
# endif
# endif /* __ARM_FEATURE_CLZ */
#define IMM #
/* Due to the limited branch range in Thumb1, we have to place the
divby0 block closer. */
LOCAL_LABEL(divby0):
movs r0, #0
# if defined(__ARM_EABI__)
bl __aeabi_idiv0 // due to relocation limit, can't use b.
# endif
JMP(lr)
#if __ARM_ARCH_ISA_THUMB == 1
#define block(shift) \
lsls r2, r1, IMM shift; \
cmp r0, r2; \
blo LOCAL_LABEL(block_skip_##shift); \
subs r0, r0, r2; \
LOCAL_LABEL(block_skip_##shift) :; \
adcs r3, r3 /* same as ((r3 << 1) | Carry). Carry is set if r0 >= r2. */
/* TODO: if the current location counter is not word aligned, we don't
need the .p2align and nop */
/* Label div0block must be word-aligned. First align block 31 */
.p2align 2
nop /* Padding to align div0block as 31 blocks = 310 bytes */
#else
#define block(shift) \
cmp r0, r1, lsl IMM shift; \
ITT(hs); \
WIDE(addhs) r3, r3, IMM (1 << shift); \
WIDE(subhs) r0, r0, r1, lsl IMM shift
#endif
block(31)
block(30)
@ -159,12 +252,14 @@ LOCAL_LABEL(div0block):
JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */
#if __ARM_ARCH_EXT_IDIV__
LOCAL_LABEL(divby0):
mov r0, #0
#ifdef __ARM_EABI__
b __aeabi_idiv0
#else
JMP(lr)
mov r0, #0
# ifdef __ARM_EABI__
b __aeabi_idiv0
# else
JMP(lr)
# endif
#endif
END_COMPILERRT_FUNCTION(__udivsi3)
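
The block(shift) macros above unroll one iteration per bit of a classic binary long division; the CLZ path (or, without CLZ, the comparison ladder) only computes how many leading iterations can be skipped. A plain C sketch of the underlying algorithm, offered as an illustration rather than the shipped code (divide-by-zero is assumed handled separately, as the divby0 label does above):

#include <stdint.h>

static uint32_t udiv32(uint32_t num, uint32_t den) {
    uint32_t quot = 0;
    /* Highest shift first; the assembly jumps into this ladder partway
     * down instead of testing every shift. Requires den != 0. */
    for (int shift = 31; shift >= 0; --shift) {
        /* (num >> shift) >= den  <=>  num >= (den << shift), phrased so
         * the left shift cannot overflow. */
        if ((num >> shift) >= den) {
            num  -= den << shift;          /* subtract shifted denominator */
            quot |= UINT32_C(1) << shift;  /* record this quotient bit */
        }
    }
    return quot;  /* num now holds the remainder */
}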

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0

View File

@ -70,7 +70,7 @@
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && \
#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif

View File

@ -229,13 +229,20 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
/* FIXME: __uint128_t isn't available on 32 bit platforms.
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\
OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
#else
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
#endif
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
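(The OPTIMISED_CASE macro body continues past the point where the diff view truncates it.) The new __SIZEOF_INT128__ guard matters because 32-bit targets have no __uint128_t, so the 16-byte case must not be instantiated there. These entry points back the compiler's fixed-size atomic libcalls; a sketch of C code that can end up in one of them when the access is not lock-free inline:

#include <stdint.h>

/* May be lowered to compiler-rt's __atomic_load_8 on targets where an
 * 8-byte atomic load cannot be inlined lock-free. */
uint64_t load_counter(uint64_t *p) {
    return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}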

View File

@ -110,10 +110,12 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__)
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
__asm __volatile("svc 0x0"
: "=r"(start_reg)
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg));
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
"r"(flags));
if (start_reg != 0) {
compilerrt_abort();
}
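
The change above passes an explicit zero flags argument in r2, so the cacheflush system call no longer sees an undefined register. Callers normally reach this routine through the compiler builtin rather than by naming __clear_cache directly; a typical JIT-style usage sketch:

#include <string.h>

/* Copy freshly generated machine code into buf, then synchronize the
 * instruction cache before jumping to it. */
void publish_code(char *buf, const void *code, size_t len) {
    memcpy(buf, code, len);
    __builtin___clear_cache(buf, buf + len);
}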

View File

@ -0,0 +1,82 @@
//===-- lib/floattitf.c - int128 -> quad-precision conversion -----*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements ti_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//
#define QUAD_PRECISION
#include "fp_lib.h"
#include "int_lib.h"
/* Returns: convert a ti_int to a fp_t, rounding toward even. */
/* Assumption: fp_t is an IEEE 128 bit floating point type
* ti_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floattitf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
const ti_int s = a >> (N-1);
a = (a ^ s) - s;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
* 1 = msb 1 bit
* P = bit LDBL_MANT_DIG-1 bits to the right of 1
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = ((tu_int)a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}
long_double_bits fb;
fb.u.high.all = (s & 0x8000000000000000LL) /* sign */
| (du_int)(e + 16383) << 48 /* exponent */
| ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
fb.u.low.all = (du_int)(a);
return fb.f;
}
#endif
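
The compiler emits __floattitf for ordinary __int128-to-long double conversions on targets where long double is IEEE binary128 (FreeBSD/aarch64, for instance); the P/Q/R bookkeeping above is the usual guard/round/sticky rounding into the 113-bit significand. A one-line trigger, assuming such a target:

/* Lowered by the compiler to a call to __floattitf. */
long double widen(__int128 x) {
    return (long double)x;
}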

View File

@ -0,0 +1,79 @@
//===-- lib/floatuntitf.c - uint128 -> quad-precision conversion --*- C -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements tu_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//
#define QUAD_PRECISION
#include "fp_lib.h"
#include "int_lib.h"
/* Returns: convert a tu_int to a fp_t, rounding toward even. */
/* Assumption: fp_t is an IEEE 128 bit floating point type
* tu_int is a 128 bit integral type
*/
/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
* mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
*/
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floatuntitf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
int sd = N - __clzti2(a); /* number of significant digits */
int e = sd - 1; /* exponent */
if (sd > LDBL_MANT_DIG) {
/* start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
* finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
* 12345678901234567890123456
* 1 = msb 1 bit
* P = bit LDBL_MANT_DIG-1 bits to the right of 1
* Q = bit LDBL_MANT_DIG bits to the right of 1
* R = "or" of all bits to the right of Q
*/
switch (sd) {
case LDBL_MANT_DIG + 1:
a <<= 1;
break;
case LDBL_MANT_DIG + 2:
break;
default:
a = (a >> (sd - (LDBL_MANT_DIG+2))) |
((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
};
/* finish: */
a |= (a & 4) != 0; /* Or P into R */
++a; /* round - this step may add a significant bit */
a >>= 2; /* dump Q and R */
/* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
a >>= 1;
++e;
}
/* a is now rounded to LDBL_MANT_DIG bits */
} else {
a <<= (LDBL_MANT_DIG - sd);
/* a is now rounded to LDBL_MANT_DIG bits */
}
long_double_bits fb;
fb.u.high.all = (du_int)(e + 16383) << 48 /* exponent */
| ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
fb.u.low.all = (du_int)(a);
return fb.f;
}
#endif

View File

@ -121,14 +121,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
#include <intrin.h>
uint32_t __inline __builtin_ctz(uint32_t value) {
uint32_t trailing_zero = 0;
unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value))
return trailing_zero;
return 32;
}
uint32_t __inline __builtin_clz(uint32_t value) {
uint32_t leading_zero = 0;
unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value))
return 31 - leading_zero;
return 32;
@ -136,7 +136,7 @@ uint32_t __inline __builtin_clz(uint32_t value) {
#if defined(_M_ARM) || defined(_M_X64)
uint32_t __inline __builtin_clzll(uint64_t value) {
uint32_t leading_zero = 0;
unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value))
return 63 - leading_zero;
return 64;
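
The switch to unsigned long matters because MSVC declares these intrinsics as, e.g., unsigned char _BitScanForward(unsigned long *Index, unsigned long Mask); uint32_t* is a distinct pointer type even though the sizes agree on Windows. A standalone use of the intrinsic for comparison:

#include <intrin.h>
#include <stdint.h>

/* Count trailing zeros with the MSVC intrinsic; the out-parameter must
 * be unsigned long, exactly as the hunk above fixes. */
uint32_t ctz32(uint32_t value) {
    unsigned long index;
    if (_BitScanForward(&index, value))
        return (uint32_t)index;
    return 32;  /* value was zero */
}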

View File

@ -0,0 +1,36 @@
/* ===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===
*
* The LLVM Compiler Infrastructure
*
* This file is dual licensed under the MIT and the University of Illinois Open
* Source Licenses. See LICENSE.TXT for details.
*
* ===----------------------------------------------------------------------===
*/
#include "int_lib.h"
COMPILER_RT_ABI di_int __fixdfdi(double a);
COMPILER_RT_ABI di_int __fixsfdi(float a);
COMPILER_RT_ABI du_int __fixunsdfdi(double a);
COMPILER_RT_ABI du_int __fixunssfdi(float a);
COMPILER_RT_ABI double __floatdidf(di_int a);
COMPILER_RT_ABI float __floatdisf(di_int a);
COMPILER_RT_ABI double __floatundidf(du_int a);
COMPILER_RT_ABI float __floatundisf(du_int a);
COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }
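
These one-liners map the MSVC-style runtime helper names that MinGW/ARM object code references (__dtoi64, __u64tod, and so on) onto compiler-rt's generic conversions. A cast that would emit one of them, as a sketch (the exact lowering is the compiler's choice):

/* On ARM/Windows the compiler lowers this cast to a __dtoi64 call. */
long long truncate_to_i64(double d) {
    return (long long)d;
}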

View File

@ -30,6 +30,8 @@ typedef ElfW(Ehdr) Elf_Ehdr;
#include "ubsan/ubsan_handlers.h"
#endif
using namespace __sanitizer;
namespace __cfi {
#define kCfiShadowLimitsStorageSize 4096 // 1 page

View File

@ -114,6 +114,26 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
// | reserved by kernel |
// +--------------------+ 0x0000000000
// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
//
// +--------------------+ 0x1000000000000 (top of memory)
// | application memory |
// +--------------------+ 0xffff00008000 (kAppAddr)
// | unused |
// +--------------------+ 0xaaaab0000000 (top of PIE address)
// | application PIE |
// +--------------------+ 0xaaaaa0000000 (base of PIE address)
// | |
// | unused |
// | |
// +--------------------+ 0x1200000000 (kUnusedAddr)
// | union table |
// +--------------------+ 0x8000000000 (kUnionTableAddr)
// | shadow memory |
// +--------------------+ 0x0000010000 (kShadowAddr)
// | reserved by kernel |
// +--------------------+ 0x0000000000
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
#ifdef DFSAN_RUNTIME_VMA
@ -372,11 +392,12 @@ static void InitializePlatformEarly() {
#ifdef DFSAN_RUNTIME_VMA
__dfsan::vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42) {
if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
__dfsan::vmaSize == 48) {
__dfsan_shadow_ptr_mask = ShadowMask();
} else {
Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
Printf("FATAL: Found %d - Supported 39 and 42\n", __dfsan::vmaSize);
Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
Die();
}
#endif
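
With the 48-bit layout accepted above, label lookup remains the same cheap mask-and-shift DFSan uses for the 39- and 42-bit layouts. A hedged sketch of the mapping (shadow_for in the real sources; the constant comes from the new Mapping48, and the left shift scales by the 2-byte dfsan_label):

#include <stdint.h>

/* Application address -> shadow address under the 48-bit VMA layout:
 * mask off the high bits, then scale by sizeof(dfsan_label) == 2. */
static inline uint16_t *shadow_for_48bit(uintptr_t app_addr) {
    const uintptr_t kShadowMask = ~(uintptr_t)0xfffff0000000ULL;
    return (uint16_t *)((app_addr & kShadowMask) << 1);
}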

View File

@ -18,6 +18,9 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "dfsan_platform.h"
using __sanitizer::uptr;
using __sanitizer::u16;
// Copy declarations from public sanitizer/dfsan_interface.h header here.
typedef u16 dfsan_label;

View File

@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
using namespace __sanitizer;
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);

View File

@ -46,6 +46,13 @@ struct Mapping42 {
static const uptr kShadowMask = ~0x3c000000000;
};
struct Mapping48 {
static const uptr kShadowAddr = 0x10000;
static const uptr kUnionTableAddr = 0x8000000000;
static const uptr kAppAddr = 0xffff00008000;
static const uptr kShadowMask = ~0xfffff0000000;
};
extern int vmaSize;
# define DFSAN_RUNTIME_VMA 1
#else
@ -72,11 +79,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
if (vmaSize == 39)
return MappingImpl<Mapping39, Type>();
else
return MappingImpl<Mapping42, Type>();
switch (vmaSize) {
case 39: return MappingImpl<Mapping39, Type>();
case 42: return MappingImpl<Mapping42, Type>();
case 48: return MappingImpl<Mapping48, Type>();
}
DCHECK(0);
return 0;
#else
return MappingImpl<Mapping, Type>();
#endif

View File

@ -266,6 +266,14 @@ fun:reflect.makeFuncStub=discard
# Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp
fun:__sanitizer_cov_trace_cmp=custom
fun:__sanitizer_cov_trace_cmp=uninstrumented
fun:__sanitizer_cov_trace_cmp1=custom
fun:__sanitizer_cov_trace_cmp1=uninstrumented
fun:__sanitizer_cov_trace_cmp2=custom
fun:__sanitizer_cov_trace_cmp2=uninstrumented
fun:__sanitizer_cov_trace_cmp4=custom
fun:__sanitizer_cov_trace_cmp4=uninstrumented
fun:__sanitizer_cov_trace_cmp8=custom
fun:__sanitizer_cov_trace_cmp8=uninstrumented
# Similar for __sanitizer_cov_trace_switch
fun:__sanitizer_cov_trace_switch=custom
fun:__sanitizer_cov_trace_switch=uninstrumented
